diff --git a/.vscode/launch.json b/.vscode/launch.json index 9cc2d04820..bdeb6c497a 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -22,6 +22,9 @@ "BUN_DEBUG_QUIET_LOGS": "1", "BUN_DEBUG_jest": "1", "BUN_GARBAGE_COLLECTOR_LEVEL": "1", + // "BUN_JSC_validateExceptionChecks": "1", + // "BUN_JSC_dumpSimulatedThrows": "1", + // "BUN_JSC_unexpectedExceptionStackTraceLimit": "20", }, "console": "internalConsole", "sourceMap": { diff --git a/.vscode/settings.json b/.vscode/settings.json index 717cb88fd5..c2c967c663 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -168,5 +168,5 @@ "WebKit/WebInspectorUI": true, }, "git.detectSubmodules": false, - "bun.test.customScript": "./build/debug/bun-debug test" + "bun.test.customScript": "./build/debug/bun-debug test", } diff --git a/bun.lock b/bun.lock index cb7e8f42c2..a6281bfcfb 100644 --- a/bun.lock +++ b/bun.lock @@ -6,6 +6,7 @@ "devDependencies": { "@lezer/common": "^1.2.3", "@lezer/cpp": "^1.1.3", + "@types/bun": "workspace:*", "bun-tracestrings": "github:oven-sh/bun.report#912ca63e26c51429d3e6799aa2a6ab079b188fd8", "esbuild": "^0.21.4", "mitata": "^0.1.11", diff --git a/cmake/sources/JavaScriptSources.txt b/cmake/sources/JavaScriptSources.txt index f6a44973e0..1ae3a19d0e 100644 --- a/cmake/sources/JavaScriptSources.txt +++ b/cmake/sources/JavaScriptSources.txt @@ -65,6 +65,12 @@ src/js/internal/linkedlist.ts src/js/internal/primordials.js src/js/internal/promisify.ts src/js/internal/shared.ts +src/js/internal/sql/errors.ts +src/js/internal/sql/postgres.ts +src/js/internal/sql/query.ts +src/js/internal/sql/shared.ts +src/js/internal/sql/sqlite.ts +src/js/internal/sql/utils.ts src/js/internal/stream.promises.ts src/js/internal/stream.ts src/js/internal/streams/add-abort-signal.ts diff --git a/docs/api/sql.md b/docs/api/sql.md index 2b25208aa2..e033fe0916 100644 --- a/docs/api/sql.md +++ b/docs/api/sql.md @@ -1,20 +1,20 @@ -Bun provides native bindings for working with PostgreSQL databases with a modern, Promise-based API. The interface is designed to be simple and performant, using tagged template literals for queries and offering features like connection pooling, transactions, and prepared statements. +Bun provides native bindings for working with SQL databases through a unified Promise-based API that supports both PostgreSQL and SQLite. The interface is designed to be simple and performant, using tagged template literals for queries and offering features like connection pooling, transactions, and prepared statements. 
```ts -import { sql } from "bun"; +import { sql, SQL } from "bun"; +// PostgreSQL (default) const users = await sql` SELECT * FROM users WHERE active = ${true} LIMIT ${10} `; -// Select with multiple conditions -const activeUsers = await sql` - SELECT * - FROM users - WHERE active = ${true} - AND age >= ${18} +// With a SQLite db +const sqlite = new SQL("sqlite://myapp.db"); +const results = await sqlite` + SELECT * FROM users + WHERE active = ${1} `; ``` @@ -44,6 +44,115 @@ const activeUsers = await sql` {% /features %} +## Database Support + +Bun.SQL provides a unified API for multiple database systems: + +### PostgreSQL + +PostgreSQL is used when: + +- The connection string doesn't match SQLite patterns (it's the fallback adapter) +- The connection string explicitly uses `postgres://` or `postgresql://` protocols +- No connection string is provided and environment variables point to PostgreSQL + +```ts +import { sql } from "bun"; +// Uses PostgreSQL if DATABASE_URL is not set or is a PostgreSQL URL +await sql`SELECT ...`; + +import { SQL } from "bun"; +const pg = new SQL("postgres://user:pass@localhost:5432/mydb"); +await pg`SELECT ...`; +``` + +### SQLite + +SQLite support is now built into Bun.SQL, providing the same tagged template literal interface as PostgreSQL: + +```ts +import { SQL } from "bun"; + +// In-memory database +const memory = new SQL(":memory:"); +const memory2 = new SQL("sqlite://:memory:"); + +// File-based database +const db = new SQL("sqlite://myapp.db"); + +// Using options object +const db2 = new SQL({ + adapter: "sqlite", + filename: "./data/app.db", +}); + +// For simple filenames, specify adapter explicitly +const db3 = new SQL("myapp.db", { adapter: "sqlite" }); +``` + +
+SQLite Connection String Formats + +SQLite accepts various URL formats for connection strings: + +```ts +// Standard sqlite:// protocol +new SQL("sqlite://path/to/database.db"); +new SQL("sqlite:path/to/database.db"); // Without slashes + +// file:// protocol (also recognized as SQLite) +new SQL("file://path/to/database.db"); +new SQL("file:path/to/database.db"); + +// Special :memory: database +new SQL(":memory:"); +new SQL("sqlite://:memory:"); +new SQL("file://:memory:"); + +// Relative and absolute paths +new SQL("sqlite://./local.db"); // Relative to current directory +new SQL("sqlite://../parent/db.db"); // Parent directory +new SQL("sqlite:///absolute/path.db"); // Absolute path + +// With query parameters +new SQL("sqlite://data.db?mode=ro"); // Read-only mode +new SQL("sqlite://data.db?mode=rw"); // Read-write mode (no create) +new SQL("sqlite://data.db?mode=rwc"); // Read-write-create mode (default) +``` + +**Note:** Simple filenames without a protocol (like `"myapp.db"`) require explicitly specifying `{ adapter: "sqlite" }` to avoid ambiguity with PostgreSQL. + +
+ +
+SQLite-Specific Options + +SQLite databases support additional configuration options: + +```ts +const db = new SQL({ + adapter: "sqlite", + filename: "app.db", + + // SQLite-specific options + readonly: false, // Open in read-only mode + create: true, // Create database if it doesn't exist + readwrite: true, // Open for reading and writing + + // Additional Bun:sqlite options + strict: true, // Enable strict mode + safeIntegers: false, // Use JavaScript numbers for integers +}); +``` + +Query parameters in the URL are parsed to set these options: + +- `?mode=ro` → `readonly: true` +- `?mode=rw` → `readonly: false, create: false` +- `?mode=rwc` → `readonly: false, create: true` (default) + +
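As a rough sketch of the mapping above (the `data.db` filename is only a placeholder), a `?mode=ro` URL should behave like passing the equivalent options explicitly:

```ts
import { SQL } from "bun";

// URL form: the query parameter selects read-only mode
const viaUrl = new SQL("sqlite://data.db?mode=ro");

// Explicit options form, per the mapping listed above
const viaOptions = new SQL({
  adapter: "sqlite",
  filename: "data.db",
  readonly: true,
});
```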
+ ### Inserting data You can pass JavaScript values directly to the SQL template literal and escaping will be handled for you. @@ -251,14 +360,55 @@ await query; ## Database Environment Variables -`sql` connection parameters can be configured using environment variables. The client checks these variables in a specific order of precedence. +`sql` connection parameters can be configured using environment variables. The client checks these variables in a specific order of precedence and automatically detects the database type based on the connection string format. -The following environment variables can be used to define the connection URL: +### Automatic Database Detection + +When using `Bun.sql()` without arguments or `new SQL()` with a connection string, the adapter is automatically detected based on the URL format. SQLite becomes the default adapter in these cases: + +#### SQLite Auto-Detection + +SQLite is automatically selected when the connection string matches these patterns: + +- `:memory:` - In-memory database +- `sqlite://...` - SQLite protocol URLs +- `sqlite:...` - SQLite protocol without slashes +- `file://...` - File protocol URLs +- `file:...` - File protocol without slashes + +```ts +// These all use SQLite automatically (no adapter needed) +const sql1 = new SQL(":memory:"); +const sql2 = new SQL("sqlite://app.db"); +const sql3 = new SQL("file://./database.db"); + +// Works with DATABASE_URL environment variable +DATABASE_URL=":memory:" bun run app.js +DATABASE_URL="sqlite://myapp.db" bun run app.js +DATABASE_URL="file://./data/app.db" bun run app.js +``` + +#### PostgreSQL Auto-Detection + +PostgreSQL is the default for all other connection strings: + +```bash +# PostgreSQL is detected for these patterns +DATABASE_URL="postgres://user:pass@localhost:5432/mydb" bun run app.js +DATABASE_URL="postgresql://user:pass@localhost:5432/mydb" bun run app.js + +# Or any URL that doesn't match SQLite patterns +DATABASE_URL="localhost:5432/mydb" bun run app.js +``` + +### PostgreSQL Environment Variables + +The following environment variables can be used to define the PostgreSQL connection: | Environment Variable | Description | | --------------------------- | ------------------------------------------ | | `POSTGRES_URL` | Primary connection URL for PostgreSQL | -| `DATABASE_URL` | Alternative connection URL | +| `DATABASE_URL` | Alternative connection URL (auto-detected) | | `PGURL` | Alternative connection URL | | `PG_URL` | Alternative connection URL | | `TLS_POSTGRES_DATABASE_URL` | SSL/TLS-enabled connection URL | @@ -274,6 +424,19 @@ If no connection URL is provided, the system checks for the following individual | `PGPASSWORD` | - | (empty) | Database password | | `PGDATABASE` | - | username | Database name | +### SQLite Environment Variables + +SQLite connections can be configured via `DATABASE_URL` when it contains a SQLite-compatible URL: + +```bash +# These are all recognized as SQLite +DATABASE_URL=":memory:" +DATABASE_URL="sqlite://./app.db" +DATABASE_URL="file:///absolute/path/to/db.sqlite" +``` + +**Note:** PostgreSQL-specific environment variables (`POSTGRES_URL`, `PGHOST`, etc.) are ignored when using SQLite. + ## Runtime Preconnection Bun can preconnect to PostgreSQL at startup to improve performance by establishing database connections before your application code runs. This is useful for reducing connection latency on the first database query. 
@@ -293,16 +456,18 @@ The `--sql-preconnect` flag will automatically establish a PostgreSQL connection ## Connection Options -You can configure your database connection manually by passing options to the SQL constructor: +You can configure your database connection manually by passing options to the SQL constructor. Options vary depending on the database adapter: + +### PostgreSQL Options ```ts import { SQL } from "bun"; const db = new SQL({ - // Required + // Connection details (adapter is auto-detected as PostgreSQL) url: "postgres://user:pass@localhost:5432/dbname", - // Optional configuration + // Alternative connection parameters hostname: "localhost", port: 5432, database: "myapp", @@ -330,14 +495,53 @@ const db = new SQL({ // Callbacks onconnect: client => { - console.log("Connected to database"); + console.log("Connected to PostgreSQL"); }, onclose: client => { - console.log("Connection closed"); + console.log("PostgreSQL connection closed"); }, }); ``` +### SQLite Options + +```ts +import { SQL } from "bun"; + +const db = new SQL({ + // Required for SQLite + adapter: "sqlite", + filename: "./data/app.db", // or ":memory:" for in-memory database + + // SQLite-specific access modes + readonly: false, // Open in read-only mode + create: true, // Create database if it doesn't exist + readwrite: true, // Allow read and write operations + + // SQLite data handling + strict: true, // Enable strict mode for better type safety + safeIntegers: false, // Use BigInt for integers exceeding JS number range + + // Callbacks + onconnect: client => { + console.log("SQLite database opened"); + }, + onclose: client => { + console.log("SQLite database closed"); + }, +}); +``` + +
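As a follow-up sketch (assuming `./data/app.db` already exists and contains a hypothetical `users` table), opening the database with `readonly: true` and attempting a write should surface a `SQL.SQLiteError` carrying the `SQLITE_READONLY` code described in the error-handling section below:

```ts
import { SQL } from "bun";

// Read-only handle over an existing database file
const ro = new SQL({ adapter: "sqlite", filename: "./data/app.db", readonly: true });

try {
  await ro`INSERT INTO users (name) VALUES (${"Alice"})`;
} catch (error) {
  if (error instanceof SQL.SQLiteError && error.code === "SQLITE_READONLY") {
    console.log("Write rejected, database is read-only:", error.message);
  }
}
```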
+SQLite Connection Notes + +- **Connection Pooling**: SQLite doesn't use connection pooling as it's a file-based database. Each `SQL` instance represents a single connection. +- **Transactions**: SQLite supports nested transactions through savepoints, similar to PostgreSQL. +- **Concurrent Access**: SQLite handles concurrent access through file locking. Use WAL mode for better concurrency. +- **Memory Databases**: Using `:memory:` creates a temporary database that exists only for the connection lifetime. + +
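Following up on the savepoint note above, here is a minimal sketch (the table names are hypothetical and assumed to exist) of nesting a savepoint inside a transaction on a SQLite connection:

```ts
import { SQL } from "bun";

const db = new SQL("sqlite://app.db");

await db.begin(async tx => {
  await tx`INSERT INTO users (name) VALUES (${"Alice"})`;

  // Nested work runs in a savepoint; if it throws, only this inner part is rolled back
  await tx.savepoint(async sp => {
    await sp`INSERT INTO audit_log (entry) VALUES (${"created Alice"})`;
  });
});
```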
+ ## Dynamic passwords When clients need to use alternative authentication schemes such as access tokens or connections to databases with rotating passwords, provide either a synchronous or asynchronous function that will resolve the dynamic password value at connection time. @@ -353,11 +557,66 @@ const sql = new SQL(url, { }); ``` +## SQLite-Specific Features + +### Query Execution + +SQLite executes queries synchronously, unlike PostgreSQL which uses asynchronous I/O. However, the API remains consistent using Promises: + +```ts +const sqlite = new SQL("sqlite://app.db"); + +// Works the same as PostgreSQL, but executes synchronously under the hood +const users = await sqlite`SELECT * FROM users`; + +// Parameters work identically +const user = await sqlite`SELECT * FROM users WHERE id = ${userId}`; +``` + +### SQLite Pragmas + +You can use PRAGMA statements to configure SQLite behavior: + +```ts +const sqlite = new SQL("sqlite://app.db"); + +// Enable foreign keys +await sqlite`PRAGMA foreign_keys = ON`; + +// Set journal mode to WAL for better concurrency +await sqlite`PRAGMA journal_mode = WAL`; + +// Check integrity +const integrity = await sqlite`PRAGMA integrity_check`; +``` + +### Data Type Differences + +SQLite has a more flexible type system than PostgreSQL: + +```ts +// SQLite stores data in 5 storage classes: NULL, INTEGER, REAL, TEXT, BLOB +const sqlite = new SQL("sqlite://app.db"); + +// SQLite is more lenient with types +await sqlite` + CREATE TABLE flexible ( + id INTEGER PRIMARY KEY, + data TEXT, -- Can store numbers as strings + value NUMERIC, -- Can store integers, reals, or text + blob BLOB -- Binary data + ) +`; + +// JavaScript values are automatically converted +await sqlite`INSERT INTO flexible VALUES (${1}, ${"text"}, ${123.45}, ${Buffer.from("binary")})`; +``` + ## Transactions -To start a new transaction, use `sql.begin`. This method reserves a dedicated connection for the duration of the transaction and provides a scoped `sql` instance to use within the callback function. Once the callback completes, `sql.begin` resolves with the return value of the callback. +To start a new transaction, use `sql.begin`. This method works for both PostgreSQL and SQLite. For PostgreSQL, it reserves a dedicated connection from the pool. For SQLite, it begins a transaction on the single connection. -The `BEGIN` command is sent automatically, including any optional configurations you specify. If an error occurs during the transaction, a `ROLLBACK` is triggered to release the reserved connection and ensure the process continues smoothly. +The `BEGIN` command is sent automatically, including any optional configurations you specify. If an error occurs during the transaction, a `ROLLBACK` is triggered to ensure the process continues smoothly. ### Basic Transactions @@ -552,9 +811,34 @@ Note that disabling prepared statements may impact performance for queries that ## Error Handling -The client provides typed errors for different failure scenarios: +The client provides typed errors for different failure scenarios. 
Errors are database-specific and extend from base error classes: -### Connection Errors +### Error Classes + +```ts +import { SQL } from "bun"; + +try { + await sql`SELECT * FROM users`; +} catch (error) { + if (error instanceof SQL.PostgresError) { + // PostgreSQL-specific error + console.log(error.code); // PostgreSQL error code + console.log(error.detail); // Detailed error message + console.log(error.hint); // Helpful hint from PostgreSQL + } else if (error instanceof SQL.SQLiteError) { + // SQLite-specific error + console.log(error.code); // SQLite error code (e.g., "SQLITE_CONSTRAINT") + console.log(error.errno); // SQLite error number + console.log(error.byteOffset); // Byte offset in SQL statement (if available) + } else if (error instanceof SQL.SQLError) { + // Generic SQL error (base class) + console.log(error.message); + } +} +``` + +### PostgreSQL Connection Errors | Connection Errors | Description | | --------------------------------- | ---------------------------------------------------- | @@ -619,6 +903,50 @@ The client provides typed errors for different failure scenarios: | `ERR_POSTGRES_UNSAFE_TRANSACTION` | Unsafe transaction operation detected | | `ERR_POSTGRES_INVALID_TRANSACTION_STATE` | Invalid transaction state | +### SQLite-Specific Errors + +SQLite errors provide error codes and numbers that correspond to SQLite's standard error codes: + +
+Common SQLite Error Codes + +| Error Code | errno | Description | +| ------------------- | ----- | ---------------------------------------------------- | +| `SQLITE_CONSTRAINT` | 19 | Constraint violation (UNIQUE, CHECK, NOT NULL, etc.) | +| `SQLITE_BUSY` | 5 | Database is locked | +| `SQLITE_LOCKED` | 6 | Table in the database is locked | +| `SQLITE_READONLY` | 8 | Attempt to write to a readonly database | +| `SQLITE_IOERR` | 10 | Disk I/O error | +| `SQLITE_CORRUPT` | 11 | Database disk image is malformed | +| `SQLITE_FULL` | 13 | Database or disk is full | +| `SQLITE_CANTOPEN` | 14 | Unable to open database file | +| `SQLITE_PROTOCOL` | 15 | Database lock protocol error | +| `SQLITE_SCHEMA` | 17 | Database schema has changed | +| `SQLITE_TOOBIG` | 18 | String or BLOB exceeds size limit | +| `SQLITE_MISMATCH` | 20 | Data type mismatch | +| `SQLITE_MISUSE` | 21 | Library used incorrectly | +| `SQLITE_AUTH` | 23 | Authorization denied | + +Example error handling: + +```ts +const sqlite = new SQL("sqlite://app.db"); + +try { + await sqlite`INSERT INTO users (id, name) VALUES (1, 'Alice')`; + await sqlite`INSERT INTO users (id, name) VALUES (1, 'Bob')`; // Duplicate ID +} catch (error) { + if (error instanceof SQL.SQLiteError) { + if (error.code === "SQLITE_CONSTRAINT") { + console.log("Constraint violation:", error.message); + // Handle unique constraint violation + } + } +} +``` + +
+ ## Numbers and BigInt Bun's SQL client includes special handling for large numbers that exceed the range of a 53-bit integer. Here's how it works: @@ -652,7 +980,6 @@ There's still some things we haven't finished yet. - Connection preloading via `--db-preconnect` Bun CLI flag - MySQL support: [we're working on it](https://github.com/oven-sh/bun/pull/15274) -- SQLite support: planned, but not started. Ideally, we implement it natively instead of wrapping `bun:sqlite`. - Column name transforms (e.g. `snake_case` to `camelCase`). This is mostly blocked on a unicode-aware implementation of changing the case in C++ using WebKit's `WTF::String`. - Column type transforms diff --git a/package.json b/package.json index 37679ba081..7ed45840be 100644 --- a/package.json +++ b/package.json @@ -7,9 +7,10 @@ "./packages/@types/bun" ], "devDependencies": { - "bun-tracestrings": "github:oven-sh/bun.report#912ca63e26c51429d3e6799aa2a6ab079b188fd8", "@lezer/common": "^1.2.3", "@lezer/cpp": "^1.1.3", + "@types/bun": "workspace:*", + "bun-tracestrings": "github:oven-sh/bun.report#912ca63e26c51429d3e6799aa2a6ab079b188fd8", "esbuild": "^0.21.4", "mitata": "^0.1.11", "peechy": "0.4.34", diff --git a/packages/bun-types/bun.d.ts b/packages/bun-types/bun.d.ts index bb24cbb32c..6e950b768f 100644 --- a/packages/bun-types/bun.d.ts +++ b/packages/bun-types/bun.d.ts @@ -14,7 +14,6 @@ * This module aliases `globalThis.Bun`. */ declare module "bun" { - type DistributedOmit = T extends T ? Omit : never; type PathLike = string | NodeJS.TypedArray | ArrayBufferLike | URL; type ArrayBufferView = | NodeJS.TypedArray @@ -68,39 +67,31 @@ declare module "bun" { ? T : Otherwise // Not defined in lib dom (or anywhere else), so no conflict. We can safely use our own definition : Otherwise; // Lib dom not loaded anyway, so no conflict. We can safely use our own definition + + /** + * Like Omit, but correctly distributes over unions. Most useful for removing + * properties from union options objects, like {@link Bun.SQL.Options} + * + * @example + * ```ts + * type X = Bun.DistributedOmit<{type?: 'a', url?: string} | {type?: 'b', flag?: boolean}, "url"> + * // `{type?: 'a'} | {type?: 'b', flag?: boolean}` (Omit applied to each union item instead of entire type) + * + * type X = Omit<{type?: 'a', url?: string} | {type?: 'b', flag?: boolean}, "url">; + * // `{type?: "a" | "b" | undefined}` (Missing `flag` property and no longer a union) + * ``` + */ + type DistributedOmit = T extends T ? Omit : never; + + type KeysInBoth = Extract; + type MergeInner = Omit> & + Omit> & { + [Key in KeysInBoth]: A[Key] | B[Key]; + }; + type Merge = MergeInner & MergeInner; + type DistributedMerge = T extends T ? 
Merge> : never; } - /** @deprecated This type is unused in Bun's types and might be removed in the near future */ - type Platform = - | "aix" - | "android" - | "darwin" - | "freebsd" - | "haiku" - | "linux" - | "openbsd" - | "sunos" - | "win32" - | "cygwin" - | "netbsd"; - - /** @deprecated This type is unused in Bun's types and might be removed in the near future */ - type Architecture = "arm" | "arm64" | "ia32" | "mips" | "mipsel" | "ppc" | "ppc64" | "s390" | "s390x" | "x64"; - - /** @deprecated This type is unused in Bun's types and might be removed in the near future */ - type UncaughtExceptionListener = (error: Error, origin: UncaughtExceptionOrigin) => void; - - /** - * Most of the time the unhandledRejection will be an Error, but this should not be relied upon - * as *anything* can be thrown/rejected, it is therefore unsafe to assume that the value is an Error. - * - * @deprecated This type is unused in Bun's types and might be removed in the near future - */ - type UnhandledRejectionListener = (reason: unknown, promise: Promise) => void; - - /** @deprecated This type is unused in Bun's types and might be removed in the near future */ - type MultipleResolveListener = (type: MultipleResolveType, promise: Promise, value: unknown) => void; - interface ErrorEventInit extends EventInit { colno?: number; error?: any; @@ -1276,678 +1267,6 @@ declare module "bun" { stat(): Promise; } - namespace SQL { - type AwaitPromisesArray>> = { - [K in keyof T]: Awaited; - }; - - type ContextCallbackResult = T extends Array> ? AwaitPromisesArray : Awaited; - type ContextCallback = (sql: SQL) => Promise; - - /** - * Configuration options for SQL client connection and behavior - * - * @example - * ```ts - * const config: Bun.SQL.Options = { - * host: 'localhost', - * port: 5432, - * user: 'dbuser', - * password: 'secretpass', - * database: 'myapp', - * idleTimeout: 30, - * max: 20, - * onconnect: (client) => { - * console.log('Connected to database'); - * } - * }; - * ``` - */ - interface Options { - /** - * Connection URL (can be string or URL object) - */ - url?: URL | string | undefined; - - /** - * Database server hostname - * @default "localhost" - */ - host?: string | undefined; - - /** - * Database server hostname (alias for host) - * @deprecated Prefer {@link host} - * @default "localhost" - */ - hostname?: string | undefined; - - /** - * Database server port number - * @default 5432 - */ - port?: number | string | undefined; - - /** - * Database user for authentication - * @default "postgres" - */ - username?: string | undefined; - - /** - * Database user for authentication (alias for username) - * @deprecated Prefer {@link username} - * @default "postgres" - */ - user?: string | undefined; - - /** - * Database password for authentication - * @default "" - */ - password?: string | (() => MaybePromise) | undefined; - - /** - * Database password for authentication (alias for password) - * @deprecated Prefer {@link password} - * @default "" - */ - pass?: string | (() => MaybePromise) | undefined; - - /** - * Name of the database to connect to - * @default The username value - */ - database?: string | undefined; - - /** - * Name of the database to connect to (alias for database) - * @deprecated Prefer {@link database} - * @default The username value - */ - db?: string | undefined; - - /** - * Database adapter/driver to use - * @default "postgres" - */ - adapter?: "postgres" /*| "sqlite" | "mysql"*/ | (string & {}) | undefined; - - /** - * Maximum time in seconds to wait for connection to become 
available - * @default 0 (no timeout) - */ - idleTimeout?: number | undefined; - - /** - * Maximum time in seconds to wait for connection to become available (alias for idleTimeout) - * @deprecated Prefer {@link idleTimeout} - * @default 0 (no timeout) - */ - idle_timeout?: number | undefined; - - /** - * Maximum time in seconds to wait when establishing a connection - * @default 30 - */ - connectionTimeout?: number | undefined; - - /** - * Maximum time in seconds to wait when establishing a connection (alias for connectionTimeout) - * @deprecated Prefer {@link connectionTimeout} - * @default 30 - */ - connection_timeout?: number | undefined; - - /** - * Maximum time in seconds to wait when establishing a connection (alias for connectionTimeout) - * @deprecated Prefer {@link connectionTimeout} - * @default 30 - */ - connectTimeout?: number | undefined; - - /** - * Maximum time in seconds to wait when establishing a connection (alias for connectionTimeout) - * @deprecated Prefer {@link connectionTimeout} - * @default 30 - */ - connect_timeout?: number | undefined; - - /** - * Maximum lifetime in seconds of a connection - * @default 0 (no maximum lifetime) - */ - maxLifetime?: number | undefined; - - /** - * Maximum lifetime in seconds of a connection (alias for maxLifetime) - * @deprecated Prefer {@link maxLifetime} - * @default 0 (no maximum lifetime) - */ - max_lifetime?: number | undefined; - - /** - * Whether to use TLS/SSL for the connection - * @default false - */ - tls?: TLSOptions | boolean | undefined; - - /** - * Whether to use TLS/SSL for the connection (alias for tls) - * @default false - */ - ssl?: TLSOptions | boolean | undefined; - - // `.path` is currently unsupported in Bun, the implementation is incomplete. - // - // /** - // * Unix domain socket path for connection - // * @default "" - // */ - // path?: string | undefined; - - /** - * Callback function executed when a connection is established - */ - onconnect?: ((client: SQL) => void) | undefined; - - /** - * Callback function executed when a connection is closed - */ - onclose?: ((client: SQL) => void) | undefined; - - /** - * Postgres client runtime configuration options - * - * @see https://www.postgresql.org/docs/current/runtime-config-client.html - */ - connection?: Record | undefined; - - /** - * Maximum number of connections in the pool - * @default 10 - */ - max?: number | undefined; - - /** - * By default values outside i32 range are returned as strings. If this is true, values outside i32 range are returned as BigInts. 
- * @default false - */ - bigint?: boolean | undefined; - - /** - * Automatic creation of prepared statements - * @default true - */ - prepare?: boolean | undefined; - } - - /** - * Represents a SQL query that can be executed, with additional control methods - * Extends Promise to allow for async/await usage - */ - interface Query extends Promise { - /** - * Indicates if the query is currently executing - */ - active: boolean; - - /** - * Indicates if the query has been cancelled - */ - cancelled: boolean; - - /** - * Cancels the executing query - */ - cancel(): Query; - - /** - * Executes the query as a simple query, no parameters are allowed but can execute multiple commands separated by semicolons - */ - simple(): Query; - - /** - * Executes the query - */ - execute(): Query; - - /** - * Returns the raw query result - */ - raw(): Query; - - /** - * Returns only the values from the query result - */ - values(): Query; - } - - /** - * Callback function type for transaction contexts - * @param sql Function to execute SQL queries within the transaction - */ - type TransactionContextCallback = ContextCallback; - - /** - * Callback function type for savepoint contexts - * @param sql Function to execute SQL queries within the savepoint - */ - type SavepointContextCallback = ContextCallback; - - /** - * SQL.Helper represents a parameter or serializable - * value inside of a query. - * - * @example - * ```ts - * const helper = sql(users, 'id'); - * await sql`insert into users ${helper}`; - * ``` - */ - interface Helper { - readonly value: T[]; - readonly columns: (keyof T)[]; - } - } - - /** - * Main SQL client interface providing connection and transaction management - */ - interface SQL extends AsyncDisposable { - /** - * Executes a SQL query using template literals - * @example - * ```ts - * const [user] = await sql`select * from users where id = ${1}`; - * ``` - */ - (strings: TemplateStringsArray, ...values: unknown[]): SQL.Query; - - /** - * Execute a SQL query using a string - * - * @example - * ```ts - * const users = await sql`SELECT * FROM users WHERE id = ${1}`; - * ``` - */ - (string: string): SQL.Query; - - /** - * Helper function for inserting an object into a query - * - * @example - * ```ts - * // Insert an object - * const result = await sql`insert into users ${sql(users)} returning *`; - * - * // Or pick specific columns - * const result = await sql`insert into users ${sql(users, "id", "name")} returning *`; - * - * // Or a single object - * const result = await sql`insert into users ${sql(user)} returning *`; - * ``` - */ - (obj: T | T[] | readonly T[]): SQL.Helper; - - /** - * Helper function for inserting an object into a query, supporting specific columns - * - * @example - * ```ts - * // Insert an object - * const result = await sql`insert into users ${sql(users)} returning *`; - * - * // Or pick specific columns - * const result = await sql`insert into users ${sql(users, "id", "name")} returning *`; - * - * // Or a single object - * const result = await sql`insert into users ${sql(user)} returning *`; - * ``` - */ - ( - obj: T | T[] | readonly T[], - ...columns: readonly Keys[] - ): SQL.Helper>; - - /** - * Helper function for inserting any serializable value into a query - * - * @example - * ```ts - * const result = await sql`SELECT * FROM users WHERE id IN ${sql([1, 2, 3])}`; - * ``` - */ - (value: T): SQL.Helper; - - /** - * Commits a distributed transaction also know as prepared transaction in postgres or XA transaction in MySQL - * - * @param name - The name of the 
distributed transaction - * - * @example - * ```ts - * await sql.commitDistributed("my_distributed_transaction"); - * ``` - */ - commitDistributed(name: string): Promise; - - /** - * Rolls back a distributed transaction also know as prepared transaction in postgres or XA transaction in MySQL - * - * @param name - The name of the distributed transaction - * - * @example - * ```ts - * await sql.rollbackDistributed("my_distributed_transaction"); - * ``` - */ - rollbackDistributed(name: string): Promise; - - /** Waits for the database connection to be established - * - * @example - * ```ts - * await sql.connect(); - * ``` - */ - connect(): Promise; - - /** - * Closes the database connection with optional timeout in seconds. If timeout is 0, it will close immediately, if is not provided it will wait for all queries to finish before closing. - * - * @param options - The options for the close - * - * @example - * ```ts - * await sql.close({ timeout: 1 }); - * ``` - */ - close(options?: { timeout?: number }): Promise; - - /** - * Closes the database connection with optional timeout in seconds. If timeout is 0, it will close immediately, if is not provided it will wait for all queries to finish before closing. - * This is an alias of {@link SQL.close} - * - * @param options - The options for the close - * - * @example - * ```ts - * await sql.end({ timeout: 1 }); - * ``` - */ - end(options?: { timeout?: number }): Promise; - - /** - * Flushes any pending operations - * - * @example - * ```ts - * sql.flush(); - * ``` - */ - flush(): void; - - /** - * The reserve method pulls out a connection from the pool, and returns a client that wraps the single connection. - * - * This can be used for running queries on an isolated connection. - * Calling reserve in a reserved Sql will return a new reserved connection, not the same connection (behavior matches postgres package). - * - * @example - * ```ts - * const reserved = await sql.reserve(); - * await reserved`select * from users`; - * await reserved.release(); - * // with in a production scenario would be something more like - * const reserved = await sql.reserve(); - * try { - * // ... queries - * } finally { - * await reserved.release(); - * } - * - * // Bun supports Symbol.dispose and Symbol.asyncDispose - * { - * // always release after context (safer) - * using reserved = await sql.reserve() - * await reserved`select * from users` - * } - * ``` - */ - reserve(): Promise; - - /** - * Begins a new transaction. - * - * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.begin will resolve with the returned value from the callback function. - * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue. - * @example - * const [user, account] = await sql.begin(async sql => { - * const [user] = await sql` - * insert into users ( - * name - * ) values ( - * 'Murray' - * ) - * returning * - * ` - * const [account] = await sql` - * insert into accounts ( - * user_id - * ) values ( - * ${ user.user_id } - * ) - * returning * - * ` - * return [user, account] - * }) - */ - begin(fn: SQL.TransactionContextCallback): Promise>; - - /** - * Begins a new transaction with options. - * - * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. 
sql.begin will resolve with the returned value from the callback function. - * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue. - * @example - * const [user, account] = await sql.begin("read write", async sql => { - * const [user] = await sql` - * insert into users ( - * name - * ) values ( - * 'Murray' - * ) - * returning * - * ` - * const [account] = await sql` - * insert into accounts ( - * user_id - * ) values ( - * ${ user.user_id } - * ) - * returning * - * ` - * return [user, account] - * }) - */ - begin(options: string, fn: SQL.TransactionContextCallback): Promise>; - - /** - * Alternative method to begin a transaction. - * - * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.transaction will resolve with the returned value from the callback function. - * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue. - * @alias begin - * @example - * const [user, account] = await sql.transaction(async sql => { - * const [user] = await sql` - * insert into users ( - * name - * ) values ( - * 'Murray' - * ) - * returning * - * ` - * const [account] = await sql` - * insert into accounts ( - * user_id - * ) values ( - * ${ user.user_id } - * ) - * returning * - * ` - * return [user, account] - * }) - */ - transaction(fn: SQL.TransactionContextCallback): Promise>; - - /** - * Alternative method to begin a transaction with options - * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.transaction will resolve with the returned value from the callback function. - * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue. - * - * @alias {@link begin} - * - * @example - * const [user, account] = await sql.transaction("read write", async sql => { - * const [user] = await sql` - * insert into users ( - * name - * ) values ( - * 'Murray' - * ) - * returning * - * ` - * const [account] = await sql` - * insert into accounts ( - * user_id - * ) values ( - * ${ user.user_id } - * ) - * returning * - * ` - * return [user, account] - * }); - */ - transaction(options: string, fn: SQL.TransactionContextCallback): Promise>; - - /** - * Begins a distributed transaction - * Also know as Two-Phase Commit, in a distributed transaction, Phase 1 involves the coordinator preparing nodes by ensuring data is written and ready to commit, while Phase 2 finalizes with nodes committing or rolling back based on the coordinator's decision, ensuring durability and releasing locks. - * In PostgreSQL and MySQL distributed transactions persist beyond the original session, allowing privileged users or coordinators to commit/rollback them, ensuring support for distributed transactions, recovery, and administrative tasks. - * beginDistributed will automatic rollback if any exception are not caught, and you can commit and rollback later if everything goes well. - * PostgreSQL natively supports distributed transactions using PREPARE TRANSACTION, while MySQL uses XA Transactions, and MSSQL also supports distributed/XA transactions. 
However, in MSSQL, distributed transactions are tied to the original session, the DTC coordinator, and the specific connection. - * These transactions are automatically committed or rolled back following the same rules as regular transactions, with no option for manual intervention from other sessions, in MSSQL distributed transactions are used to coordinate transactions using Linked Servers. - * - * @example - * await sql.beginDistributed("numbers", async sql => { - * await sql`create table if not exists numbers (a int)`; - * await sql`insert into numbers values(1)`; - * }); - * // later you can call - * await sql.commitDistributed("numbers"); - * // or await sql.rollbackDistributed("numbers"); - */ - beginDistributed( - name: string, - fn: SQL.TransactionContextCallback, - ): Promise>; - - /** Alternative method to begin a distributed transaction - * @alias {@link beginDistributed} - */ - distributed(name: string, fn: SQL.TransactionContextCallback): Promise>; - - /**If you know what you're doing, you can use unsafe to pass any string you'd like. - * Please note that this can lead to SQL injection if you're not careful. - * You can also nest sql.unsafe within a safe sql expression. This is useful if only part of your fraction has unsafe elements. - * @example - * const result = await sql.unsafe(`select ${danger} from users where id = ${dragons}`) - */ - unsafe(string: string, values?: any[]): SQL.Query; - - /** - * Reads a file and uses the contents as a query. - * Optional parameters can be used if the file includes $1, $2, etc - * @example - * const result = await sql.file("query.sql", [1, 2, 3]); - */ - file(filename: string, values?: any[]): SQL.Query; - - /** - * Current client options - */ - options: SQL.Options; - } - - const SQL: { - /** - * Creates a new SQL client instance - * - * @param connectionString - The connection string for the SQL client - * - * @example - * ```ts - * const sql = new SQL("postgres://localhost:5432/mydb"); - * const sql = new SQL(new URL("postgres://localhost:5432/mydb")); - * ``` - */ - new (connectionString: string | URL): SQL; - - /** - * Creates a new SQL client instance with options - * - * @param connectionString - The connection string for the SQL client - * @param options - The options for the SQL client - * - * @example - * ```ts - * const sql = new SQL("postgres://localhost:5432/mydb", { idleTimeout: 1000 }); - * ``` - */ - new (connectionString: string | URL, options: Omit): SQL; - - /** - * Creates a new SQL client instance with options - * - * @param options - The options for the SQL client - * - * @example - * ```ts - * const sql = new SQL({ url: "postgres://localhost:5432/mydb", idleTimeout: 1000 }); - * ``` - */ - new (options?: SQL.Options): SQL; - }; - - /** - * Represents a reserved connection from the connection pool - * Extends SQL with additional release functionality - */ - interface ReservedSQL extends SQL, Disposable { - /** - * Releases the client back to the connection pool - */ - release(): void; - } - - /** - * Represents a client within a transaction context - * Extends SQL with savepoint functionality - */ - interface TransactionSQL extends SQL { - /** Creates a savepoint within the current transaction */ - savepoint(name: string, fn: SQLSavepointContextCallback): Promise; - savepoint(fn: SQLSavepointContextCallback): Promise; - } - - /** - * Represents a savepoint within a transaction - */ - interface SavepointSQL extends SQL {} - type CSRFAlgorithm = "blake2b256" | "blake2b512" | "sha256" | "sha384" | "sha512" | 
"sha512-256"; interface CSRFGenerateOptions { @@ -1995,16 +1314,6 @@ declare module "bun" { maxAge?: number; } - /** - * SQL client - */ - const sql: SQL; - - /** - * SQL client for PostgreSQL - */ - const postgres: SQL; - /** * Generate and verify CSRF tokens * @@ -4383,11 +3692,11 @@ declare module "bun" { * The type of options that can be passed to {@link serve}, with support for `routes` and a safer requirement for `fetch` */ type ServeFunctionOptions> }> = - | (DistributedOmit, WebSocketServeOptions>, "fetch"> & { + | (__internal.DistributedOmit, WebSocketServeOptions>, "fetch"> & { routes: R; fetch?: (this: Server, request: Request, server: Server) => Response | Promise; }) - | (DistributedOmit, WebSocketServeOptions>, "routes"> & { + | (__internal.DistributedOmit, WebSocketServeOptions>, "routes"> & { routes?: never; fetch: (this: Server, request: Request, server: Server) => Response | Promise; }) diff --git a/packages/bun-types/deprecated.d.ts b/packages/bun-types/deprecated.d.ts index 0b8cee7818..543661c473 100644 --- a/packages/bun-types/deprecated.d.ts +++ b/packages/bun-types/deprecated.d.ts @@ -1,4 +1,35 @@ declare module "bun" { + /** @deprecated This type is unused in Bun's types and might be removed in the near future */ + type Platform = + | "aix" + | "android" + | "darwin" + | "freebsd" + | "haiku" + | "linux" + | "openbsd" + | "sunos" + | "win32" + | "cygwin" + | "netbsd"; + + /** @deprecated This type is unused in Bun's types and might be removed in the near future */ + type Architecture = "arm" | "arm64" | "ia32" | "mips" | "mipsel" | "ppc" | "ppc64" | "s390" | "s390x" | "x64"; + + /** @deprecated This type is unused in Bun's types and might be removed in the near future */ + type UncaughtExceptionListener = (error: Error, origin: UncaughtExceptionOrigin) => void; + + /** + * Most of the time the unhandledRejection will be an Error, but this should not be relied upon + * as *anything* can be thrown/rejected, it is therefore unsafe to assume that the value is an Error. + * + * @deprecated This type is unused in Bun's types and might be removed in the near future + */ + type UnhandledRejectionListener = (reason: unknown, promise: Promise) => void; + + /** @deprecated This type is unused in Bun's types and might be removed in the near future */ + type MultipleResolveListener = (type: MultipleResolveType, promise: Promise, value: unknown) => void; + /** * Consume all data from a {@link ReadableStream} until it closes or errors. 
* diff --git a/packages/bun-types/index.d.ts b/packages/bun-types/index.d.ts index c5b488ba22..870e2ae463 100644 --- a/packages/bun-types/index.d.ts +++ b/packages/bun-types/index.d.ts @@ -21,6 +21,7 @@ /// /// /// +/// /// diff --git a/packages/bun-types/overrides.d.ts b/packages/bun-types/overrides.d.ts index f52de8acbf..b4f9f97ad1 100644 --- a/packages/bun-types/overrides.d.ts +++ b/packages/bun-types/overrides.d.ts @@ -24,6 +24,12 @@ declare module "stream/web" { } } +declare module "url" { + interface URLSearchParams { + toJSON(): Record; + } +} + declare global { namespace NodeJS { interface ProcessEnv extends Bun.Env {} diff --git a/packages/bun-types/sql.d.ts b/packages/bun-types/sql.d.ts new file mode 100644 index 0000000000..a85278b8c5 --- /dev/null +++ b/packages/bun-types/sql.d.ts @@ -0,0 +1,805 @@ +import type * as BunSQLite from "bun:sqlite"; + +declare module "bun" { + /** + * Represents a reserved connection from the connection pool Extends SQL with + * additional release functionality + */ + interface ReservedSQL extends SQL, Disposable { + /** + * Releases the client back to the connection pool + */ + release(): void; + } + + /** + * Represents a client within a transaction context Extends SQL with savepoint + * functionality + */ + interface TransactionSQL extends SQL { + /** + * Creates a savepoint within the current transaction + */ + savepoint(name: string, fn: SQL.SavepointContextCallback): Promise; + savepoint(fn: SQL.SavepointContextCallback): Promise; + + /** + * The reserve method pulls out a connection from the pool, and returns a + * client that wraps the single connection. + * + * Using reserve() inside of a transaction will return a brand new + * connection, not one related to the transaction. This matches the + * behaviour of the `postgres` package. 
+ */ + reserve(): Promise; + } + + namespace SQL { + class SQLError extends Error { + constructor(message: string); + } + + class PostgresError extends SQLError { + public readonly code: string; + public readonly errno: string | undefined; + public readonly detail: string | undefined; + public readonly hint: string | undefined; + public readonly severity: string | undefined; + public readonly position: string | undefined; + public readonly internalPosition: string | undefined; + public readonly internalQuery: string | undefined; + public readonly where: string | undefined; + public readonly schema: string | undefined; + public readonly table: string | undefined; + public readonly column: string | undefined; + public readonly dataType: string | undefined; + public readonly constraint: string | undefined; + public readonly file: string | undefined; + public readonly line: string | undefined; + public readonly routine: string | undefined; + + constructor( + message: string, + options: { + code: string; + errno?: string | undefined; + detail?: string; + hint?: string | undefined; + severity?: string | undefined; + position?: string | undefined; + internalPosition?: string; + internalQuery?: string; + where?: string | undefined; + schema?: string; + table?: string | undefined; + column?: string | undefined; + dataType?: string | undefined; + constraint?: string; + file?: string | undefined; + line?: string | undefined; + routine?: string | undefined; + }, + ); + } + + class SQLiteError extends SQLError { + public readonly code: string; + public readonly errno: number; + public readonly byteOffset?: number | undefined; + + constructor(message: string, options: { code: string; errno: number; byteOffset?: number | undefined }); + } + + type AwaitPromisesArray>> = { + [K in keyof T]: Awaited; + }; + + type ContextCallbackResult = T extends Array> ? AwaitPromisesArray : Awaited; + type ContextCallback = (sql: SQL) => Bun.MaybePromise; + + interface SQLiteOptions extends BunSQLite.DatabaseOptions { + adapter?: "sqlite"; + + /** + * Specify the path to the database file + * + * Examples: + * + * - `sqlite://:memory:` + * - `sqlite://./path/to/database.db` + * - `sqlite:///Users/bun/projects/my-app/database.db` + * - `./dev.db` + * - `:memory:` + * + * @default ":memory:" + */ + filename?: URL | ":memory:" | (string & {}) | undefined; + + /** + * Callback executed when a connection attempt completes (SQLite) + * Receives an Error on failure, or null on success. + */ + onconnect?: ((err: Error | null) => void) | undefined; + + /** + * Callback executed when a connection is closed (SQLite) + * Receives the closing Error or null. 
+ */ + onclose?: ((err: Error | null) => void) | undefined; + } + + interface PostgresOptions { + /** + * Connection URL (can be string or URL object) + */ + url?: URL | string | undefined; + + /** + * Database server hostname + * @default "localhost" + */ + host?: string | undefined; + + /** + * Database server hostname (alias for host) + * @deprecated Prefer {@link host} + * @default "localhost" + */ + hostname?: string | undefined; + + /** + * Database server port number + * @default 5432 + */ + port?: number | string | undefined; + + /** + * Database user for authentication + * @default "postgres" + */ + username?: string | undefined; + + /** + * Database user for authentication (alias for username) + * @deprecated Prefer {@link username} + * @default "postgres" + */ + user?: string | undefined; + + /** + * Database password for authentication + * @default "" + */ + password?: string | (() => MaybePromise) | undefined; + + /** + * Database password for authentication (alias for password) + * @deprecated Prefer {@link password} + * @default "" + */ + pass?: string | (() => MaybePromise) | undefined; + + /** + * Name of the database to connect to + * @default The username value + */ + database?: string | undefined; + + /** + * Name of the database to connect to (alias for database) + * @deprecated Prefer {@link database} + * @default The username value + */ + db?: string | undefined; + + /** + * Database adapter/driver to use + * @default "postgres" + */ + adapter?: "postgres"; + + /** + * Maximum time in seconds to wait for connection to become available + * @default 0 (no timeout) + */ + idleTimeout?: number | undefined; + + /** + * Maximum time in seconds to wait for connection to become available (alias for idleTimeout) + * @deprecated Prefer {@link idleTimeout} + * @default 0 (no timeout) + */ + idle_timeout?: number | undefined; + + /** + * Maximum time in seconds to wait when establishing a connection + * @default 30 + */ + connectionTimeout?: number | undefined; + + /** + * Maximum time in seconds to wait when establishing a connection (alias for connectionTimeout) + * @deprecated Prefer {@link connectionTimeout} + * @default 30 + */ + connection_timeout?: number | undefined; + + /** + * Maximum time in seconds to wait when establishing a connection (alias + * for connectionTimeout) + * @deprecated Prefer {@link connectionTimeout} + * @default 30 + */ + connectTimeout?: number | undefined; + + /** + * Maximum time in seconds to wait when establishing a connection (alias + * for connectionTimeout) + * @deprecated Prefer {@link connectionTimeout} + * @default 30 + */ + connect_timeout?: number | undefined; + + /** + * Maximum lifetime in seconds of a connection + * @default 0 (no maximum lifetime) + */ + maxLifetime?: number | undefined; + + /** + * Maximum lifetime in seconds of a connection (alias for maxLifetime) + * @deprecated Prefer {@link maxLifetime} + * @default 0 (no maximum lifetime) + */ + max_lifetime?: number | undefined; + + /** + * Whether to use TLS/SSL for the connection + * @default false + */ + tls?: TLSOptions | boolean | undefined; + + /** + * Whether to use TLS/SSL for the connection (alias for tls) + * @default false + */ + ssl?: TLSOptions | boolean | undefined; + + // `.path` is currently unsupported in Bun, the implementation is + // incomplete. 
+ // + // /** + // * Unix domain socket path for connection + // * @default "" + // */ + // path?: string | undefined; + + /** + * Callback executed when a connection attempt completes + * Receives an Error on failure, or null on success. + */ + onconnect?: ((err: Error | null) => void) | undefined; + + /** + * Callback executed when a connection is closed + * Receives the closing Error or null. + */ + onclose?: ((err: Error | null) => void) | undefined; + + /** + * Postgres client runtime configuration options + * + * @see https://www.postgresql.org/docs/current/runtime-config-client.html + */ + connection?: Record | undefined; + + /** + * Maximum number of connections in the pool + * @default 10 + */ + max?: number | undefined; + + /** + * By default values outside i32 range are returned as strings. If this is + * true, values outside i32 range are returned as BigInts. + * @default false + */ + bigint?: boolean | undefined; + + /** + * Automatic creation of prepared statements + * @default true + */ + prepare?: boolean | undefined; + } + + /** + * Configuration options for SQL client connection and behavior + * + * @example + * ```ts + * const config: Bun.SQL.Options = { + * host: 'localhost', + * port: 5432, + * user: 'dbuser', + * password: 'secretpass', + * database: 'myapp', + * idleTimeout: 30, + * max: 20, + * onconnect: (client) => { + * console.log('Connected to database'); + * } + * }; + * ``` + */ + type Options = SQLiteOptions | PostgresOptions; + + /** + * Represents a SQL query that can be executed, with additional control + * methods Extends Promise to allow for async/await usage + */ + interface Query extends Promise { + /** + * Indicates if the query is currently executing + */ + active: boolean; + + /** + * Indicates if the query has been cancelled + */ + cancelled: boolean; + + /** + * Cancels the executing query + */ + cancel(): Query; + + /** + * Executes the query as a simple query, no parameters are allowed but can + * execute multiple commands separated by semicolons + */ + simple(): Query; + + /** + * Executes the query + */ + execute(): Query; + + /** + * Returns the raw query result + */ + raw(): Query; + + /** + * Returns only the values from the query result + */ + values(): Query; + } + + /** + * Callback function type for transaction contexts + * @param sql Function to execute SQL queries within the transaction + */ + type TransactionContextCallback = ContextCallback; + + /** + * Callback function type for savepoint contexts + * @param sql Function to execute SQL queries within the savepoint + */ + type SavepointContextCallback = ContextCallback; + + /** + * SQL.Helper represents a parameter or serializable + * value inside of a query. 
+ * + * @example + * ```ts + * const helper = sql(users, 'id'); + * await sql`insert into users ${helper}`; + * ``` + */ + interface Helper { + readonly value: T[]; + readonly columns: (keyof T)[]; + } + } + + interface SQL extends AsyncDisposable { + /** + * Executes a SQL query using template literals + * @example + * ```ts + * const [user] = await sql`select * from users where id = ${1}`; + * ``` + */ + (strings: TemplateStringsArray, ...values: unknown[]): SQL.Query; + + /** + * Execute a SQL query using a string + * + * @example + * ```ts + * const users = await sql`SELECT * FROM users WHERE id = ${1}`; + * ``` + */ + (string: string): SQL.Query; + + /** + * Helper function for inserting an object into a query + * + * @example + * ```ts + * // Insert an object + * const result = await sql`insert into users ${sql(users)} returning *`; + * + * // Or pick specific columns + * const result = await sql`insert into users ${sql(users, "id", "name")} returning *`; + * + * // Or a single object + * const result = await sql`insert into users ${sql(user)} returning *`; + * ``` + */ + (obj: T | T[] | readonly T[]): SQL.Helper; // Contributor note: This is the same as the signature below with the exception of the columns and the Pick + + /** + * Helper function for inserting an object into a query, supporting specific columns + * + * @example + * ```ts + * // Insert an object + * const result = await sql`insert into users ${sql(users)} returning *`; + * + * // Or pick specific columns + * const result = await sql`insert into users ${sql(users, "id", "name")} returning *`; + * + * // Or a single object + * const result = await sql`insert into users ${sql(user)} returning *`; + * ``` + */ + ( + obj: T | T[] | readonly T[], + ...columns: readonly Keys[] + ): SQL.Helper>; // Contributor note: This is the same as the signature above with the exception of this signature tracking keys + + /** + * Helper function for inserting any serializable value into a query + * + * @example + * ```ts + * const result = await sql`SELECT * FROM users WHERE id IN ${sql([1, 2, 3])}`; + * ``` + */ + (value: T): SQL.Helper; + } + + /** + * Main SQL client interface providing connection and transaction management + */ + class SQL { + /** + * Creates a new SQL client instance + * + * @param connectionString - The connection string for the SQL client + * + * @example + * ```ts + * const sql = new SQL("postgres://localhost:5432/mydb"); + * const sql = new SQL(new URL("postgres://localhost:5432/mydb")); + * ``` + */ + constructor(connectionString: string | URL); + + /** + * Creates a new SQL client instance with options + * + * @param connectionString - The connection string for the SQL client + * @param options - The options for the SQL client + * + * @example + * ```ts + * const sql = new SQL("postgres://localhost:5432/mydb", { idleTimeout: 1000 }); + * ``` + */ + constructor( + connectionString: string | URL, + options: Bun.__internal.DistributedOmit, + ); + + /** + * Creates a new SQL client instance with options + * + * @param options - The options for the SQL client + * + * @example + * ```ts + * const sql = new SQL({ url: "postgres://localhost:5432/mydb", idleTimeout: 1000 }); + * ``` + */ + constructor(options?: SQL.Options); + + /** + * Current client options + */ + options: Bun.__internal.DistributedMerge; + + /** + * Commits a distributed transaction also know as prepared transaction in postgres or XA transaction in MySQL + * + * @param name - The name of the distributed transaction + * + * @throws {Error} If the 
adapter does not support distributed transactions (e.g., SQLite) + * + * @example + * ```ts + * await sql.commitDistributed("my_distributed_transaction"); + * ``` + */ + commitDistributed(name: string): Promise; + + /** + * Rolls back a distributed transaction also know as prepared transaction in postgres or XA transaction in MySQL + * + * @param name - The name of the distributed transaction + * + * @throws {Error} If the adapter does not support distributed transactions (e.g., SQLite) + * + * @example + * ```ts + * await sql.rollbackDistributed("my_distributed_transaction"); + * ``` + */ + rollbackDistributed(name: string): Promise; + + /** Waits for the database connection to be established + * + * @example + * ```ts + * await sql.connect(); + * ``` + */ + connect(): Promise; + + /** + * Closes the database connection with optional timeout in seconds. If timeout is 0, it will close immediately, if is not provided it will wait for all queries to finish before closing. + * + * @param options - The options for the close + * + * @example + * ```ts + * await sql.close({ timeout: 1 }); + * ``` + */ + close(options?: { timeout?: number }): Promise; + + /** + * Closes the database connection with optional timeout in seconds. If timeout is 0, it will close immediately, if is not provided it will wait for all queries to finish before closing. + * This is an alias of {@link SQL.close} + * + * @param options - The options for the close + * + * @example + * ```ts + * await sql.end({ timeout: 1 }); + * ``` + */ + end(options?: { timeout?: number }): Promise; + + /** + * Flushes any pending operations + * + * @throws {Error} If the adapter does not support flushing (e.g., SQLite) + * + * @example + * ```ts + * sql.flush(); + * ``` + */ + flush(): void; + + /** + * The reserve method pulls out a connection from the pool, and returns a client that wraps the single connection. + * + * This can be used for running queries on an isolated connection. + * Calling reserve in a reserved Sql will return a new reserved connection, not the same connection (behavior matches postgres package). + * + * @throws {Error} If the adapter does not support connection pooling (e.g., SQLite)s + * + * @example + * ```ts + * const reserved = await sql.reserve(); + * await reserved`select * from users`; + * await reserved.release(); + * // with in a production scenario would be something more like + * const reserved = await sql.reserve(); + * try { + * // ... queries + * } finally { + * await reserved.release(); + * } + * + * // Bun supports Symbol.dispose and Symbol.asyncDispose + * // always release after context (safer) + * using reserved = await sql.reserve() + * await reserved`select * from users` + * ``` + */ + reserve(): Promise; + + /** + * Begins a new transaction. + * + * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.begin will resolve with the returned value from the callback function. + * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue. 
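Taken together, `connect()` and `close()`/`end()` support a simple startup/shutdown pattern. A minimal sketch, assuming a placeholder connection string and a long-running server process:

```ts
import { SQL } from "bun";

const sql = new SQL("postgres://localhost:5432/mydb");

// Fail fast at startup if the database is unreachable.
await sql.connect();

// On shutdown, let in-flight queries finish but cap the wait at 5 seconds;
// close({ timeout: 0 }) would instead terminate immediately.
process.on("SIGTERM", async () => {
  await sql.close({ timeout: 5 });
  process.exit(0);
});
```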
+ * @example + * const [user, account] = await sql.begin(async sql => { + * const [user] = await sql` + * insert into users ( + * name + * ) values ( + * 'Murray' + * ) + * returning * + * ` + * const [account] = await sql` + * insert into accounts ( + * user_id + * ) values ( + * ${ user.user_id } + * ) + * returning * + * ` + * return [user, account] + * }) + */ + begin(fn: SQL.TransactionContextCallback): Promise>; + + /** + * Begins a new transaction with options. + * + * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.begin will resolve with the returned value from the callback function. + * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue. + * @example + * const [user, account] = await sql.begin("read write", async sql => { + * const [user] = await sql` + * insert into users ( + * name + * ) values ( + * 'Murray' + * ) + * returning * + * ` + * const [account] = await sql` + * insert into accounts ( + * user_id + * ) values ( + * ${ user.user_id } + * ) + * returning * + * ` + * return [user, account] + * }) + */ + begin(options: string, fn: SQL.TransactionContextCallback): Promise>; + + /** + * Alternative method to begin a transaction. + * + * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.transaction will resolve with the returned value from the callback function. + * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue. + * @alias begin + * @example + * const [user, account] = await sql.transaction(async sql => { + * const [user] = await sql` + * insert into users ( + * name + * ) values ( + * 'Murray' + * ) + * returning * + * ` + * const [account] = await sql` + * insert into accounts ( + * user_id + * ) values ( + * ${ user.user_id } + * ) + * returning * + * ` + * return [user, account] + * }) + */ + transaction(fn: SQL.TransactionContextCallback): Promise>; + + /** + * Alternative method to begin a transaction with options + * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.transaction will resolve with the returned value from the callback function. + * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue. + * + * @alias {@link begin} + * + * @example + * const [user, account] = await sql.transaction("read write", async sql => { + * const [user] = await sql` + * insert into users ( + * name + * ) values ( + * 'Murray' + * ) + * returning * + * ` + * const [account] = await sql` + * insert into accounts ( + * user_id + * ) values ( + * ${ user.user_id } + * ) + * returning * + * ` + * return [user, account] + * }); + */ + transaction(options: string, fn: SQL.TransactionContextCallback): Promise>; + + /** + * Begins a distributed transaction + * Also know as Two-Phase Commit, in a distributed transaction, Phase 1 involves the coordinator preparing nodes by ensuring data is written and ready to commit, while Phase 2 finalizes with nodes committing or rolling back based on the coordinator's decision, ensuring durability and releasing locks. 
+ * In PostgreSQL and MySQL distributed transactions persist beyond the original session, allowing privileged users or coordinators to commit/rollback them, ensuring support for distributed transactions, recovery, and administrative tasks. + * beginDistributed will automatic rollback if any exception are not caught, and you can commit and rollback later if everything goes well. + * PostgreSQL natively supports distributed transactions using PREPARE TRANSACTION, while MySQL uses XA Transactions, and MSSQL also supports distributed/XA transactions. However, in MSSQL, distributed transactions are tied to the original session, the DTC coordinator, and the specific connection. + * These transactions are automatically committed or rolled back following the same rules as regular transactions, with no option for manual intervention from other sessions, in MSSQL distributed transactions are used to coordinate transactions using Linked Servers. + * + * @throws {Error} If the adapter does not support distributed transactions (e.g., SQLite) + * + * @example + * await sql.beginDistributed("numbers", async sql => { + * await sql`create table if not exists numbers (a int)`; + * await sql`insert into numbers values(1)`; + * }); + * // later you can call + * await sql.commitDistributed("numbers"); + * // or await sql.rollbackDistributed("numbers"); + */ + beginDistributed( + name: string, + fn: SQL.TransactionContextCallback, + ): Promise>; + + /** Alternative method to begin a distributed transaction + * @alias {@link beginDistributed} + */ + distributed(name: string, fn: SQL.TransactionContextCallback): Promise>; + + /**If you know what you're doing, you can use unsafe to pass any string you'd like. + * Please note that this can lead to SQL injection if you're not careful. + * You can also nest sql.unsafe within a safe sql expression. This is useful if only part of your fraction has unsafe elements. + * @example + * const result = await sql.unsafe(`select ${danger} from users where id = ${dragons}`) + */ + unsafe(string: string, values?: any[]): SQL.Query; + + /** + * Reads a file and uses the contents as a query. + * Optional parameters can be used if the file includes $1, $2, etc + * @example + * const result = await sql.file("query.sql", [1, 2, 3]); + */ + file(filename: string, values?: any[]): SQL.Query; + } + + /** + * SQL client + */ + const sql: SQL; + + /** + * SQL client for PostgreSQL + * + * @deprecated Prefer {@link Bun.sql} + */ + const postgres: SQL; + + /** + * Represents a savepoint within a transaction + */ + interface SavepointSQL extends SQL {} +} diff --git a/packages/bun-types/sqlite.d.ts b/packages/bun-types/sqlite.d.ts index 0c79d22779..47ef366629 100644 --- a/packages/bun-types/sqlite.d.ts +++ b/packages/bun-types/sqlite.d.ts @@ -24,6 +24,66 @@ * | `null` | `NULL` | */ declare module "bun:sqlite" { + /** + * Options for {@link Database} + */ + export interface DatabaseOptions { + /** + * Open the database as read-only (no write operations, no create). + * + * Equivalent to {@link constants.SQLITE_OPEN_READONLY} + */ + readonly?: boolean; + + /** + * Allow creating a new database + * + * Equivalent to {@link constants.SQLITE_OPEN_CREATE} + */ + create?: boolean; + + /** + * Open the database as read-write + * + * Equivalent to {@link constants.SQLITE_OPEN_READWRITE} + */ + readwrite?: boolean; + + /** + * When set to `true`, integers are returned as `bigint` types. + * + * When set to `false`, integers are returned as `number` types and truncated to 52 bits. 
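The note above that `sql.unsafe` can be nested inside a safe query is easiest to see with a dynamic identifier. A sketch — `direction` and the table/column names are placeholders, and the dynamic fragment must come from trusted, validated input:

```ts
import { sql } from "bun";

// Only the ORDER BY direction is raw SQL; the author_id parameter stays safely bound.
const direction = "desc"; // assume this was checked against an allow-list
const posts = await sql`
  SELECT * FROM posts
  WHERE author_id = ${42}
  ORDER BY created_at ${sql.unsafe(direction)}
`;
```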
+ * + * @default false + * @since v1.1.14 + */ + safeIntegers?: boolean; + + /** + * When set to `false` or `undefined`: + * - Queries missing bound parameters will NOT throw an error + * - Bound named parameters in JavaScript need to exactly match the SQL query. + * + * @example + * ```ts + * const db = new Database(":memory:", { strict: false }); + * db.run("INSERT INTO foo (name) VALUES ($name)", { $name: "foo" }); + * ``` + * + * When set to `true`: + * - Queries missing bound parameters will throw an error + * - Bound named parameters in JavaScript no longer need to be `$`, `:`, or `@`. The SQL query will remain prefixed. + * + * @example + * ```ts + * const db = new Database(":memory:", { strict: true }); + * db.run("INSERT INTO foo (name) VALUES ($name)", { name: "foo" }); + * ``` + * @since v1.1.14 + */ + strict?: boolean; + } + /** * A SQLite3 database * @@ -53,8 +113,6 @@ declare module "bun:sqlite" { * ```ts * const db = new Database("mydb.sqlite", {readonly: true}); * ``` - * - * @category Database */ export class Database implements Disposable { /** @@ -63,96 +121,19 @@ declare module "bun:sqlite" { * @param filename The filename of the database to open. Pass an empty string (`""`) or `":memory:"` or undefined for an in-memory database. * @param options defaults to `{readwrite: true, create: true}`. If a number, then it's treated as `SQLITE_OPEN_*` constant flags. */ - constructor( - filename?: string, - options?: - | number - | { - /** - * Open the database as read-only (no write operations, no create). - * - * Equivalent to {@link constants.SQLITE_OPEN_READONLY} - */ - readonly?: boolean; - /** - * Allow creating a new database - * - * Equivalent to {@link constants.SQLITE_OPEN_CREATE} - */ - create?: boolean; - /** - * Open the database as read-write - * - * Equivalent to {@link constants.SQLITE_OPEN_READWRITE} - */ - readwrite?: boolean; - - /** - * When set to `true`, integers are returned as `bigint` types. - * - * When set to `false`, integers are returned as `number` types and truncated to 52 bits. - * - * @default false - * @since v1.1.14 - */ - safeIntegers?: boolean; - - /** - * When set to `false` or `undefined`: - * - Queries missing bound parameters will NOT throw an error - * - Bound named parameters in JavaScript need to exactly match the SQL query. - * - * @example - * ```ts - * const db = new Database(":memory:", { strict: false }); - * db.run("INSERT INTO foo (name) VALUES ($name)", { $name: "foo" }); - * ``` - * - * When set to `true`: - * - Queries missing bound parameters will throw an error - * - Bound named parameters in JavaScript no longer need to be `$`, `:`, or `@`. The SQL query will remain prefixed. - * - * @example - * ```ts - * const db = new Database(":memory:", { strict: true }); - * db.run("INSERT INTO foo (name) VALUES ($name)", { name: "foo" }); - * ``` - * @since v1.1.14 - */ - strict?: boolean; - }, - ); + constructor(filename?: string, options?: number | DatabaseOptions); /** + * Open or create a SQLite3 databases + * + * @param filename The filename of the database to open. Pass an empty string (`""`) or `":memory:"` or undefined for an in-memory database. + * @param options defaults to `{readwrite: true, create: true}`. If a number, then it's treated as `SQLITE_OPEN_*` constant flags. + * * This is an alias of `new Database()` * * See {@link Database} */ - static open( - filename: string, - options?: - | number - | { - /** - * Open the database as read-only (no write operations, no create). 
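Because the constructor (and `Database.open`) accepts either the `DatabaseOptions` object above or raw `SQLITE_OPEN_*` flags, the two forms below should be roughly equivalent — a sketch with a placeholder filename:

```ts
import { Database, constants } from "bun:sqlite";

// Object form: read-only (no writes, no create).
const a = new Database("data.sqlite", { readonly: true });

// Numeric form: the same intent expressed as an SQLITE_OPEN_* flag.
const b = Database.open("data.sqlite", constants.SQLITE_OPEN_READONLY);
```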
- * - * Equivalent to {@link constants.SQLITE_OPEN_READONLY} - */ - readonly?: boolean; - /** - * Allow creating a new database - * - * Equivalent to {@link constants.SQLITE_OPEN_CREATE} - */ - create?: boolean; - /** - * Open the database as read-write - * - * Equivalent to {@link constants.SQLITE_OPEN_READWRITE} - */ - readwrite?: boolean; - }, - ): Database; + static open(filename: string, options?: number | DatabaseOptions): Database; /** * Execute a SQL query **without returning any results**. @@ -203,8 +184,11 @@ declare module "bun:sqlite" { * @returns `Database` instance */ run(sql: string, ...bindings: ParamsType[]): Changes; + /** * This is an alias of {@link Database.run} + * + * @deprecated Prefer {@link Database.run} */ exec(sql: string, ...bindings: ParamsType[]): Changes; @@ -351,6 +335,16 @@ declare module "bun:sqlite" { */ static setCustomSQLite(path: string): boolean; + /** + * Closes the database when using the async resource proposal + * + * @example + * ``` + * using db = new Database("myapp.db"); + * doSomethingWithDatabase(db); + * // Automatically closed when `db` goes out of scope + * ``` + */ [Symbol.dispose](): void; /** @@ -744,6 +738,30 @@ declare module "bun:sqlite" { */ values(...params: ParamsType): Array>; + /** + * Execute the prepared statement and return all results as arrays of + * `Uint8Array`s. + * + * This is similar to `values()` but returns all values as Uint8Array + * objects, regardless of their original SQLite type. + * + * @param params optional values to bind to the statement. If omitted, the + * statement is run with the last bound values or no parameters if there are + * none. + * + * @example + * ```ts + * const stmt = db.prepare("SELECT * FROM foo WHERE bar = ?"); + * + * stmt.raw("baz"); + * // => [[Uint8Array(24)]] + * + * stmt.raw(); + * // => [[Uint8Array(24)]] + * ``` + */ + raw(...params: ParamsType): Array>; + /** * The names of the columns returned by the prepared statement. 
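The new `Statement.raw()` sits alongside `values()`: both return rows as arrays, but `raw()` hands every column back as a `Uint8Array` (integers and floats as their 8-byte representations, text as UTF-8 bytes, `NULL` as `null`). A sketch using an in-memory database:

```ts
import { Database } from "bun:sqlite";

const db = new Database(":memory:");
db.run("CREATE TABLE t (n INTEGER, s TEXT)");
db.run("INSERT INTO t VALUES (1, 'hello')");

const stmt = db.prepare("SELECT n, s FROM t");

stmt.values(); // => [[1, "hello"]]                    — decoded JavaScript values
stmt.raw();    // => [[Uint8Array(8), Uint8Array(5)]]  — raw bytes per column
```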
* @example diff --git a/src/bun.js/bindings/BunObject.cpp b/src/bun.js/bindings/BunObject.cpp index 57c9f15d8a..de3a1f06bb 100644 --- a/src/bun.js/bindings/BunObject.cpp +++ b/src/bun.js/bindings/BunObject.cpp @@ -307,6 +307,9 @@ static JSValue defaultBunSQLObject(VM& vm, JSObject* bunObject) auto scope = DECLARE_THROW_SCOPE(vm); auto* globalObject = defaultGlobalObject(bunObject->globalObject()); JSValue sqlValue = globalObject->internalModuleRegistry()->requireId(globalObject, vm, InternalModuleRegistry::BunSql); +#if BUN_DEBUG + if (scope.exception()) globalObject->reportUncaughtExceptionAtEventLoop(globalObject, scope.exception()); +#endif RETURN_IF_EXCEPTION(scope, {}); RELEASE_AND_RETURN(scope, sqlValue.getObject()->get(globalObject, vm.propertyNames->defaultKeyword)); } @@ -316,6 +319,9 @@ static JSValue constructBunSQLObject(VM& vm, JSObject* bunObject) auto scope = DECLARE_THROW_SCOPE(vm); auto* globalObject = defaultGlobalObject(bunObject->globalObject()); JSValue sqlValue = globalObject->internalModuleRegistry()->requireId(globalObject, vm, InternalModuleRegistry::BunSql); +#if BUN_DEBUG + if (scope.exception()) globalObject->reportUncaughtExceptionAtEventLoop(globalObject, scope.exception()); +#endif RETURN_IF_EXCEPTION(scope, {}); auto clientData = WebCore::clientData(vm); RELEASE_AND_RETURN(scope, sqlValue.getObject()->get(globalObject, clientData->builtinNames().SQLPublicName())); diff --git a/src/bun.js/bindings/sqlite/JSSQLStatement.cpp b/src/bun.js/bindings/sqlite/JSSQLStatement.cpp index 66d37d5895..521a5946c0 100644 --- a/src/bun.js/bindings/sqlite/JSSQLStatement.cpp +++ b/src/bun.js/bindings/sqlite/JSSQLStatement.cpp @@ -280,6 +280,7 @@ JSC_DECLARE_HOST_FUNCTION(jsSQLStatementExecuteStatementFunctionGet); JSC_DECLARE_HOST_FUNCTION(jsSQLStatementExecuteStatementFunctionAll); JSC_DECLARE_HOST_FUNCTION(jsSQLStatementExecuteStatementFunctionIterate); JSC_DECLARE_HOST_FUNCTION(jsSQLStatementExecuteStatementFunctionRows); +JSC_DECLARE_HOST_FUNCTION(jsSQLStatementExecuteStatementFunctionRawRows); JSC_DECLARE_CUSTOM_GETTER(jsSqlStatementGetColumnNames); JSC_DECLARE_CUSTOM_GETTER(jsSqlStatementGetColumnCount); @@ -294,6 +295,7 @@ JSC_DECLARE_HOST_FUNCTION(jsSQLStatementToStringFunction); JSC_DECLARE_CUSTOM_GETTER(jsSqlStatementGetColumnNames); JSC_DECLARE_CUSTOM_GETTER(jsSqlStatementGetColumnCount); JSC_DECLARE_CUSTOM_GETTER(jsSqlStatementGetParamCount); +JSC_DECLARE_CUSTOM_GETTER(jsSqlStatementGetHasMultipleStatements); JSC_DECLARE_CUSTOM_GETTER(jsSqlStatementGetColumnTypes); JSC_DECLARE_CUSTOM_GETTER(jsSqlStatementGetColumnDeclaredTypes); @@ -488,9 +490,64 @@ protected: void finishCreation(JSC::VM& vm); }; +static JSValue toJSAsBuffer(JSC::VM& vm, JSC::JSGlobalObject* globalObject, sqlite3_stmt* stmt, int i) +{ + auto scope = DECLARE_THROW_SCOPE(vm); + + switch (sqlite3_column_type(stmt, i)) { + case SQLITE_INTEGER: { + int64_t value = sqlite3_column_int64(stmt, i); + JSC::JSUint8Array* array = JSC::JSUint8Array::createUninitialized(globalObject, globalObject->m_typedArrayUint8.get(globalObject), 8); + RETURN_IF_EXCEPTION(scope, {}); + uint8_t* data = array->typedVector(); + for (int j = 0; j < 8; j++) { + data[j] = (value >> (j * 8)) & 0xFF; + } + return array; + } + case SQLITE_FLOAT: { + double value = sqlite3_column_double(stmt, i); + JSC::JSUint8Array* array = JSC::JSUint8Array::createUninitialized(globalObject, globalObject->m_typedArrayUint8.get(globalObject), 8); + RETURN_IF_EXCEPTION(scope, {}); + memcpy(array->typedVector(), &value, 8); + return array; + } + case 
SQLITE3_TEXT: { + size_t len = sqlite3_column_bytes(stmt, i); + const unsigned char* text = len > 0 ? sqlite3_column_text(stmt, i) : nullptr; + if (text == nullptr || len == 0) [[unlikely]] { + JSC::JSUint8Array* array = JSC::JSUint8Array::createUninitialized(globalObject, globalObject->m_typedArrayUint8.get(globalObject), 0); + RETURN_IF_EXCEPTION(scope, {}); + return array; + } + JSC::JSUint8Array* array = JSC::JSUint8Array::createUninitialized(globalObject, globalObject->m_typedArrayUint8.get(globalObject), len); + RETURN_IF_EXCEPTION(scope, {}); + memcpy(array->typedVector(), text, len); + return array; + } + case SQLITE_BLOB: { + size_t len = sqlite3_column_bytes(stmt, i); + const void* blob = len > 0 ? sqlite3_column_blob(stmt, i) : nullptr; + if (len > 0 && blob != nullptr) [[likely]] { + JSC::JSUint8Array* array = JSC::JSUint8Array::createUninitialized(globalObject, globalObject->m_typedArrayUint8.get(globalObject), len); + RETURN_IF_EXCEPTION(scope, {}); + memcpy(array->vector(), blob, len); + return array; + } + JSC::JSUint8Array* array = JSC::JSUint8Array::createUninitialized(globalObject, globalObject->m_typedArrayUint8.get(globalObject), 0); + RETURN_IF_EXCEPTION(scope, {}); + return array; + } + case SQLITE_NULL: + default: + return jsNull(); + } +} + template static JSValue toJS(JSC::VM& vm, JSC::JSGlobalObject* globalObject, sqlite3_stmt* stmt, int i) { + auto throwScope = DECLARE_THROW_SCOPE(vm); switch (sqlite3_column_type(stmt, i)) { case SQLITE_INTEGER: { if constexpr (!useBigInt64) { @@ -498,7 +555,9 @@ static JSValue toJS(JSC::VM& vm, JSC::JSGlobalObject* globalObject, sqlite3_stmt return jsNumberFromSQLite(stmt, i); } else { // https://github.com/oven-sh/bun/issues/1536 - return jsBigIntFromSQLite(globalObject, stmt, i); + auto bint = jsBigIntFromSQLite(globalObject, stmt, i); + RETURN_IF_EXCEPTION(throwScope, {}); + return bint; } } case SQLITE_FLOAT: { @@ -515,20 +574,27 @@ static JSValue toJS(JSC::VM& vm, JSC::JSGlobalObject* globalObject, sqlite3_stmt return jsEmptyString(vm); } - return len < 64 ? jsString(vm, WTF::String::fromUTF8({ text, len })) : JSC::JSValue::decode(Bun__encoding__toStringUTF8(text, len, globalObject)); + if (len < 64) { + return jsString(vm, WTF::String::fromUTF8({ text, len })); + } + + auto encoded = Bun__encoding__toStringUTF8(text, len, globalObject); + RETURN_IF_EXCEPTION(throwScope, {}); + return JSC::JSValue::decode(encoded); } case SQLITE_BLOB: { size_t len = sqlite3_column_bytes(stmt, i); const void* blob = len > 0 ? 
sqlite3_column_blob(stmt, i) : nullptr; if (len > 0 && blob != nullptr) [[likely]] { - auto scope = DECLARE_THROW_SCOPE(vm); JSC::JSUint8Array* array = JSC::JSUint8Array::createUninitialized(globalObject, globalObject->m_typedArrayUint8.get(globalObject), len); - RETURN_IF_EXCEPTION(scope, {}); + RETURN_IF_EXCEPTION(throwScope, {}); memcpy(array->vector(), blob, len); return array; } - return JSC::JSUint8Array::create(globalObject, globalObject->m_typedArrayUint8.get(globalObject), 0); + auto array = JSC::JSUint8Array::create(globalObject, globalObject->m_typedArrayUint8.get(globalObject), 0); + RETURN_IF_EXCEPTION(throwScope, {}); + return array; } default: { break; @@ -545,6 +611,7 @@ static const HashTableValue JSSQLStatementPrototypeTableValues[] = { { "iterate"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementExecuteStatementFunctionIterate, 1 } }, { "as"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementSetPrototypeFunction, 1 } }, { "values"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementExecuteStatementFunctionRows, 1 } }, + { "raw"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementExecuteStatementFunctionRawRows, 1 } }, { "finalize"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementFunctionFinalize, 0 } }, { "toString"_s, static_cast(JSC::PropertyAttribute::Function), NoIntrinsic, { HashTableValue::NativeFunctionType, jsSQLStatementToStringFunction, 0 } }, { "columns"_s, static_cast(JSC::PropertyAttribute::ReadOnly | JSC::PropertyAttribute::CustomAccessor), NoIntrinsic, { HashTableValue::GetterSetterType, jsSqlStatementGetColumnNames, 0 } }, @@ -553,7 +620,6 @@ static const HashTableValue JSSQLStatementPrototypeTableValues[] = { { "columnTypes"_s, static_cast(JSC::PropertyAttribute::ReadOnly | JSC::PropertyAttribute::CustomAccessor), NoIntrinsic, { HashTableValue::GetterSetterType, jsSqlStatementGetColumnTypes, 0 } }, { "declaredTypes"_s, static_cast(JSC::PropertyAttribute::ReadOnly | JSC::PropertyAttribute::CustomAccessor), NoIntrinsic, { HashTableValue::GetterSetterType, jsSqlStatementGetColumnDeclaredTypes, 0 } }, { "safeIntegers"_s, static_cast(JSC::PropertyAttribute::CustomAccessor), NoIntrinsic, { HashTableValue::GetterSetterType, jsSqlStatementGetSafeIntegers, jsSqlStatementSetSafeIntegers } }, - }; class JSSQLStatementPrototype final : public JSC::JSNonFinalObject { @@ -712,6 +778,7 @@ static void initializeColumnNames(JSC::JSGlobalObject* lexicalGlobalObject, JSSQ break; const auto key = Identifier::fromString(vm, WTF::String::fromUTF8({ name, len })); + JSC::JSValue primitive = JSC::jsUndefined(); auto decl = sqlite3_column_decltype(stmt, i); if (decl != nullptr) { @@ -1857,6 +1924,22 @@ static inline JSC::JSValue constructResultObject(JSC::JSGlobalObject* lexicalGlo RELEASE_AND_RETURN(scope, result); } +static inline JSC::JSArray* constructResultRowRaw(JSC::VM& vm, JSC::JSGlobalObject* lexicalGlobalObject, JSSQLStatement* castedThis, size_t columnCount) +{ + auto throwScope = DECLARE_THROW_SCOPE(vm); + auto* stmt = castedThis->stmt; + MarkedArgumentBuffer arguments; + arguments.ensureCapacity(columnCount); + + for (size_t i = 0; i < columnCount; i++) { + JSValue value = toJSAsBuffer(vm, lexicalGlobalObject, stmt, i); + 
RETURN_IF_EXCEPTION(throwScope, nullptr); + arguments.append(value); + } + + RELEASE_AND_RETURN(throwScope, JSC::constructArray(lexicalGlobalObject, static_cast(nullptr), arguments)); +} + static inline JSC::JSArray* constructResultRow(JSC::VM& vm, JSC::JSGlobalObject* lexicalGlobalObject, JSSQLStatement* castedThis, size_t columnCount) { auto throwScope = DECLARE_THROW_SCOPE(vm); @@ -2181,7 +2264,9 @@ JSC_DEFINE_HOST_FUNCTION(jsSQLStatementExecuteStatementFunctionRows, (JSC::JSGlo if (!castedThis->hasExecuted || castedThis->need_update()) { initializeColumnNames(lexicalGlobalObject, castedThis); + if (scope.exception()) [[unlikely]] { + // Don't forget to reset before releasing the exception. sqlite3_reset(stmt); RELEASE_AND_RETURN(scope, {}); } @@ -2212,10 +2297,97 @@ JSC_DEFINE_HOST_FUNCTION(jsSQLStatementExecuteStatementFunctionRows, (JSC::JSGlo RELEASE_AND_RETURN(scope, {}); } resultArray->push(lexicalGlobalObject, row); + RETURN_IF_EXCEPTION(scope, {}); + status = sqlite3_step(stmt); + } while (status == SQLITE_ROW); + } + + result = resultArray; + } + } else if (status == SQLITE_DONE && columnCount != 0) { + // breaking change in Bun v0.6.8 + result = JSC::constructEmptyArray(lexicalGlobalObject, nullptr, 0); + RETURN_IF_EXCEPTION(scope, {}); + } + + if (status != SQLITE_DONE && status != SQLITE_OK) [[unlikely]] { + throwException(lexicalGlobalObject, scope, createSQLiteError(lexicalGlobalObject, castedThis->version_db->db)); + sqlite3_reset(stmt); + return {}; + } + + // sqlite3_reset(stmt); + RELEASE_AND_RETURN(scope, JSC::JSValue::encode(result)); +} + +JSC_DEFINE_HOST_FUNCTION(jsSQLStatementExecuteStatementFunctionRawRows, (JSC::JSGlobalObject * lexicalGlobalObject, JSC::CallFrame* callFrame)) +{ + auto& vm = JSC::getVM(lexicalGlobalObject); + auto scope = DECLARE_THROW_SCOPE(vm); + auto castedThis = jsDynamicCast(callFrame->thisValue()); + + CHECK_THIS; + + auto* stmt = castedThis->stmt; + CHECK_PREPARED + + int statusCode = sqlite3_reset(stmt); + if (statusCode != SQLITE_OK) [[unlikely]] { + throwException(lexicalGlobalObject, scope, createSQLiteError(lexicalGlobalObject, castedThis->version_db->db)); + sqlite3_reset(stmt); + return {}; + } + + int count = callFrame->argumentCount(); + if (count > 0) { + auto arg0 = callFrame->argument(0); + DO_REBIND(arg0); + } + + int status = sqlite3_step(stmt); + if (!sqlite3_stmt_readonly(stmt)) { + castedThis->version_db->version++; + } + + if (!castedThis->hasExecuted || castedThis->need_update()) { + initializeColumnNames(lexicalGlobalObject, castedThis); + if (scope.exception()) [[unlikely]] { + sqlite3_reset(stmt); + RELEASE_AND_RETURN(scope, {}); + } + } + + size_t columnCount = castedThis->columnNames->size(); + JSValue result = jsNull(); + if (status == SQLITE_ROW) { + // this is a count from UPDATE or another query like that + if (columnCount == 0) { + while (status == SQLITE_ROW) { + status = sqlite3_step(stmt); + } + + result = jsNumber(sqlite3_column_count(stmt)); + + } else { + + JSC::JSArray* resultArray = JSC::constructEmptyArray(lexicalGlobalObject, nullptr, 0); + RETURN_IF_EXCEPTION(scope, {}); + { + size_t columnCount = sqlite3_column_count(stmt); + + do { + JSC::JSArray* row = constructResultRowRaw(vm, lexicalGlobalObject, castedThis, columnCount); + if (!row || scope.exception()) [[unlikely]] { + sqlite3_reset(stmt); + RELEASE_AND_RETURN(scope, {}); + } + resultArray->push(lexicalGlobalObject, row); + if (scope.exception()) [[unlikely]] { sqlite3_reset(stmt); RELEASE_AND_RETURN(scope, {}); } + status = 
sqlite3_step(stmt); } while (status == SQLITE_ROW); } @@ -2299,7 +2471,9 @@ JSC_DEFINE_HOST_FUNCTION(jsSQLStatementExecuteStatementFunctionRun, (JSC::JSGlob int64_t last_insert_rowid = sqlite3_last_insert_rowid(db); diff->putInternalField(vm, 0, JSC::jsNumber(total_changes_after - total_changes_before)); if (castedThis->useBigInt64) { - diff->putInternalField(vm, 1, JSBigInt::createFrom(lexicalGlobalObject, last_insert_rowid)); + JSValue lastRowIdBigInt = JSBigInt::createFrom(lexicalGlobalObject, last_insert_rowid); + RETURN_IF_EXCEPTION(scope, {}); + diff->putInternalField(vm, 1, lastRowIdBigInt); } else { diff->putInternalField(vm, 1, JSC::jsNumber(last_insert_rowid)); } diff --git a/src/codegen/bundle-modules.ts b/src/codegen/bundle-modules.ts index 2998f6a78c..bec07d0e16 100644 --- a/src/codegen/bundle-modules.ts +++ b/src/codegen/bundle-modules.ts @@ -75,11 +75,14 @@ async function retry(n, fn) { throw err; } +const bunRepoRoot = path.join(CMAKE_BUILD_ROOT, "..", ".."); + // Preprocess builtins const bundledEntryPoints: string[] = []; for (let i = 0; i < nativeStartIndex; i++) { try { - let input = fs.readFileSync(path.join(BASE, moduleList[i]), "utf8"); + const file = path.join(BASE, moduleList[i]); + let input = fs.readFileSync(file, "utf8"); if (!/\bexport\s+(?:function|class|const|default|{)/.test(input)) { if (input.includes("module.exports")) { @@ -87,7 +90,9 @@ for (let i = 0; i < nativeStartIndex; i++) { "Do not use CommonJS module.exports in ESM modules. Use `export default { ... }` instead. See src/js/README.md", ); } else { - throw new Error("Internal modules must have at least one ESM export statement. See src/js/README.md"); + throw new Error( + `Internal modules must have at least one ESM export statement in '${path.relative(bunRepoRoot, file)}' — see src/js/README.md`, + ); } } diff --git a/src/js/builtins.d.ts b/src/js/builtins.d.ts index 24f0407ad5..ba27f233cc 100644 --- a/src/js/builtins.d.ts +++ b/src/js/builtins.d.ts @@ -172,6 +172,7 @@ declare function $idWithProfile(): TODO; * @see [JIT implementation](https://github.com/oven-sh/WebKit/blob/433f7598bf3537a295d0af5ffd83b9a307abec4e/Source/JavaScriptCore/jit/JITOpcodes.cpp#L311) */ declare function $isObject(obj: unknown): obj is object; +declare function $isArray(obj: T): obj is Extract | Extract; declare function $isArray(obj: unknown): obj is any[]; declare function $isCallable(fn: unknown): fn is CallableFunction; declare function $isConstructor(fn: unknown): fn is { new (...args: any[]): any }; @@ -842,7 +843,7 @@ interface ObjectConstructor { declare const $Object: ObjectConstructor; /** gets a property on an object */ -declare function $getByIdDirect(obj: any, key: string): T; +declare function $getByIdDirect(obj: T, key: K): T[K]; /** * Gets a private property on an object. diff --git a/src/js/builtins/ReadableStreamInternals.ts b/src/js/builtins/ReadableStreamInternals.ts index 5cb7316330..f505268cc8 100644 --- a/src/js/builtins/ReadableStreamInternals.ts +++ b/src/js/builtins/ReadableStreamInternals.ts @@ -2050,7 +2050,6 @@ export function createLazyLoadedStreamPrototype(): typeof ReadableStreamDefaultC throw $ERR_INVALID_STATE("Internal error: invalid result from pull. This is a bug in Bun. 
Please report it."); } - // eslint-disable-next-line no-unused-private-class-members #pull(controller) { var handle = $getByIdDirectPrivate(this, "stream"); @@ -2103,7 +2102,6 @@ export function createLazyLoadedStreamPrototype(): typeof ReadableStreamDefaultC } } - // eslint-disable-next-line no-unused-private-class-members #cancel(reason) { var handle = $getByIdDirectPrivate(this, "stream"); this.$data = undefined; diff --git a/src/js/bun/sql.ts b/src/js/bun/sql.ts index f4f92050cb..ffc317bad1 100644 --- a/src/js/bun/sql.ts +++ b/src/js/bun/sql.ts @@ -1,1574 +1,73 @@ -import type * as BunTypes from "bun"; +import type { PostgresAdapter } from "internal/sql/postgres"; +import type { BaseQueryHandle, Query } from "internal/sql/query"; +import type { SQLHelper } from "internal/sql/shared"; -const enum QueryStatus { - active = 1 << 1, - cancelled = 1 << 2, - error = 1 << 3, - executed = 1 << 4, - invalidHandle = 1 << 5, -} -const cmds = ["", "INSERT", "DELETE", "UPDATE", "MERGE", "SELECT", "MOVE", "FETCH", "COPY"]; +const { Query, SQLQueryFlags } = require("internal/sql/query"); +const { PostgresAdapter } = require("internal/sql/postgres"); +const { SQLiteAdapter } = require("internal/sql/sqlite"); +const { SQLHelper, parseOptions } = require("internal/sql/shared"); +const { connectionClosedError } = require("internal/sql/utils"); +const { SQLError, PostgresError, SQLiteError } = require("internal/sql/errors"); -const PublicArray = globalThis.Array; -const enum SSLMode { - disable = 0, - prefer = 1, - require = 2, - verify_ca = 3, - verify_full = 4, -} - -const { hideFromStack } = require("internal/shared"); const defineProperties = Object.defineProperties; -function connectionClosedError() { - return $ERR_POSTGRES_CONNECTION_CLOSED("Connection closed"); -} -function notTaggedCallError() { - return $ERR_POSTGRES_NOT_TAGGED_CALL("Query not called as a tagged template literal"); -} -hideFromStack(connectionClosedError); -hideFromStack(notTaggedCallError); - -enum SQLQueryResultMode { - objects = 0, - values = 1, - raw = 2, -} -const escapeIdentifier = function escape(str) { - return '"' + str.replaceAll('"', '""').replaceAll(".", '"."') + '"'; -}; -class SQLResultArray extends PublicArray { - static [Symbol.toStringTag] = "SQLResults"; - - constructor() { - super(); - // match postgres's result array, in this way for in will not list the properties and .map will not return undefined command and count - Object.defineProperties(this, { - count: { value: null, writable: true }, - command: { value: null, writable: true }, - }); - } - static get [Symbol.species]() { - return Array; - } -} - -const _resolve = Symbol("resolve"); -const _reject = Symbol("reject"); -const _handle = Symbol("handle"); -const _run = Symbol("run"); -const _queryStatus = Symbol("status"); -const _handler = Symbol("handler"); -const _strings = Symbol("strings"); -const _values = Symbol("values"); -const _poolSize = Symbol("poolSize"); -const _flags = Symbol("flags"); -const _results = Symbol("results"); -const PublicPromise = Promise; -type TransactionCallback = (sql: (strings: string, ...values: any[]) => Query) => Promise; - -const { createConnection: _createConnection, createQuery, init } = $zig("postgres.zig", "createBinding"); - -function normalizeSSLMode(value: string): SSLMode { - if (!value) { - return SSLMode.disable; - } - - value = (value + "").toLowerCase(); - switch (value) { - case "disable": - return SSLMode.disable; - case "prefer": - return SSLMode.prefer; - case "require": - case "required": - return 
SSLMode.require; - case "verify-ca": - case "verify_ca": - return SSLMode.verify_ca; - case "verify-full": - case "verify_full": - return SSLMode.verify_full; - default: { - break; - } - } - - throw $ERR_INVALID_ARG_VALUE("sslmode", value); -} - -enum SQLQueryFlags { - none = 0, - allowUnsafeTransaction = 1 << 0, - unsafe = 1 << 1, - bigint = 1 << 2, - simple = 1 << 3, - notTagged = 1 << 4, -} - -function getQueryHandle(query) { - let handle = query[_handle]; - if (!handle) { - try { - query[_handle] = handle = doCreateQuery( - query[_strings], - query[_values], - query[_flags] & SQLQueryFlags.allowUnsafeTransaction, - query[_poolSize], - query[_flags] & SQLQueryFlags.bigint, - query[_flags] & SQLQueryFlags.simple, - ); - } catch (err) { - query[_queryStatus] |= QueryStatus.error | QueryStatus.invalidHandle; - query.reject(err); - } - } - return handle; -} - -enum SQLCommand { - insert = 0, - update = 1, - updateSet = 2, - where = 3, - whereIn = 4, - none = -1, -} - -function commandToString(command: SQLCommand): string { - switch (command) { - case SQLCommand.insert: - return "INSERT"; - case SQLCommand.updateSet: - case SQLCommand.update: - return "UPDATE"; - case SQLCommand.whereIn: - case SQLCommand.where: - return "WHERE"; - default: - return ""; - } -} - -function detectCommand(query: string): SQLCommand { - const text = query.toLowerCase().trim(); - const text_len = text.length; - - let token = ""; - let command = SQLCommand.none; - let quoted = false; - for (let i = 0; i < text_len; i++) { - const char = text[i]; - switch (char) { - case " ": // Space - case "\n": // Line feed - case "\t": // Tab character - case "\r": // Carriage return - case "\f": // Form feed - case "\v": { - switch (token) { - case "insert": { - if (command === SQLCommand.none) { - return SQLCommand.insert; - } - return command; - } - case "update": { - if (command === SQLCommand.none) { - command = SQLCommand.update; - token = ""; - continue; // try to find SET - } - return command; - } - case "where": { - command = SQLCommand.where; - token = ""; - continue; // try to find IN - } - case "set": { - if (command === SQLCommand.update) { - command = SQLCommand.updateSet; - token = ""; - continue; // try to find WHERE - } - return command; - } - case "in": { - if (command === SQLCommand.where) { - return SQLCommand.whereIn; - } - return command; - } - default: { - token = ""; - continue; - } - } - } - default: { - // skip quoted commands - if (char === '"') { - quoted = !quoted; - continue; - } - if (!quoted) { - token += char; - } - } - } - } - if (token) { - switch (command) { - case SQLCommand.none: { - switch (token) { - case "insert": - return SQLCommand.insert; - case "update": - return SQLCommand.update; - case "where": - return SQLCommand.where; - default: - return SQLCommand.none; - } - } - case SQLCommand.update: { - if (token === "set") { - return SQLCommand.updateSet; - } - return SQLCommand.update; - } - case SQLCommand.where: { - if (token === "in") { - return SQLCommand.whereIn; - } - return SQLCommand.where; - } - } - } - - return command; -} - -function normalizeQuery(strings, values, binding_idx = 1) { - if (typeof strings === "string") { - // identifier or unsafe query - return [strings, values || []]; - } - if (!$isArray(strings)) { - // we should not hit this path - throw new SyntaxError("Invalid query: SQL Fragment cannot be executed or was misused"); - } - const str_len = strings.length; - if (str_len === 0) { - return ["", []]; - } - let binding_values: any[] = []; - let query = ""; - for 
(let i = 0; i < str_len; i++) { - const string = strings[i]; - - if (typeof string === "string") { - query += string; - if (values.length > i) { - const value = values[i]; - if (value instanceof Query) { - const [sub_query, sub_values] = normalizeQuery(value[_strings], value[_values], binding_idx); - query += sub_query; - for (let j = 0; j < sub_values.length; j++) { - binding_values.push(sub_values[j]); - } - binding_idx += sub_values.length; - } else if (value instanceof SQLHelper) { - const command = detectCommand(query); - // only selectIn, insert, update, updateSet are allowed - if (command === SQLCommand.none || command === SQLCommand.where) { - throw new SyntaxError("Helpers are only allowed for INSERT, UPDATE and WHERE IN commands"); - } - const { columns, value: items } = value as SQLHelper; - const columnCount = columns.length; - if (columnCount === 0 && command !== SQLCommand.whereIn) { - throw new SyntaxError(`Cannot ${commandToString(command)} with no columns`); - } - const lastColumnIndex = columns.length - 1; - - if (command === SQLCommand.insert) { - // - // insert into users ${sql(users)} or insert into users ${sql(user)} - // - - query += "("; - for (let j = 0; j < columnCount; j++) { - query += escapeIdentifier(columns[j]); - if (j < lastColumnIndex) { - query += ", "; - } - } - query += ") VALUES"; - if ($isArray(items)) { - const itemsCount = items.length; - const lastItemIndex = itemsCount - 1; - for (let j = 0; j < itemsCount; j++) { - query += "("; - const item = items[j]; - for (let k = 0; k < columnCount; k++) { - const column = columns[k]; - const columnValue = item[column]; - query += `$${binding_idx++}${k < lastColumnIndex ? ", " : ""}`; - if (typeof columnValue === "undefined") { - binding_values.push(null); - } else { - binding_values.push(columnValue); - } - } - if (j < lastItemIndex) { - query += "),"; - } else { - query += ") "; // the user can add RETURNING * or RETURNING id - } - } - } else { - query += "("; - const item = items; - for (let j = 0; j < columnCount; j++) { - const column = columns[j]; - const columnValue = item[column]; - query += `$${binding_idx++}${j < lastColumnIndex ? ", " : ""}`; - if (typeof columnValue === "undefined") { - binding_values.push(null); - } else { - binding_values.push(columnValue); - } - } - query += ") "; // the user can add RETURNING * or RETURNING id - } - } else if (command === SQLCommand.whereIn) { - // SELECT * FROM users WHERE id IN (${sql([1, 2, 3])}) - if (!$isArray(items)) { - throw new SyntaxError("An array of values is required for WHERE IN helper"); - } - const itemsCount = items.length; - const lastItemIndex = itemsCount - 1; - query += "("; - for (let j = 0; j < itemsCount; j++) { - query += `$${binding_idx++}${j < lastItemIndex ? 
", " : ""}`; - if (columnCount > 0) { - // we must use a key from a object - if (columnCount > 1) { - // we should not pass multiple columns here - throw new SyntaxError("Cannot use WHERE IN helper with multiple columns"); - } - // SELECT * FROM users WHERE id IN (${sql(users, "id")}) - const value = items[j]; - if (typeof value === "undefined") { - binding_values.push(null); - } else { - const value_from_key = value[columns[0]]; - - if (typeof value_from_key === "undefined") { - binding_values.push(null); - } else { - binding_values.push(value_from_key); - } - } - } else { - const value = items[j]; - if (typeof value === "undefined") { - binding_values.push(null); - } else { - binding_values.push(value); - } - } - } - query += ") "; // more conditions can be added after this - } else { - // UPDATE users SET ${sql({ name: "John", age: 31 })} WHERE id = 1 - let item; - if ($isArray(items)) { - if (items.length > 1) { - throw new SyntaxError("Cannot use array of objects for UPDATE"); - } - item = items[0]; - } else { - item = items; - } - // no need to include if is updateSet - if (command === SQLCommand.update) { - query += " SET "; - } - for (let i = 0; i < columnCount; i++) { - const column = columns[i]; - const columnValue = item[column]; - query += `${escapeIdentifier(column)} = $${binding_idx++}${i < lastColumnIndex ? ", " : ""}`; - if (typeof columnValue === "undefined") { - binding_values.push(null); - } else { - binding_values.push(columnValue); - } - } - query += " "; // the user can add where clause after this - } - } else { - //TODO: handle sql.array parameters - query += `$${binding_idx++} `; - if (typeof value === "undefined") { - binding_values.push(null); - } else { - binding_values.push(value); - } - } - } - } else { - throw new SyntaxError("Invalid query: SQL Fragment cannot be executed or was misused"); - } - } - - return [query, binding_values]; -} - -class Query extends PublicPromise { - [_resolve]; - [_reject]; - [_handle]; - [_handler]; - [_queryStatus] = 0; - [_strings]; - [_values]; - - [Symbol.for("nodejs.util.inspect.custom")]() { - const status = this[_queryStatus]; - const active = (status & QueryStatus.active) != 0; - const cancelled = (status & QueryStatus.cancelled) != 0; - const executed = (status & QueryStatus.executed) != 0; - const error = (status & QueryStatus.error) != 0; - return `PostgresQuery { ${active ? "active" : ""} ${cancelled ? "cancelled" : ""} ${executed ? "executed" : ""} ${error ? 
"error" : ""} }`; - } - - constructor(strings, values, flags, poolSize, handler) { - var resolve_, reject_; - super((resolve, reject) => { - resolve_ = resolve; - reject_ = reject; - }); - if (typeof strings === "string") { - if (!(flags & SQLQueryFlags.unsafe)) { - // identifier (cannot be executed in safe mode) - flags |= SQLQueryFlags.notTagged; - strings = escapeIdentifier(strings); - } - } - this[_resolve] = resolve_; - this[_reject] = reject_; - this[_handle] = null; - this[_handler] = handler; - this[_queryStatus] = 0; - this[_poolSize] = poolSize; - this[_strings] = strings; - this[_values] = values; - this[_flags] = flags; - - this[_results] = null; - } - - async [_run](async: boolean) { - const { [_handler]: handler, [_queryStatus]: status } = this; - - if (status & (QueryStatus.executed | QueryStatus.error | QueryStatus.cancelled | QueryStatus.invalidHandle)) { - return; - } - if (this[_flags] & SQLQueryFlags.notTagged) { - this.reject(notTaggedCallError()); - return; - } - this[_queryStatus] |= QueryStatus.executed; - - const handle = getQueryHandle(this); - if (!handle) return this; - - if (async) { - // Ensure it's actually async - // eslint-disable-next-line - await 1; - } - - try { - return handler(this, handle); - } catch (err) { - this[_queryStatus] |= QueryStatus.error; - this.reject(err); - } - } - get active() { - return (this[_queryStatus] & QueryStatus.active) != 0; - } - - set active(value) { - const status = this[_queryStatus]; - if (status & (QueryStatus.cancelled | QueryStatus.error)) { - return; - } - - if (value) { - this[_queryStatus] |= QueryStatus.active; - } else { - this[_queryStatus] &= ~QueryStatus.active; - } - } - - get cancelled() { - return (this[_queryStatus] & QueryStatus.cancelled) !== 0; - } - - resolve(x) { - this[_queryStatus] &= ~QueryStatus.active; - const handle = getQueryHandle(this); - if (!handle) return this; - handle.done(); - return this[_resolve](x); - } - - reject(x) { - this[_queryStatus] &= ~QueryStatus.active; - this[_queryStatus] |= QueryStatus.error; - if (!(this[_queryStatus] & QueryStatus.invalidHandle)) { - const handle = getQueryHandle(this); - if (!handle) return this[_reject](x); - handle.done(); - } - - return this[_reject](x); - } - - cancel() { - var status = this[_queryStatus]; - if (status & QueryStatus.cancelled) { - return this; - } - this[_queryStatus] |= QueryStatus.cancelled; - - if (status & QueryStatus.executed) { - const handle = getQueryHandle(this); - handle.cancel(); - } - - return this; - } - - execute() { - this[_run](false); - return this; - } - - raw() { - const handle = getQueryHandle(this); - if (!handle) return this; - handle.setMode(SQLQueryResultMode.raw); - return this; - } - - simple() { - this[_flags] |= SQLQueryFlags.simple; - return this; - } - - values() { - const handle = getQueryHandle(this); - if (!handle) return this; - handle.setMode(SQLQueryResultMode.values); - return this; - } - - then() { - if (this[_flags] & SQLQueryFlags.notTagged) { - throw notTaggedCallError(); - } - this[_run](true); - const result = super.$then.$apply(this, arguments); - $markPromiseAsHandled(result); - return result; - } - - catch() { - if (this[_flags] & SQLQueryFlags.notTagged) { - throw notTaggedCallError(); - } - this[_run](true); - const result = super.catch.$apply(this, arguments); - $markPromiseAsHandled(result); - return result; - } - - finally() { - if (this[_flags] & SQLQueryFlags.notTagged) { - throw notTaggedCallError(); - } - this[_run](true); - return super.finally.$apply(this, arguments); - } -} 
-Object.defineProperty(Query, Symbol.species, { value: PublicPromise }); -Object.defineProperty(Query, Symbol.toStringTag, { value: "Query" }); -init( - function onResolvePostgresQuery(query, result, commandTag, count, queries, is_last) { - /// simple queries - if (query[_flags] & SQLQueryFlags.simple) { - // simple can have multiple results or a single result - if (is_last) { - if (queries) { - const queriesIndex = queries.indexOf(query); - if (queriesIndex !== -1) { - queries.splice(queriesIndex, 1); - } - } - try { - query.resolve(query[_results]); - } catch {} - return; - } - $assert(result instanceof SQLResultArray, "Invalid result array"); - // prepare for next query - query[_handle].setPendingValue(new SQLResultArray()); - - if (typeof commandTag === "string") { - if (commandTag.length > 0) { - result.command = commandTag; - } - } else { - result.command = cmds[commandTag]; - } - - result.count = count || 0; - const last_result = query[_results]; - - if (!last_result) { - query[_results] = result; - } else { - if (last_result instanceof SQLResultArray) { - // multiple results - query[_results] = [last_result, result]; - } else { - // 3 or more results - last_result.push(result); - } - } - return; - } - /// prepared statements - $assert(result instanceof SQLResultArray, "Invalid result array"); - if (typeof commandTag === "string") { - if (commandTag.length > 0) { - result.command = commandTag; - } - } else { - result.command = cmds[commandTag]; - } - - result.count = count || 0; - if (queries) { - const queriesIndex = queries.indexOf(query); - if (queriesIndex !== -1) { - queries.splice(queriesIndex, 1); - } - } - try { - query.resolve(result); - } catch {} - }, - function onRejectPostgresQuery(query, reject, queries) { - if (queries) { - const queriesIndex = queries.indexOf(query); - if (queriesIndex !== -1) { - queries.splice(queriesIndex, 1); - } - } - - try { - query.reject(reject); - } catch {} - }, -); - -function onQueryFinish(onClose) { - this.queries.delete(onClose); - this.pool.release(this); -} - -enum PooledConnectionState { - pending = 0, - connected = 1, - closed = 2, -} -enum PooledConnectionFlags { - /// canBeConnected is used to indicate that at least one time we were able to connect to the database - canBeConnected = 1 << 0, - /// reserved is used to indicate that the connection is currently reserved - reserved = 1 << 1, - /// preReserved is used to indicate that the connection will be reserved in the future when queryCount drops to 0 - preReserved = 1 << 2, -} - -class PooledConnection { - pool: ConnectionPool; - connection: $ZigGeneratedClasses.PostgresSQLConnection | null = null; - state: PooledConnectionState = PooledConnectionState.pending; - storedError: Error | null = null; - queries: Set<(err: Error) => void> = new Set(); - onFinish: ((err: Error | null) => void) | null = null; - connectionInfo: any; - flags: number = 0; - /// queryCount is used to indicate the number of queries using the connection, if a connection is reserved or if its a transaction queryCount will be 1 independently of the number of queries - queryCount: number = 0; - #onConnected(err, _) { - const connectionInfo = this.connectionInfo; - if (connectionInfo?.onconnect) { - connectionInfo.onconnect(err); - } - this.storedError = err; - if (!err) { - this.flags |= PooledConnectionFlags.canBeConnected; - } - this.state = err ? 
PooledConnectionState.closed : PooledConnectionState.connected; - const onFinish = this.onFinish; - if (onFinish) { - this.queryCount = 0; - this.flags &= ~PooledConnectionFlags.reserved; - this.flags &= ~PooledConnectionFlags.preReserved; - - // pool is closed, lets finish the connection - // pool is closed, lets finish the connection - if (err) { - onFinish(err); - } else { - this.connection?.close(); - } - return; - } - this.pool.release(this, true); - } - #onClose(err) { - const connectionInfo = this.connectionInfo; - if (connectionInfo?.onclose) { - connectionInfo.onclose(err); - } - this.state = PooledConnectionState.closed; - this.connection = null; - this.storedError = err; - - // remove from ready connections if its there - this.pool.readyConnections.delete(this); - const queries = new Set(this.queries); - this.queries.clear(); - this.queryCount = 0; - this.flags &= ~PooledConnectionFlags.reserved; - - // notify all queries that the connection is closed - for (const onClose of queries) { - onClose(err); - } - const onFinish = this.onFinish; - if (onFinish) { - onFinish(err); - } - - this.pool.release(this, true); - } - constructor(connectionInfo, pool: ConnectionPool) { - this.state = PooledConnectionState.pending; - this.pool = pool; - this.connectionInfo = connectionInfo; - this.#startConnection(); - } - async #startConnection() { - this.connection = (await createConnection( - this.connectionInfo, - this.#onConnected.bind(this), - this.#onClose.bind(this), - )) as $ZigGeneratedClasses.PostgresSQLConnection; - } - onClose(onClose: (err: Error) => void) { - this.queries.add(onClose); - } - bindQuery(query: Query, onClose: (err: Error) => void) { - this.queries.add(onClose); - // @ts-ignore - query.finally(onQueryFinish.bind(this, onClose)); - } - - #doRetry() { - if (this.pool.closed) { - return; - } - // reset error and state - this.storedError = null; - this.state = PooledConnectionState.pending; - // retry connection - this.#startConnection(); - } - close() { - try { - if (this.state === PooledConnectionState.connected) { - this.connection?.close(); - } - } catch {} - } - flush() { - this.connection?.flush(); - } - retry() { - // if pool is closed, we can't retry - if (this.pool.closed) { - return false; - } - // we need to reconnect - // lets use a retry strategy - - // we can only retry if one day we are able to connect - if (this.flags & PooledConnectionFlags.canBeConnected) { - this.#doRetry(); - } else { - // analyse type of error to see if we can retry - switch (this.storedError?.code) { - case "ERR_POSTGRES_UNSUPPORTED_AUTHENTICATION_METHOD": - case "ERR_POSTGRES_UNKNOWN_AUTHENTICATION_METHOD": - case "ERR_POSTGRES_TLS_NOT_AVAILABLE": - case "ERR_POSTGRES_TLS_UPGRADE_FAILED": - case "ERR_POSTGRES_INVALID_SERVER_SIGNATURE": - case "ERR_POSTGRES_INVALID_SERVER_KEY": - case "ERR_POSTGRES_AUTHENTICATION_FAILED_PBKDF2": - // we can't retry these are authentication errors - return false; - default: - // we can retry - this.#doRetry(); - } - } - return true; - } -} -class ConnectionPool { - connectionInfo: any; - - connections: PooledConnection[]; - readyConnections: Set; - waitingQueue: Array<(err: Error | null, result: any) => void> = []; - reservedQueue: Array<(err: Error | null, result: any) => void> = []; - - poolStarted: boolean = false; - closed: boolean = false; - totalQueries: number = 0; - onAllQueriesFinished: (() => void) | null = null; - constructor(connectionInfo) { - this.connectionInfo = connectionInfo; - this.connections = new Array(connectionInfo.max); - 
this.readyConnections = new Set(); - } - - maxDistribution() { - if (!this.waitingQueue.length) return 0; - const result = Math.ceil((this.waitingQueue.length + this.totalQueries) / this.connections.length); - return result ? result : 1; - } - - flushConcurrentQueries() { - const maxDistribution = this.maxDistribution(); - if (maxDistribution === 0) { - return; - } - - while (true) { - const nonReservedConnections = Array.from(this.readyConnections).filter( - c => !(c.flags & PooledConnectionFlags.preReserved) && c.queryCount < maxDistribution, - ); - if (nonReservedConnections.length === 0) { - return; - } - const orderedConnections = nonReservedConnections.sort((a, b) => a.queryCount - b.queryCount); - for (const connection of orderedConnections) { - const pending = this.waitingQueue.shift(); - if (!pending) { - return; - } - connection.queryCount++; - this.totalQueries++; - pending(null, connection); - } - } - } - - release(connection: PooledConnection, connectingEvent: boolean = false) { - if (!connectingEvent) { - connection.queryCount--; - this.totalQueries--; - } - const currentQueryCount = connection.queryCount; - if (currentQueryCount == 0) { - connection.flags &= ~PooledConnectionFlags.reserved; - connection.flags &= ~PooledConnectionFlags.preReserved; - } - if (this.onAllQueriesFinished) { - // we are waiting for all queries to finish, lets check if we can call it - if (!this.hasPendingQueries()) { - this.onAllQueriesFinished(); - } - } - - if (connection.state !== PooledConnectionState.connected) { - // connection is not ready - if (connection.storedError) { - // this connection got a error but maybe we can wait for another - - if (this.hasConnectionsAvailable()) { - return; - } - - const waitingQueue = this.waitingQueue; - const reservedQueue = this.reservedQueue; - - this.waitingQueue = []; - this.reservedQueue = []; - // we have no connections available so lets fails - for (const pending of waitingQueue) { - pending(connection.storedError, connection); - } - for (const pending of reservedQueue) { - pending(connection.storedError, connection); - } - } - return; - } - - if (currentQueryCount == 0) { - // ok we can actually bind reserved queries to it - const pendingReserved = this.reservedQueue.shift(); - if (pendingReserved) { - connection.flags |= PooledConnectionFlags.reserved; - connection.queryCount++; - this.totalQueries++; - // we have a connection waiting for a reserved connection lets prioritize it - pendingReserved(connection.storedError, connection); - return; - } - } - this.readyConnections.add(connection); - this.flushConcurrentQueries(); - } - - hasConnectionsAvailable() { - if (this.readyConnections.size > 0) return true; - if (this.poolStarted) { - const pollSize = this.connections.length; - for (let i = 0; i < pollSize; i++) { - const connection = this.connections[i]; - if (connection.state !== PooledConnectionState.closed) { - // some connection is connecting or connected - return true; - } - } - } - return false; - } - - hasPendingQueries() { - if (this.waitingQueue.length > 0 || this.reservedQueue.length > 0) return true; - if (this.poolStarted) { - return this.totalQueries > 0; - } - return false; - } - isConnected() { - if (this.readyConnections.size > 0) { - return true; - } - if (this.poolStarted) { - const pollSize = this.connections.length; - for (let i = 0; i < pollSize; i++) { - const connection = this.connections[i]; - if (connection.state === PooledConnectionState.connected) { - return true; - } - } - } - return false; - } - flush() { - if 
(this.closed) { - return; - } - if (this.poolStarted) { - const pollSize = this.connections.length; - for (let i = 0; i < pollSize; i++) { - const connection = this.connections[i]; - if (connection.state === PooledConnectionState.connected) { - connection.connection?.flush(); - } - } - } - } - - async #close() { - let pending; - while ((pending = this.waitingQueue.shift())) { - pending(connectionClosedError(), null); - } - while (this.reservedQueue.length > 0) { - const pendingReserved = this.reservedQueue.shift(); - if (pendingReserved) { - pendingReserved(connectionClosedError(), null); - } - } - const promises: Array> = []; - if (this.poolStarted) { - this.poolStarted = false; - const pollSize = this.connections.length; - for (let i = 0; i < pollSize; i++) { - const connection = this.connections[i]; - switch (connection.state) { - case PooledConnectionState.pending: - { - const { promise, resolve } = Promise.withResolvers(); - connection.onFinish = resolve; - promises.push(promise); - connection.connection?.close(); - } - break; - case PooledConnectionState.connected: - { - const { promise, resolve } = Promise.withResolvers(); - connection.onFinish = resolve; - promises.push(promise); - connection.connection?.close(); - } - break; - } - // clean connection reference - // @ts-ignore - this.connections[i] = null; - } - } - this.readyConnections.clear(); - this.waitingQueue.length = 0; - return Promise.all(promises); - } - async close(options?: { timeout?: number }) { - if (this.closed) { - return; - } - let timeout = options?.timeout; - if (timeout) { - timeout = Number(timeout); - if (timeout > 2 ** 31 || timeout < 0 || timeout !== timeout) { - throw $ERR_INVALID_ARG_VALUE("options.timeout", timeout, "must be a non-negative integer less than 2^31"); - } - this.closed = true; - if (timeout === 0 || !this.hasPendingQueries()) { - // close immediately - await this.#close(); - return; - } - - const { promise, resolve } = Promise.withResolvers(); - const timer = setTimeout(() => { - // timeout is reached, lets close and probably fail some queries - this.#close().finally(resolve); - }, timeout * 1000); - timer.unref(); // dont block the event loop - this.onAllQueriesFinished = () => { - clearTimeout(timer); - // everything is closed, lets close the pool - this.#close().finally(resolve); - }; - - return promise; - } else { - this.closed = true; - if (!this.hasPendingQueries()) { - // close immediately - await this.#close(); - return; - } - // gracefully close the pool - const { promise, resolve } = Promise.withResolvers(); - this.onAllQueriesFinished = () => { - // everything is closed, lets close the pool - this.#close().finally(resolve); - }; - return promise; - } - } - - /** - * @param {function} onConnected - The callback function to be called when the connection is established. 
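The pool's `close()` above takes an optional timeout in seconds: with no timeout it drains pending queries first, with `{ timeout: 0 }` (or nothing pending) it closes immediately, and otherwise it force-closes once the deadline passes. A usage sketch of the public surface in a graceful-shutdown handler (the route and table are illustrative):

```ts
import { sql } from "bun";

const server = Bun.serve({
  port: 3000,
  async fetch() {
    const [row] = await sql`SELECT count(*)::int AS users FROM users`; // illustrative table
    return Response.json(row);
  },
});

// Graceful shutdown: stop accepting requests, then give in-flight queries
// up to 5 seconds before force-closing the pool. The timeout is in seconds
// (converted to milliseconds internally); sql.end() is an alias for sql.close().
process.on("SIGTERM", async () => {
  server.stop();
  await sql.close({ timeout: 5 });
  process.exit(0);
});
```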
- * @param {boolean} reserved - Whether the connection is reserved, if is reserved the connection will not be released until release is called, if not release will only decrement the queryCount counter - */ - connect(onConnected: (err: Error | null, result: any) => void, reserved: boolean = false) { - if (this.closed) { - return onConnected(connectionClosedError(), null); - } - - if (this.readyConnections.size === 0) { - // no connection ready lets make some - let retry_in_progress = false; - let all_closed = true; - let storedError: Error | null = null; - - if (this.poolStarted) { - // we already started the pool - // lets check if some connection is available to retry - const pollSize = this.connections.length; - for (let i = 0; i < pollSize; i++) { - const connection = this.connections[i]; - // we need a new connection and we have some connections that can retry - if (connection.state === PooledConnectionState.closed) { - if (connection.retry()) { - // lets wait for connection to be released - if (!retry_in_progress) { - // avoid adding to the queue twice, we wanna to retry every available pool connection - retry_in_progress = true; - if (reserved) { - // we are not sure what connection will be available so we dont pre reserve - this.reservedQueue.push(onConnected); - } else { - this.waitingQueue.push(onConnected); - } - } - } else { - // we have some error, lets grab it and fail if unable to start a connection - storedError = connection.storedError; - } - } else { - // we have some pending or open connections - all_closed = false; - } - } - if (!all_closed && !retry_in_progress) { - // is possible to connect because we have some working connections, or we are just without network for some reason - // wait for connection to be released or fail - if (reserved) { - // we are not sure what connection will be available so we dont pre reserve - this.reservedQueue.push(onConnected); - } else { - this.waitingQueue.push(onConnected); - } - } else if (!retry_in_progress) { - // impossible to connect or retry - onConnected(storedError ?? 
connectionClosedError(), null); - } - return; - } - // we never started the pool, lets start it - if (reserved) { - this.reservedQueue.push(onConnected); - } else { - this.waitingQueue.push(onConnected); - } - this.poolStarted = true; - const pollSize = this.connections.length; - // pool is always at least 1 connection - const firstConnection = new PooledConnection(this.connectionInfo, this); - this.connections[0] = firstConnection; - if (reserved) { - firstConnection.flags |= PooledConnectionFlags.preReserved; // lets pre reserve the first connection - } - for (let i = 1; i < pollSize; i++) { - this.connections[i] = new PooledConnection(this.connectionInfo, this); - } - return; - } - if (reserved) { - let connectionWithLeastQueries: PooledConnection | null = null; - let leastQueries = Infinity; - for (const connection of this.readyConnections) { - if (connection.flags & PooledConnectionFlags.preReserved || connection.flags & PooledConnectionFlags.reserved) - continue; - const queryCount = connection.queryCount; - if (queryCount > 0) { - if (queryCount < leastQueries) { - leastQueries = queryCount; - connectionWithLeastQueries = connection; - } - continue; - } - connection.flags |= PooledConnectionFlags.reserved; - connection.queryCount++; - this.totalQueries++; - this.readyConnections.delete(connection); - onConnected(null, connection); - return; - } - if (connectionWithLeastQueries) { - // lets mark the connection with the least queries as preReserved if any - connectionWithLeastQueries.flags |= PooledConnectionFlags.preReserved; - } - // no connection available to be reserved lets wait for a connection to be released - this.reservedQueue.push(onConnected); - } else { - this.waitingQueue.push(onConnected); - this.flushConcurrentQueries(); - } - } -} - -async function createConnection(options, onConnected, onClose) { - const { - hostname, - port, - username, - tls, - query, - database, - sslMode, - idleTimeout = 0, - connectionTimeout = 30 * 1000, - maxLifetime = 0, - prepare = true, - path, - } = options; - - let password = options.password; - try { - if (typeof password === "function") { - password = password(); - if (password && $isPromise(password)) { - password = await password; - } - } - return _createConnection( - hostname, - Number(port), - username || "", - password || "", - database || "", - // > The default value for sslmode is prefer. As is shown in the table, this - // makes no sense from a security point of view, and it only promises - // performance overhead if possible. It is only provided as the default for - // backward compatibility, and is not recommended in secure deployments. 
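The `connect(onConnected, reserved)` path above is what backs `sql.reserve()`: a reserved connection is pinned to the caller and is not handed to other queries until it is released. A small usage sketch; the temp-table trick just demonstrates that everything runs on the same session:

```ts
import { sql } from "bun";

// Reserve a single connection from the pool: queries on `reserved` all run on
// that one connection, so session state (e.g. temp tables) is preserved.
const reserved = await sql.reserve();
try {
  await reserved`CREATE TEMP TABLE scratch (n int)`;
  await reserved`INSERT INTO scratch VALUES (${1})`;
  const [row] = await reserved`SELECT count(*)::int AS n FROM scratch`;
  console.log(row.n); // 1 — the temp table is visible because it's the same session
} finally {
  reserved.release(); // hand the connection back to the pool
}
```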
- sslMode || SSLMode.disable, - tls || null, - query || "", - path || "", - onConnected, - onClose, - idleTimeout, - connectionTimeout, - maxLifetime, - !prepare, - ) as $ZigGeneratedClasses.PostgresSQLConnection; - } catch (e) { - onClose(e); - } -} - -function doCreateQuery(strings, values, allowUnsafeTransaction, poolSize, bigint, simple) { - const [sqlString, final_values] = normalizeQuery(strings, values); - if (!allowUnsafeTransaction) { - if (poolSize !== 1) { - const upperCaseSqlString = sqlString.toUpperCase().trim(); - if (upperCaseSqlString.startsWith("BEGIN") || upperCaseSqlString.startsWith("START TRANSACTION")) { - throw $ERR_POSTGRES_UNSAFE_TRANSACTION("Only use sql.begin, sql.reserved or max: 1"); - } - } - } - return createQuery(sqlString, final_values, new SQLResultArray(), undefined, !!bigint, !!simple); -} - -class SQLHelper { - value: any; - columns: string[]; - constructor(value, keys) { - if (keys?.length === 0) { - keys = Object.keys(value[0]); - } - - for (let key of keys) { - if (typeof key === "string") { - const asNumber = Number(key); - if (Number.isNaN(asNumber)) { - continue; - } - key = asNumber; - } - - if (typeof key !== "string") { - if (Number.isSafeInteger(key)) { - if (key >= 0 && key <= 64 * 1024) { - continue; - } - } - - throw new Error(`Keys must be strings or numbers: ${key}`); - } - } - - this.value = value; - this.columns = keys; - } -} - -function decodeIfValid(value) { - if (value) { - return decodeURIComponent(value); - } - return null; -} -function loadOptions(o: Bun.SQL.Options) { - var hostname, - port, - username, - password, - database, - tls, - url, - query, - adapter, - idleTimeout, - connectionTimeout, - maxLifetime, - onconnect, - onclose, - max, - bigint, - path; - let prepare = true; - const env = Bun.env || {}; - var sslMode: SSLMode = SSLMode.disable; - - if (o === undefined || (typeof o === "string" && o.length === 0)) { - let urlString = env.POSTGRES_URL || env.DATABASE_URL || env.PGURL || env.PG_URL; - if (!urlString) { - urlString = env.TLS_POSTGRES_DATABASE_URL || env.TLS_DATABASE_URL; - if (urlString) { - sslMode = SSLMode.require; - } - } - - if (urlString) { - url = new URL(urlString); - o = {}; - } - } else if (o && typeof o === "object") { - if (o instanceof URL) { - url = o; - } else if (o?.url) { - const _url = o.url; - if (typeof _url === "string") { - url = new URL(_url); - } else if (_url && typeof _url === "object" && _url instanceof URL) { - url = _url; - } - } - if (o?.tls) { - sslMode = SSLMode.require; - tls = o.tls; - } - } else if (typeof o === "string") { - url = new URL(o); - } - o ||= {}; - query = ""; - - if (url) { - ({ hostname, port, username, password, adapter } = o); - // object overrides url - hostname ||= url.hostname; - port ||= url.port; - username ||= decodeIfValid(url.username); - password ||= decodeIfValid(url.password); - adapter ||= url.protocol; - - if (adapter[adapter.length - 1] === ":") { - adapter = adapter.slice(0, -1); - } - - const queryObject = url.searchParams.toJSON(); - for (const key in queryObject) { - if (key.toLowerCase() === "sslmode") { - sslMode = normalizeSSLMode(queryObject[key]); - } else if (key.toLowerCase() === "path") { - path = queryObject[key]; - } else { - // this is valid for postgres for other databases it might not be valid - // check adapter then implement for other databases - // encode string with \0 as finalizer - // must be key\0value\0 - query += `${key}\0${queryObject[key]}\0`; - } - } - query = query.trim(); - } - hostname ||= o.hostname || o.host || 
env.PGHOST || "localhost"; - - port ||= Number(o.port || env.PGPORT || 5432); - - path ||= o.path || ""; - // add /.s.PGSQL.${port} if it doesn't exist - if (path && path?.indexOf("/.s.PGSQL.") === -1) { - path = `${path}/.s.PGSQL.${port}`; - } - - username ||= o.username || o.user || env.PGUSERNAME || env.PGUSER || env.USER || env.USERNAME || "postgres"; - database ||= o.database || o.db || decodeIfValid((url?.pathname ?? "").slice(1)) || env.PGDATABASE || username; - password ||= o.password || o.pass || env.PGPASSWORD || ""; - const connection = o.connection; - if (connection && $isObject(connection)) { - for (const key in connection) { - if (connection[key] !== undefined) { - query += `${key}\0${connection[key]}\0`; - } - } - } - tls ||= o.tls || o.ssl; - adapter ||= o.adapter || "postgres"; - max = o.max; - - idleTimeout ??= o.idleTimeout; - idleTimeout ??= o.idle_timeout; - connectionTimeout ??= o.connectionTimeout; - connectionTimeout ??= o.connection_timeout; - connectionTimeout ??= o.connectTimeout; - connectionTimeout ??= o.connect_timeout; - maxLifetime ??= o.maxLifetime; - maxLifetime ??= o.max_lifetime; - bigint ??= o.bigint; - // we need to explicitly set prepare to false if it is false - if (o.prepare === false) { - prepare = false; - } - - onconnect ??= o.onconnect; - onclose ??= o.onclose; - if (onconnect !== undefined) { - if (!$isCallable(onconnect)) { - throw $ERR_INVALID_ARG_TYPE("onconnect", "function", onconnect); - } - } - - if (onclose !== undefined) { - if (!$isCallable(onclose)) { - throw $ERR_INVALID_ARG_TYPE("onclose", "function", onclose); - } - } - - if (idleTimeout != null) { - idleTimeout = Number(idleTimeout); - if (idleTimeout > 2 ** 31 || idleTimeout < 0 || idleTimeout !== idleTimeout) { - throw $ERR_INVALID_ARG_VALUE( - "options.idle_timeout", - idleTimeout, - "must be a non-negative integer less than 2^31", - ); - } - idleTimeout *= 1000; - } - - if (connectionTimeout != null) { - connectionTimeout = Number(connectionTimeout); - if (connectionTimeout > 2 ** 31 || connectionTimeout < 0 || connectionTimeout !== connectionTimeout) { - throw $ERR_INVALID_ARG_VALUE( - "options.connection_timeout", - connectionTimeout, - "must be a non-negative integer less than 2^31", - ); - } - connectionTimeout *= 1000; - } - - if (maxLifetime != null) { - maxLifetime = Number(maxLifetime); - if (maxLifetime > 2 ** 31 || maxLifetime < 0 || maxLifetime !== maxLifetime) { - throw $ERR_INVALID_ARG_VALUE( - "options.max_lifetime", - maxLifetime, - "must be a non-negative integer less than 2^31", - ); - } - maxLifetime *= 1000; - } - - if (max != null) { - max = Number(max); - if (max > 2 ** 31 || max < 1 || max !== max) { - throw $ERR_INVALID_ARG_VALUE("options.max", max, "must be a non-negative integer between 1 and 2^31"); - } - } - - if (sslMode !== SSLMode.disable && !tls?.serverName) { - if (hostname) { - tls = { ...tls, serverName: hostname }; - } else if (tls) { - tls = true; - } - } - - if (tls && sslMode === SSLMode.disable) { - sslMode = SSLMode.prefer; - } - port = Number(port); - - if (!Number.isSafeInteger(port) || port < 1 || port > 65535) { - throw $ERR_INVALID_ARG_VALUE("port", port, "must be a non-negative integer between 1 and 65535"); - } - - switch (adapter) { - case "postgres": - case "postgresql": - adapter = "postgres"; - break; - default: - throw new Error(`Unsupported adapter: ${adapter}. 
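The option loading above resolves connection settings in layers: explicit options override the URL, which overrides the environment (`POSTGRES_URL`/`DATABASE_URL`, then `PGHOST`, `PGPORT`, `PGUSER`, `PGPASSWORD`, `PGDATABASE`), and the time-based options are given in seconds. A hedged sketch of an explicit configuration; every value below is illustrative:

```ts
import { SQL } from "bun";

const db = new SQL({
  url: "postgres://reporter@db.internal:5432/reports", // overrides PG* env vars
  password: () => Bun.env.REPORTS_DB_PASSWORD ?? "",   // may also return a Promise
  max: 10,               // pool size (defaults to 10)
  idleTimeout: 30,       // seconds; converted to milliseconds internally
  connectionTimeout: 30, // seconds (default 30)
  maxLifetime: 3600,     // seconds
  prepare: true,         // false switches to unnamed prepared statements
  onconnect: () => console.log("connection established"),
  onclose: err => console.log("connection closed", err?.message),
});

const [row] = await db`SELECT current_database() AS name`;
console.log(row.name);
```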
Only \"postgres\" is supported for now`); - } - const ret: any = { hostname, port, username, password, database, tls, query, sslMode, adapter, prepare, bigint }; - if (idleTimeout != null) { - ret.idleTimeout = idleTimeout; - } - if (connectionTimeout != null) { - ret.connectionTimeout = connectionTimeout; - } - if (maxLifetime != null) { - ret.maxLifetime = maxLifetime; - } - if (onconnect !== undefined) { - ret.onconnect = onconnect; - } - if (onclose !== undefined) { - ret.onclose = onclose; - } - ret.max = max || 10; - - return ret; -} +type TransactionCallback = (sql: (strings: string, ...values: any[]) => Query) => Promise; enum ReservedConnectionState { acceptQueries = 1 << 0, closed = 1 << 1, } -function assertValidTransactionName(name: string) { - if (name.indexOf("'") !== -1) { - throw Error(`Distributed transaction name cannot contain single quotes.`); +interface TransactionState { + connectionState: ReservedConnectionState; + reject: (err: Error) => void; + storedError?: Error | null | undefined; + queries: Set>; +} + +function adapterFromOptions(options: Bun.SQL.__internal.DefinedOptions) { + switch (options.adapter) { + case "postgres": + return new PostgresAdapter(options); + case "sqlite": + return new SQLiteAdapter(options); + default: + throw new Error(`Unsupported adapter: ${(options as { adapter?: string }).adapter}.`); } } -function SQL(o, e = {}) { - if (typeof o === "string" || o instanceof URL) { - o = { ...e, url: o }; - } - var connectionInfo = loadOptions(o); - var pool = new ConnectionPool(connectionInfo); +const SQL: typeof Bun.SQL = function SQL( + stringOrUrlOrOptions: Bun.SQL.Options | string | undefined = undefined, + definitelyOptionsButMaybeEmpty: Bun.SQL.Options = {}, +): Bun.SQL { + const connectionInfo = parseOptions(stringOrUrlOrOptions, definitelyOptionsButMaybeEmpty); - function onQueryDisconnected(err) { + const pool = adapterFromOptions(connectionInfo); + + function onQueryDisconnected(this: Query, err: Error) { // connection closed mid query this will not be called if the query finishes first const query = this; + if (err) { return query.reject(err); } + // query is cancelled when waiting for a connection from the pool if (query.cancelled) { - return query.reject($ERR_POSTGRES_QUERY_CANCELLED("Query cancelled")); + return query.reject( + new PostgresError("Query cancelled", { + code: "ERR_POSTGRES_QUERY_CANCELLED", + }), + ); } } - function onQueryConnected(handle, err, pooledConnection) { + function onQueryConnected( + this: Query, + handle: BaseQueryHandle, + err, + connectionHandle: ConnectionHandle, + ) { const query = this; if (err) { // fail to aquire a connection from the pool @@ -1576,72 +75,121 @@ function SQL(o, e = {}) { } // query is cancelled when waiting for a connection from the pool if (query.cancelled) { - pool.release(pooledConnection); // release the connection back to the pool - return query.reject($ERR_POSTGRES_QUERY_CANCELLED("Query cancelled")); + pool.release(connectionHandle); // release the connection back to the pool + return query.reject( + new PostgresError("Query cancelled", { + code: "ERR_POSTGRES_QUERY_CANCELLED", + }), + ); } - // bind close event to the query (will unbind and auto release the connection when the query is finished) - pooledConnection.bindQuery(query, onQueryDisconnected.bind(query)); - handle.run(pooledConnection.connection, query); + if (connectionHandle.bindQuery) { + connectionHandle.bindQuery(query, onQueryDisconnected.bind(query)); + } + + try { + const connection = pool.getConnectionForQuery 
? pool.getConnectionForQuery(connectionHandle) : connectionHandle; + const result = handle.run(connection, query); + + if (result && $isPromise(result)) { + result.catch(err => query.reject(err)); + } + } catch (err) { + query.reject(err); + } } function queryFromPoolHandler(query, handle, err) { if (err) { // fail to create query return query.reject(err); } + // query is cancelled if (!handle || query.cancelled) { - return query.reject($ERR_POSTGRES_QUERY_CANCELLED("Query cancelled")); + return query.reject( + new PostgresError("Query cancelled", { + code: "ERR_POSTGRES_QUERY_CANCELLED", + }), + ); } pool.connect(onQueryConnected.bind(query, handle)); } - function queryFromPool(strings, values) { + + function queryFromPool( + strings: string | TemplateStringsArray | import("internal/sql/shared.ts").SQLHelper | Query, + values: any[], + ) { try { return new Query( strings, values, connectionInfo.bigint ? SQLQueryFlags.bigint : SQLQueryFlags.none, - connectionInfo.max, queryFromPoolHandler, + pool, ); } catch (err) { return Promise.reject(err); } } - function unsafeQuery(strings, values) { + function unsafeQuery( + strings: string | TemplateStringsArray | import("internal/sql/shared.ts").SQLHelper | Query, + values: any[], + ) { try { let flags = connectionInfo.bigint ? SQLQueryFlags.bigint | SQLQueryFlags.unsafe : SQLQueryFlags.unsafe; if ((values?.length ?? 0) === 0) { flags |= SQLQueryFlags.simple; } - return new Query(strings, values, flags, connectionInfo.max, queryFromPoolHandler); + return new Query(strings, values, flags, queryFromPoolHandler, pool); } catch (err) { return Promise.reject(err); } } - function onTransactionQueryDisconnected(query) { + function onTransactionQueryDisconnected(query: Query) { const transactionQueries = this; transactionQueries.delete(query); } + function queryFromTransactionHandler(transactionQueries, query, handle, err) { const pooledConnection = this; if (err) { transactionQueries.delete(query); return query.reject(err); } + // query is cancelled if (query.cancelled) { transactionQueries.delete(query); - return query.reject($ERR_POSTGRES_QUERY_CANCELLED("Query cancelled")); + return query.reject( + new PostgresError("Query cancelled", { + code: "ERR_POSTGRES_QUERY_CANCELLED", + }), + ); } query.finally(onTransactionQueryDisconnected.bind(transactionQueries, query)); - handle.run(pooledConnection.connection, query); + + try { + // Use adapter method to get the actual connection + const connection = pool.getConnectionForQuery ? pool.getConnectionForQuery(pooledConnection) : pooledConnection; + const result = handle.run(connection, query); + if (result && $isPromise(result)) { + result.catch(err => query.reject(err)); + } + } catch (err) { + query.reject(err); + } } - function queryFromTransaction(strings, values, pooledConnection, transactionQueries) { + + function queryFromTransaction( + strings: string | TemplateStringsArray | import("internal/sql/shared.ts").SQLHelper | Query, + values: any[], + pooledConnection: PooledPostgresConnection, + transactionQueries: Set>, + ) { try { const query = new Query( strings, @@ -1649,16 +197,23 @@ function SQL(o, e = {}) { connectionInfo.bigint ? 
SQLQueryFlags.allowUnsafeTransaction | SQLQueryFlags.bigint : SQLQueryFlags.allowUnsafeTransaction, - connectionInfo.max, queryFromTransactionHandler.bind(pooledConnection, transactionQueries), + pool, ); + transactionQueries.add(query); return query; } catch (err) { return Promise.reject(err); } } - function unsafeQueryFromTransaction(strings, values, pooledConnection, transactionQueries) { + + function unsafeQueryFromTransaction( + strings: string | TemplateStringsArray | import("internal/sql/shared.ts").SQLHelper | Query, + values: any[], + pooledConnection: PooledPostgresConnection, + transactionQueries: Set>, + ) { try { let flags = connectionInfo.bigint ? SQLQueryFlags.allowUnsafeTransaction | SQLQueryFlags.unsafe | SQLQueryFlags.bigint @@ -1671,8 +226,8 @@ function SQL(o, e = {}) { strings, values, flags, - connectionInfo.max, queryFromTransactionHandler.bind(pooledConnection, transactionQueries), + pool, ); transactionQueries.add(query); return query; @@ -1681,36 +236,41 @@ function SQL(o, e = {}) { } } - function onTransactionDisconnected(err) { + function onTransactionDisconnected(this: TransactionState, err: Error) { const reject = this.reject; this.connectionState |= ReservedConnectionState.closed; for (const query of this.queries) { - (query as Query).reject(err); + query.reject(err); } + if (err) { return reject(err); } } - function onReserveConnected(err, pooledConnection) { + function onReserveConnected(this: Query, err: Error | null, pooledConnection) { const { resolve, reject } = this; + if (err) { return reject(err); } let reservedTransaction = new Set(); - const state = { + const state: TransactionState = { connectionState: ReservedConnectionState.acceptQueries, reject, storedError: null, queries: new Set(), }; - const onClose = onTransactionDisconnected.bind(state); - pooledConnection.onClose(onClose); - function reserved_sql(strings, ...values) { + const onClose = onTransactionDisconnected.bind(state); + if (pooledConnection.onClose) { + pooledConnection.onClose(onClose); + } + + function reserved_sql(strings: string | TemplateStringsArray | SQLHelper | Query, ...values: any[]) { if ( state.connectionState & ReservedConnectionState.closed || !(state.connectionState & ReservedConnectionState.acceptQueries) @@ -1719,7 +279,7 @@ function SQL(o, e = {}) { } if ($isArray(strings)) { // detect if is tagged template - if (!$isArray((strings as unknown as TemplateStringsArray).raw)) { + if (!$isArray(strings.raw)) { return new SQLHelper(strings, values); } } else if (typeof strings === "object" && !(strings instanceof Query) && !(strings instanceof SQLHelper)) { @@ -1728,9 +288,11 @@ function SQL(o, e = {}) { // we use the same code path as the transaction sql return queryFromTransaction(strings, values, pooledConnection, state.queries); } + reserved_sql.unsafe = (string, args = []) => { return unsafeQueryFromTransaction(string, args, pooledConnection, state.queries); }; + reserved_sql.file = async (path: string, args = []) => { return await Bun.file(path) .text() @@ -1738,6 +300,7 @@ function SQL(o, e = {}) { return unsafeQueryFromTransaction(text, args, pooledConnection, state.queries); }); }; + reserved_sql.connect = () => { if (state.connectionState & ReservedConnectionState.closed) { return Promise.reject(connectionClosedError()); @@ -1746,36 +309,20 @@ function SQL(o, e = {}) { }; reserved_sql.commitDistributed = async function (name: string) { - const adapter = connectionInfo.adapter; - assertValidTransactionName(name); - switch (adapter) { - case "postgres": - return 
await reserved_sql.unsafe(`COMMIT PREPARED '${name}'`); - case "mysql": - return await reserved_sql.unsafe(`XA COMMIT '${name}'`); - case "mssql": - throw Error(`MSSQL distributed transaction is automatically committed.`); - case "sqlite": - throw Error(`SQLite dont support distributed transactions.`); - default: - throw Error(`Unsupported adapter: ${adapter}.`); + if (!pool.getCommitDistributedSQL) { + throw Error(`This adapter doesn't support distributed transactions.`); } + + const sql = pool.getCommitDistributedSQL(name); + return await reserved_sql.unsafe(sql); }; reserved_sql.rollbackDistributed = async function (name: string) { - assertValidTransactionName(name); - const adapter = connectionInfo.adapter; - switch (adapter) { - case "postgres": - return await reserved_sql.unsafe(`ROLLBACK PREPARED '${name}'`); - case "mysql": - return await reserved_sql.unsafe(`XA ROLLBACK '${name}'`); - case "mssql": - throw Error(`MSSQL distributed transaction is automatically rolled back.`); - case "sqlite": - throw Error(`SQLite dont support distributed transactions.`); - default: - throw Error(`Unsupported adapter: ${adapter}.`); + if (!pool.getRollbackDistributedSQL) { + throw Error(`This adapter doesn't support distributed transactions.`); } + + const sql = pool.getRollbackDistributedSQL(name); + return await reserved_sql.unsafe(sql); }; // reserve is allowed to be called inside reserved connection but will return a new reserved connection from the pool @@ -1836,7 +383,11 @@ function SQL(o, e = {}) { if (state.connectionState & ReservedConnectionState.closed) { throw connectionClosedError(); } - return pooledConnection.flush(); + // Use pooled connection's flush if available, otherwise use adapter's flush + if (pooledConnection.flush) { + return pooledConnection.flush(); + } + return pool.flush(); }; reserved_sql.close = async (options?: { timeout?: number }) => { const reserveQueries = state.queries; @@ -1861,7 +412,7 @@ function SQL(o, e = {}) { const timer = setTimeout(() => { state.connectionState |= ReservedConnectionState.closed; for (const query of reserveQueries) { - (query as Query).cancel(); + (query as Query).cancel(); } state.connectionState |= ReservedConnectionState.closed; pooledConnection.close(); @@ -1878,7 +429,7 @@ function SQL(o, e = {}) { } state.connectionState |= ReservedConnectionState.closed; for (const query of reserveQueries) { - (query as Query).cancel(); + (query as Query).cancel(); } pooledConnection.close(); @@ -1895,7 +446,10 @@ function SQL(o, e = {}) { // just release the connection back to the pool state.connectionState |= ReservedConnectionState.closed; state.connectionState &= ~ReservedConnectionState.acceptQueries; - pooledConnection.queries.delete(onClose); + // Use adapter method to detach connection close handler + if (pool.detachConnectionCloseHandler) { + pool.detachConnectionCloseHandler(pooledConnection, onClose); + } pool.release(pooledConnection); return Promise.resolve(undefined); }; @@ -1944,7 +498,8 @@ function SQL(o, e = {}) { if (err) { return reject(err); } - const state = { + + const state: TransactionState = { connectionState: ReservedConnectionState.acceptQueries, reject, queries: new Set(), @@ -1952,86 +507,60 @@ function SQL(o, e = {}) { let savepoints = 0; let transactionSavepoints = new Set(); - const adapter = connectionInfo.adapter; - let BEGIN_COMMAND: string = "BEGIN"; - let ROLLBACK_COMMAND: string = "ROLLBACK"; - let COMMIT_COMMAND: string = "COMMIT"; - let SAVEPOINT_COMMAND: string = "SAVEPOINT"; - let 
RELEASE_SAVEPOINT_COMMAND: string | null = "RELEASE SAVEPOINT"; - let ROLLBACK_TO_SAVEPOINT_COMMAND: string = "ROLLBACK TO SAVEPOINT"; - // MySQL and maybe other adapters need to call XA END or some other command before commit or rollback in a distributed transaction + + let BEGIN_COMMAND: string; + let ROLLBACK_COMMAND: string; + let COMMIT_COMMAND: string; + let SAVEPOINT_COMMAND: string; + let RELEASE_SAVEPOINT_COMMAND: string | null; + let ROLLBACK_TO_SAVEPOINT_COMMAND: string; let BEFORE_COMMIT_OR_ROLLBACK_COMMAND: string | null = null; + if (distributed) { - if (options.indexOf("'") !== -1) { + // Get distributed transaction commands from adapter + const commands = pool.getDistributedTransactionCommands?.(options); + if (!commands) { pool.release(pooledConnection); - return reject(new Error(`Distributed transaction name cannot contain single quotes.`)); + return reject(new Error(`This adapter doesn't support distributed transactions.`)); } - // distributed transaction - // in distributed transaction options is the name/id of the transaction - switch (adapter) { - case "postgres": - // in postgres we only need to call prepare transaction instead of commit - COMMIT_COMMAND = `PREPARE TRANSACTION '${options}'`; - break; - case "mysql": - // MySQL we use XA transactions - // START TRANSACTION is autocommit false - BEGIN_COMMAND = `XA START '${options}'`; - BEFORE_COMMIT_OR_ROLLBACK_COMMAND = `XA END '${options}'`; - COMMIT_COMMAND = `XA PREPARE '${options}'`; - ROLLBACK_COMMAND = `XA ROLLBACK '${options}'`; - break; - case "sqlite": - pool.release(pooledConnection); - // do not support options just use defaults - return reject(new Error(`SQLite dont support distributed transactions.`)); - case "mssql": - BEGIN_COMMAND = ` BEGIN DISTRIBUTED TRANSACTION ${options}`; - ROLLBACK_COMMAND = `ROLLBACK TRANSACTION ${options}`; - COMMIT_COMMAND = `COMMIT TRANSACTION ${options}`; - break; - default: - pool.release(pooledConnection); - - // TODO: use ERR_ - return reject(new Error(`Unsupported adapter: ${adapter}.`)); - } + BEGIN_COMMAND = commands.BEGIN; + COMMIT_COMMAND = commands.COMMIT; + ROLLBACK_COMMAND = commands.ROLLBACK; + SAVEPOINT_COMMAND = commands.SAVEPOINT; + RELEASE_SAVEPOINT_COMMAND = commands.RELEASE_SAVEPOINT; + ROLLBACK_TO_SAVEPOINT_COMMAND = commands.ROLLBACK_TO_SAVEPOINT; + BEFORE_COMMIT_OR_ROLLBACK_COMMAND = commands.BEFORE_COMMIT_OR_ROLLBACK || null; } else { - // normal transaction - switch (adapter) { - case "postgres": - if (options) { - BEGIN_COMMAND = `BEGIN ${options}`; - } - break; - case "mysql": - // START TRANSACTION is autocommit false - BEGIN_COMMAND = options ? `START TRANSACTION ${options}` : "START TRANSACTION"; - break; - - case "sqlite": - if (options) { - // sqlite supports DEFERRED, IMMEDIATE, EXCLUSIVE - BEGIN_COMMAND = `BEGIN ${options}`; - } - break; - case "mssql": - BEGIN_COMMAND = options ? 
`START TRANSACTION ${options}` : "START TRANSACTION"; - ROLLBACK_COMMAND = "ROLLBACK TRANSACTION"; - COMMIT_COMMAND = "COMMIT TRANSACTION"; - SAVEPOINT_COMMAND = "SAVE"; - RELEASE_SAVEPOINT_COMMAND = null; // mssql dont have release savepoint - ROLLBACK_TO_SAVEPOINT_COMMAND = "ROLLBACK TRANSACTION"; - break; - default: + // Validate transaction options if provided + if (options && pool.validateTransactionOptions) { + const validation = pool.validateTransactionOptions(options); + if (!validation.valid) { pool.release(pooledConnection); - // TODO: use ERR_ - return reject(new Error(`Unsupported adapter: ${adapter}.`)); + return reject(new Error(validation.error)); + } + } + + try { + const commands = pool.getTransactionCommands(options); + BEGIN_COMMAND = commands.BEGIN; + COMMIT_COMMAND = commands.COMMIT; + ROLLBACK_COMMAND = commands.ROLLBACK; + SAVEPOINT_COMMAND = commands.SAVEPOINT; + RELEASE_SAVEPOINT_COMMAND = commands.RELEASE_SAVEPOINT; + ROLLBACK_TO_SAVEPOINT_COMMAND = commands.ROLLBACK_TO_SAVEPOINT; + BEFORE_COMMIT_OR_ROLLBACK_COMMAND = commands.BEFORE_COMMIT_OR_ROLLBACK || null; + } catch (err) { + pool.release(pooledConnection); + return reject(err); } } + const onClose = onTransactionDisconnected.bind(state); - pooledConnection.onClose(onClose); + // Use adapter method to attach connection close handler + if (pool.attachConnectionCloseHandler) { + pool.attachConnectionCloseHandler(pooledConnection, onClose); + } function run_internal_transaction_sql(string) { if (state.connectionState & ReservedConnectionState.closed) { @@ -2039,7 +568,10 @@ function SQL(o, e = {}) { } return unsafeQueryFromTransaction(string, [], pooledConnection, state.queries); } - function transaction_sql(strings, ...values) { + function transaction_sql( + strings: string | TemplateStringsArray | import("internal/sql/shared.ts").SQLHelper | Query, + ...values: any[] + ) { if ( state.connectionState & ReservedConnectionState.closed || !(state.connectionState & ReservedConnectionState.acceptQueries) @@ -2079,57 +611,53 @@ function SQL(o, e = {}) { return Promise.resolve(transaction_sql); }; transaction_sql.commitDistributed = async function (name: string) { - assertValidTransactionName(name); - switch (adapter) { - case "postgres": - return await run_internal_transaction_sql(`COMMIT PREPARED '${name}'`); - case "mysql": - return await run_internal_transaction_sql(`XA COMMIT '${name}'`); - case "mssql": - throw Error(`MSSQL distributed transaction is automatically committed.`); - case "sqlite": - throw Error(`SQLite dont support distributed transactions.`); - default: - throw Error(`Unsupported adapter: ${adapter}.`); + if (!pool.getCommitDistributedSQL) { + throw Error(`This adapter doesn't support distributed transactions.`); } + + const sql = pool.getCommitDistributedSQL(name); + return await run_internal_transaction_sql(sql); }; transaction_sql.rollbackDistributed = async function (name: string) { - assertValidTransactionName(name); - switch (adapter) { - case "postgres": - return await run_internal_transaction_sql(`ROLLBACK PREPARED '${name}'`); - case "mysql": - return await run_internal_transaction_sql(`XA ROLLBACK '${name}'`); - case "mssql": - throw Error(`MSSQL distributed transaction is automatically rolled back.`); - case "sqlite": - throw Error(`SQLite dont support distributed transactions.`); - default: - throw Error(`Unsupported adapter: ${adapter}.`); + if (!pool.getRollbackDistributedSQL) { + throw Error(`This adapter doesn't support distributed transactions.`); } + + const sql = 
pool.getRollbackDistributedSQL(name); + return await run_internal_transaction_sql(sql); }; // begin is not allowed on a transaction we need to use savepoint() instead transaction_sql.begin = function () { if (distributed) { - throw $ERR_POSTGRES_INVALID_TRANSACTION_STATE("cannot call begin inside a distributed transaction"); + throw new PostgresError("cannot call begin inside a distributed transaction", { + code: "ERR_POSTGRES_INVALID_TRANSACTION_STATE", + }); } - throw $ERR_POSTGRES_INVALID_TRANSACTION_STATE("cannot call begin inside a transaction use savepoint() instead"); + throw new PostgresError("cannot call begin inside a transaction use savepoint() instead", { + code: "POSTGRES_INVALID_TRANSACTION_STATE", + }); }; transaction_sql.beginDistributed = function () { if (distributed) { - throw $ERR_POSTGRES_INVALID_TRANSACTION_STATE("cannot call beginDistributed inside a distributed transaction"); + throw new PostgresError("cannot call beginDistributed inside a distributed transaction", { + code: "ERR_POSTGRES_INVALID_TRANSACTION_STATE", + }); } - throw $ERR_POSTGRES_INVALID_TRANSACTION_STATE( - "cannot call beginDistributed inside a transaction use savepoint() instead", - ); + throw new PostgresError("cannot call beginDistributed inside a transaction use savepoint() instead", { + code: "POSTGRES_INVALID_TRANSACTION_STATE", + }); }; transaction_sql.flush = function () { if (state.connectionState & ReservedConnectionState.closed) { throw connectionClosedError(); } - return pooledConnection.flush(); + // Use pooled connection's flush if available, otherwise use adapter's flush + if (pooledConnection.flush) { + return pooledConnection.flush(); + } + return pool.flush(); }; transaction_sql.close = async function (options?: { timeout?: number }) { // we dont actually close the connection here, we just set the state to closed and rollback the transaction @@ -2155,7 +683,7 @@ function SQL(o, e = {}) { const pending_savepoints = Array.from(transactionSavepoints); const timer = setTimeout(async () => { for (const query of transactionQueries) { - (query as Query).cancel(); + (query as Query).cancel(); } if (BEFORE_COMMIT_OR_ROLLBACK_COMMAND) { await run_internal_transaction_sql(BEFORE_COMMIT_OR_ROLLBACK_COMMAND); @@ -2173,7 +701,7 @@ function SQL(o, e = {}) { } } for (const query of transactionQueries) { - (query as Query).cancel(); + (query as Query).cancel(); } if (BEFORE_COMMIT_OR_ROLLBACK_COMMAND) { await run_internal_transaction_sql(BEFORE_COMMIT_OR_ROLLBACK_COMMAND); @@ -2212,7 +740,9 @@ function SQL(o, e = {}) { } if (distributed) { transaction_sql.savepoint = async (_fn: TransactionCallback, _name?: string): Promise => { - throw $ERR_POSTGRES_INVALID_TRANSACTION_STATE("cannot call savepoint inside a distributed transaction"); + throw new PostgresError("cannot call savepoint inside a distributed transaction", { + code: "ERR_POSTGRES_INVALID_TRANSACTION_STATE", + }); }; } else { transaction_sql.savepoint = async (fn: TransactionCallback, name?: string): Promise => { @@ -2269,13 +799,19 @@ function SQL(o, e = {}) { return reject(err); } finally { state.connectionState |= ReservedConnectionState.closed; - pooledConnection.queries.delete(onClose); + // Use adapter method to detach connection close handler + if (pool.detachConnectionCloseHandler) { + pool.detachConnectionCloseHandler(pooledConnection, onClose); + } if (!dontRelease) { pool.release(pooledConnection); } } } - function sql(strings, ...values) { + function sql( + strings: string | TemplateStringsArray | 
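The transaction wiring above asks the adapter for its `BEGIN`/`COMMIT`/`ROLLBACK`/`SAVEPOINT` strings and exposes them through `sql.begin()` and the nested `savepoint()` helper. A usage sketch of that surface (table and column names are illustrative):

```ts
import { sql } from "bun";

const result = await sql.begin(async tx => {
  await tx`INSERT INTO accounts (name, balance) VALUES (${"alice"}, ${100})`;

  // A failed savepoint only rolls back its own statements (ROLLBACK TO SAVEPOINT);
  // the surrounding transaction keeps going.
  await tx
    .savepoint(async sp => {
      await sp`UPDATE accounts SET balance = balance - ${500} WHERE name = ${"alice"}`;
      throw new Error("insufficient funds");
    })
    .catch(() => {});

  const rows = await tx`SELECT name, balance FROM accounts WHERE name = ${"alice"}`;
  return rows; // the callback's return value becomes the result of sql.begin()
});

console.log(result); // [{ name: "alice", balance: 100 }] — the savepoint's update was undone
```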
import("internal/sql/shared.ts").SQLHelper | Query, + ...values: any[] + ) { if ($isArray(strings)) { // detect if is tagged template if (!$isArray((strings as unknown as TemplateStringsArray).raw)) { @@ -2298,11 +834,18 @@ function SQL(o, e = {}) { return unsafeQuery(text, args); }); }; + sql.reserve = () => { if (pool.closed) { return Promise.reject(connectionClosedError()); } + // Check if adapter supports reserved connections + if (pool.supportsReservedConnections && !pool.supportsReservedConnections()) { + return Promise.reject(new Error("This adapter doesn't support connection reservation")); + } + + // Try to reserve a connection - adapters that support it will handle appropriately const promiseWithResolvers = Promise.withResolvers(); pool.connect(onReserveConnected.bind(promiseWithResolvers), true); return promiseWithResolvers.promise; @@ -2311,40 +854,26 @@ function SQL(o, e = {}) { if (pool.closed) { throw connectionClosedError(); } - assertValidTransactionName(name); - const adapter = connectionInfo.adapter; - switch (adapter) { - case "postgres": - return await sql.unsafe(`ROLLBACK PREPARED '${name}'`); - case "mysql": - return await sql.unsafe(`XA ROLLBACK '${name}'`); - case "mssql": - throw Error(`MSSQL distributed transaction is automatically rolled back.`); - case "sqlite": - throw Error(`SQLite dont support distributed transactions.`); - default: - throw Error(`Unsupported adapter: ${adapter}.`); + + if (!pool.getRollbackDistributedSQL) { + throw Error(`This adapter doesn't support distributed transactions.`); } + + const sqlQuery = pool.getRollbackDistributedSQL(name); + return await sql.unsafe(sqlQuery); }; sql.commitDistributed = async function (name: string) { if (pool.closed) { throw connectionClosedError(); } - assertValidTransactionName(name); - const adapter = connectionInfo.adapter; - switch (adapter) { - case "postgres": - return await sql.unsafe(`COMMIT PREPARED '${name}'`); - case "mysql": - return await sql.unsafe(`XA COMMIT '${name}'`); - case "mssql": - throw Error(`MSSQL distributed transaction is automatically committed.`); - case "sqlite": - throw Error(`SQLite dont support distributed transactions.`); - default: - throw Error(`Unsupported adapter: ${adapter}.`); + + if (!pool.getCommitDistributedSQL) { + throw Error(`This adapter doesn't support distributed transactions.`); } + + const sqlQuery = pool.getCommitDistributedSQL(name); + return await sql.unsafe(sqlQuery); }; sql.beginDistributed = (name: string, fn: TransactionCallback) => { @@ -2361,8 +890,8 @@ function SQL(o, e = {}) { return Promise.reject($ERR_INVALID_ARG_VALUE("fn", callback, "must be a function")); } const { promise, resolve, reject } = Promise.withResolvers(); - // lets just reuse the same code path as the transaction begin - pool.connect(onTransactionConnected.bind(null, callback, name, resolve, reject, false, true), true); + const useReserved = pool.supportsReservedConnections?.() ?? true; + pool.connect(onTransactionConnected.bind(null, callback, name, resolve, reject, false, true), useReserved); return promise; }; @@ -2382,7 +911,8 @@ function SQL(o, e = {}) { return Promise.reject($ERR_INVALID_ARG_VALUE("fn", callback, "must be a function")); } const { promise, resolve, reject } = Promise.withResolvers(); - pool.connect(onTransactionConnected.bind(null, callback, options, resolve, reject, false, false), true); + const useReserved = pool.supportsReservedConnections?.() ?? 
true; + pool.connect(onTransactionConnected.bind(null, callback, options, resolve, reject, false, false), useReserved); return promise; }; sql.connect = () => { @@ -2422,9 +952,9 @@ function SQL(o, e = {}) { sql.distributed = sql.beginDistributed; sql.end = sql.close; return sql; -} +}; -var lazyDefaultSQL: InstanceType; +var lazyDefaultSQL: Bun.SQL; function resetDefaultSQL(sql) { lazyDefaultSQL = sql; @@ -2439,15 +969,17 @@ function ensureDefaultSQL() { } } -var defaultSQLObject: InstanceType = function sql(strings, ...values) { +var defaultSQLObject: Bun.SQL = function sql(strings, ...values) { if (new.target) { return SQL(strings); } + if (!lazyDefaultSQL) { resetDefaultSQL(SQL(undefined)); } + return lazyDefaultSQL(strings, ...values); -} as typeof BunTypes.SQL; +} as Bun.SQL; defaultSQLObject.reserve = (...args) => { ensureDefaultSQL(); @@ -2484,7 +1016,7 @@ defaultSQLObject.file = (filename: string, ...args) => { defaultSQLObject.transaction = defaultSQLObject.begin = function (...args: Parameters) { ensureDefaultSQL(); return lazyDefaultSQL.begin(...args); -} as (typeof BunTypes.SQL)["begin"]; +} as Bun.SQL["begin"]; defaultSQLObject.end = defaultSQLObject.close = (...args: Parameters) => { ensureDefaultSQL(); @@ -2510,12 +1042,45 @@ defineProperties(defaultSQLObject, { }, }); -var exportsObject = { +SQL.SQLError = SQLError; +SQL.PostgresError = PostgresError; +SQL.SQLiteError = SQLiteError; + +// // Helper functions for native code to create error instances +// // These are internal functions used by Zig/C++ code +// export function $createPostgresError( +// message: string, +// code: string, +// detail: string, +// hint: string, +// severity: string, +// additionalFields?: Record, +// ) { +// const options = { +// code, +// detail, +// hint, +// severity, +// ...additionalFields, +// }; +// return new PostgresError(message, options); +// } + +// export function $createSQLiteError(message: string, code: string, errno: number) { +// return new SQLiteError(message, { code, errno }); +// } + +// export function $createSQLError(message: string) { +// return new SQLError(message); +// } + +export default { sql: defaultSQLObject, default: defaultSQLObject, SQL, Query, postgres: SQL, + SQLError, + PostgresError, + SQLiteError, }; - -export default exportsObject; diff --git a/src/js/bun/sqlite.ts b/src/js/bun/sqlite.ts index 3fbc4a61a3..395c0cd70b 100644 --- a/src/js/bun/sqlite.ts +++ b/src/js/bun/sqlite.ts @@ -4,10 +4,10 @@ import type * as SqliteTypes from "bun:sqlite"; const kSafeIntegersFlag = 1 << 1; const kStrictFlag = 1 << 2; -var defineProperties = Object.defineProperties; -var toStringTag = Symbol.toStringTag; -var isArray = Array.isArray; -var isTypedArray = ArrayBuffer.isView; +const defineProperties = Object.defineProperties; +const toStringTag = Symbol.toStringTag; +const isArray = Array.isArray; +const isTypedArray = ArrayBuffer.isView; let internalFieldTuple; @@ -94,12 +94,41 @@ const constants = { SQLITE_FCNTL_RESET_CACHE: 42, }; -var SQL; +// This is interface is the JS equivalent of what JSSQLStatement.cpp defines +interface CppSQLStatement { + run: (...args: TODO[]) => TODO; + get: (...args: TODO[]) => TODO; + all: (...args: TODO[]) => TODO; + iterate: (...args: TODO[]) => TODO; + as: (...args: TODO[]) => TODO; + values: (...args: TODO[]) => TODO; + raw: (...args: TODO[]) => TODO; + finalize: (...args: TODO[]) => TODO; + toString: (...args: TODO[]) => TODO; + columns: string[]; + columnsCount: number; + paramsCount: number; + columnTypes: string[]; + declaredTypes: 
(string | null)[]; + safeIntegers: boolean; +} -var controllers; +interface CppSQL { + open(filename: string, flags: number, db: Database): TODO; + isInTransaction(handle: TODO): boolean; + loadExtension(handle: TODO, name: string, entryPoint: string): void; + serialize(handle: TODO, name: string): Buffer; + deserialize(serialized: NodeJS.TypedArray | ArrayBufferLike, openFlags: number, deserializeFlags: number): TODO; + fcntl(handle: TODO, ...args: TODO[]): TODO; + close(handle: TODO, throwOnError: boolean): void; + setCustomSQLite(path: string): void; +} + +let SQL: CppSQL; +let controllers: WeakMap | undefined; class Statement { - constructor(raw) { + constructor(raw: CppSQLStatement) { this.#raw = raw; switch (raw.paramsCount) { @@ -108,6 +137,7 @@ class Statement { this.all = this.#allNoArgs; this.iterate = this.#iterateNoArgs; this.values = this.#valuesNoArgs; + this.raw = this.#rawNoArgs; this.run = this.#runNoArgs; break; } @@ -116,18 +146,20 @@ class Statement { this.all = this.#all; this.iterate = this.#iterate; this.values = this.#values; + this.raw = this.#rawValues; this.run = this.#run; break; } } } - #raw; + #raw: CppSQLStatement; get: SqliteTypes.Statement["get"]; all: SqliteTypes.Statement["all"]; iterate: SqliteTypes.Statement["iterate"]; values: SqliteTypes.Statement["values"]; + raw: SqliteTypes.Statement["raw"]; run: SqliteTypes.Statement["run"]; isFinalized = false; @@ -170,6 +202,10 @@ class Statement { return this.#raw.values(); } + #rawNoArgs() { + return this.#raw.raw(); + } + #runNoArgs() { this.#raw.run(internalFieldTuple); @@ -191,7 +227,6 @@ class Statement { return this; } - // eslint-disable-next-line no-unused-private-class-members #get(...args) { if (args.length === 0) return this.#getNoArgs(); var arg0 = args[0]; @@ -204,7 +239,6 @@ class Statement { : this.#raw.get(...args); } - // eslint-disable-next-line no-unused-private-class-members #all(...args) { if (args.length === 0) return this.#allNoArgs(); var arg0 = args[0]; @@ -217,7 +251,6 @@ class Statement { : this.#raw.all(...args); } - // eslint-disable-next-line no-unused-private-class-members *#iterate(...args) { if (args.length === 0) return yield* this.#iterateNoArgs(); var arg0 = args[0]; @@ -234,7 +267,6 @@ class Statement { } } - // eslint-disable-next-line no-unused-private-class-members #values(...args) { if (args.length === 0) return this.#valuesNoArgs(); var arg0 = args[0]; @@ -247,7 +279,18 @@ class Statement { : this.#raw.values(...args); } - // eslint-disable-next-line no-unused-private-class-members + #rawValues(...args) { + if (args.length === 0) return this.#rawNoArgs(); + var arg0 = args[0]; + // ["foo"] => ["foo"] + // ("foo") => ["foo"] + // (Uint8Array(1024)) => [Uint8Array] + // (123) => [123] + return !isArray(arg0) && (!arg0 || typeof arg0 !== "object" || isTypedArray(arg0)) + ? 
this.#raw.raw(args) + : this.#raw.raw(...args); + } + #run(...args) { if (args.length === 0) { this.#runNoArgs(); @@ -295,9 +338,13 @@ class Statement { } } -var cachedCount = Symbol.for("Bun.Database.cache.count"); -class Database { - constructor(filenameGiven, options) { +const cachedCount = Symbol.for("Bun.Database.cache.count"); + +class Database implements SqliteTypes.Database { + constructor( + filenameGiven: string | undefined | NodeJS.TypedArray | Buffer, + options?: SqliteTypes.DatabaseOptions | number, + ) { if (typeof filenameGiven === "undefined") { } else if (typeof filenameGiven !== "string") { if (isTypedArray(filenameGiven)) { @@ -398,11 +445,11 @@ class Database { return SQL.loadExtension(this.#handle, name, entryPoint); } - serialize(optionalName) { + serialize(optionalName?: string) { return SQL.serialize(this.#handle, optionalName || "main"); } - static #deserialize(serialized, openFlags, deserializeFlags) { + static #deserialize(serialized: NodeJS.TypedArray | ArrayBufferLike, openFlags: number, deserializeFlags: number) { if (!SQL) { initializeSQL(); } @@ -411,7 +458,7 @@ class Database { } static deserialize( - serialized, + serialized: NodeJS.TypedArray | ArrayBufferLike, options: boolean | { readonly?: boolean; strict?: boolean; safeIntegers?: boolean } = false, ) { if (typeof options === "boolean") { @@ -476,7 +523,7 @@ class Database { return createChangesObject(); } - prepare(query, params, flags) { + prepare(query: string, params: any[] | undefined, flags: number = 0) { return new Statement(SQL.prepare(this.#handle, query, params, flags || 0, this.#internalFlags)); } diff --git a/src/js/internal/shared.ts b/src/js/internal/shared.ts index 5827c78288..984885fb4b 100644 --- a/src/js/internal/shared.ts +++ b/src/js/internal/shared.ts @@ -26,7 +26,7 @@ function throwNotImplemented(feature: string, issue?: number, extra?: string): n throw new NotImplementedError(feature, issue, extra); } -function hideFromStack(...fns) { +function hideFromStack(...fns: Function[]) { for (const fn of fns) { Object.defineProperty(fn, "name", { value: "::bunternal::", @@ -34,7 +34,7 @@ function hideFromStack(...fns) { } } -let warned; +let warned: Set; function warnNotImplementedOnce(feature: string, issue?: number) { if (!warned) { warned = new Set(); @@ -47,16 +47,14 @@ function warnNotImplementedOnce(feature: string, issue?: number) { console.warn(new NotImplementedError(feature, issue)); } -// - let util: typeof import("node:util"); class ExceptionWithHostPort extends Error { errno: number; syscall: string; port?: number; - address; + address: string; - constructor(err, syscall, address, port) { + constructor(err: number, syscall: string, address: string, port?: number) { // TODO(joyeecheung): We have to use the type-checked // getSystemErrorName(err) to guard against invalid arguments from users. 
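The `Statement` wrapper above binds zero-argument fast paths when `paramsCount` is 0 and otherwise forwards bindings to the native handle; `get` returns an object row, `values` returns rows as arrays, and this patch adds a `raw` variant alongside them. For reference, a sketch of the `bun:sqlite` surface this class backs:

```ts
import { Database } from "bun:sqlite";

const db = new Database(":memory:");
db.run(`CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)`);

const insert = db.prepare(`INSERT INTO users (name) VALUES (?)`);
insert.run("alice"); // bound statements reuse the same native handle
insert.run("bob");

const byName = db.prepare(`SELECT id, name FROM users WHERE name = ?`);
console.log(byName.get("alice"));  // { id: 1, name: "alice" }   — object row
console.log(byName.values("bob")); // [[2, "bob"]]               — rows as arrays
console.log(byName.all("alice"));  // [{ id: 1, name: "alice" }] — all object rows

byName.finalize();
db.close();
```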
// This can be replaced with [ code ] = errmap.get(err) when this method @@ -94,6 +92,9 @@ class NodeAggregateError extends AggregateError { } class ErrnoException extends Error { + errno: number; + syscall: string; + constructor(err, syscall, original) { util ??= require("node:util"); const code = util.getSystemErrorName(err); diff --git a/src/js/internal/sql/errors.ts b/src/js/internal/sql/errors.ts new file mode 100644 index 0000000000..a2f5d5a98a --- /dev/null +++ b/src/js/internal/sql/errors.ts @@ -0,0 +1,95 @@ +class SQLError extends Error implements Bun.SQL.SQLError { + constructor(message: string) { + super(message); + this.name = "SQLError"; + } +} + +export interface PostgresErrorOptions { + code: string; + + detail?: string | undefined; + hint?: string | undefined; + severity?: string | undefined; + errno?: string | undefined; + position?: string | undefined; + internalPosition?: string | undefined; + internalQuery?: string | undefined; + where?: string | undefined; + schema?: string | undefined; + table?: string | undefined; + column?: string | undefined; + dataType?: string | undefined; + constraint?: string | undefined; + file?: string | undefined; + line?: string | undefined; + routine?: string | undefined; +} + +class PostgresError extends SQLError implements Bun.SQL.PostgresError { + public readonly code: string; + public readonly detail: string | undefined; + public readonly hint: string | undefined; + public readonly severity: string | undefined; + public readonly errno: string | undefined; + public readonly position: string | undefined; + public readonly internalPosition: string | undefined; + public readonly internalQuery: string | undefined; + public readonly where: string | undefined; + public readonly schema: string | undefined; + public readonly table: string | undefined; + public readonly column: string | undefined; + public readonly dataType: string | undefined; + public readonly constraint: string | undefined; + public readonly file: string | undefined; + public readonly line: string | undefined; + public readonly routine: string | undefined; + + constructor(message: string, options: PostgresErrorOptions) { + super(message); + + this.name = "PostgresError"; + this.code = options.code; + + if (options.detail !== undefined) this.detail = options.detail; + if (options.hint !== undefined) this.hint = options.hint; + if (options.severity !== undefined) this.severity = options.severity; + if (options.errno !== undefined) this.errno = options.errno; + if (options.position !== undefined) this.position = options.position; + if (options.internalPosition !== undefined) this.internalPosition = options.internalPosition; + if (options.internalQuery !== undefined) this.internalQuery = options.internalQuery; + if (options.where !== undefined) this.where = options.where; + if (options.schema !== undefined) this.schema = options.schema; + if (options.table !== undefined) this.table = options.table; + if (options.column !== undefined) this.column = options.column; + if (options.dataType !== undefined) this.dataType = options.dataType; + if (options.constraint !== undefined) this.constraint = options.constraint; + if (options.file !== undefined) this.file = options.file; + if (options.line !== undefined) this.line = options.line; + if (options.routine !== undefined) this.routine = options.routine; + } +} + +export interface SQLiteErrorOptions { + code: string; + errno: number; + byteOffset?: number | undefined; +} + +class SQLiteError extends SQLError implements Bun.SQL.SQLiteError 
{ + public readonly code: string; + public readonly errno: number; + public readonly byteOffset: number | undefined; + + constructor(message: string, options: SQLiteErrorOptions) { + super(message); + + this.name = "SQLiteError"; + this.code = options.code; + this.errno = options.errno; + + if (options.byteOffset !== undefined) this.byteOffset = options.byteOffset; + } +} + +export default { PostgresError, SQLError, SQLiteError }; diff --git a/src/js/internal/sql/postgres.ts b/src/js/internal/sql/postgres.ts new file mode 100644 index 0000000000..24f44e8cae --- /dev/null +++ b/src/js/internal/sql/postgres.ts @@ -0,0 +1,1171 @@ +import type { Query } from "./query"; +import type { DatabaseAdapter, SQLHelper, SQLResultArray, SSLMode } from "./shared"; + +const { SQLHelper, SSLMode, SQLResultArray } = require("internal/sql/shared"); +const { + Query, + SQLQueryFlags, + symbols: { _strings, _values, _flags, _results, _handle }, +} = require("internal/sql/query"); +const { escapeIdentifier, connectionClosedError } = require("internal/sql/utils"); +const { PostgresError } = require("internal/sql/errors"); + +const { + createConnection: createPostgresConnection, + createQuery: createPostgresQuery, + init: initPostgres, +} = $zig("postgres.zig", "createBinding") as PostgresDotZig; + +const cmds = ["", "INSERT", "DELETE", "UPDATE", "MERGE", "SELECT", "MOVE", "FETCH", "COPY"]; + +initPostgres( + function onResolvePostgresQuery(query, result, commandTag, count, queries, is_last) { + /// simple queries + if (query[_flags] & SQLQueryFlags.simple) { + // simple can have multiple results or a single result + if (is_last) { + if (queries) { + const queriesIndex = queries.indexOf(query); + if (queriesIndex !== -1) { + queries.splice(queriesIndex, 1); + } + } + try { + query.resolve(query[_results]); + } catch {} + return; + } + $assert(result instanceof SQLResultArray, "Invalid result array"); + // prepare for next query + query[_handle].setPendingValue(new SQLResultArray()); + + if (typeof commandTag === "string") { + if (commandTag.length > 0) { + result.command = commandTag; + } + } else { + result.command = cmds[commandTag]; + } + + result.count = count || 0; + const last_result = query[_results]; + + if (!last_result) { + query[_results] = result; + } else { + if (last_result instanceof SQLResultArray) { + // multiple results + query[_results] = [last_result, result]; + } else { + // 3 or more results + last_result.push(result); + } + } + return; + } + /// prepared statements + $assert(result instanceof SQLResultArray, "Invalid result array"); + if (typeof commandTag === "string") { + if (commandTag.length > 0) { + result.command = commandTag; + } + } else { + result.command = cmds[commandTag]; + } + + result.count = count || 0; + if (queries) { + const queriesIndex = queries.indexOf(query); + if (queriesIndex !== -1) { + queries.splice(queriesIndex, 1); + } + } + try { + query.resolve(result); + } catch {} + }, + + function onRejectPostgresQuery(query: Query, reject: Error, queries: Query[]) { + if (queries) { + const queriesIndex = queries.indexOf(query); + if (queriesIndex !== -1) { + queries.splice(queriesIndex, 1); + } + } + + try { + query.reject(reject); + } catch {} + }, +); + +export interface PostgresDotZig { + init: ( + onResolveQuery: ( + query: Query, + result: SQLResultArray, + commandTag: string, + count: number, + queries: any, + is_last: boolean, + ) => void, + onRejectQuery: (query: Query, err: Error, queries) => void, + ) => void; + createConnection: ( + hostname: string | 
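The error classes above replace the string-based `$ERR_POSTGRES_*` helpers used elsewhere in this diff; `PostgresError` carries the server-reported error fields as plain properties, and the classes are also attached to the `SQL` constructor (`SQL.SQLError`, `SQL.PostgresError`, `SQL.SQLiteError`). A hedged sketch of branching on them, assuming server-raised errors surface as `PostgresError` instances:

```ts
import { SQL } from "bun";

const sql = new SQL("postgres://app:secret@localhost:5432/mydb"); // illustrative

try {
  await sql`INSERT INTO users (email) VALUES (${"taken@example.com"})`;
} catch (err) {
  if (err instanceof SQL.PostgresError) {
    // Server-reported fields are plain properties on the instance (when present).
    console.log(err.code, err.detail, err.constraint, err.hint);
  } else {
    throw err;
  }
}
```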
undefined, + port: number, + username: string, + password: string, + databae: string, + sslmode: SSLMode, + tls: Bun.TLSOptions | boolean | null, // boolean true => empty TLSOptions object `{}`, boolean false or null => nothing + query: string, + path: string, + onConnected: (err: Error | null, connection: $ZigGeneratedClasses.PostgresSQLConnection) => void, + onDisconnected: (err: Error | null, connection: $ZigGeneratedClasses.PostgresSQLConnection) => void, + idleTimeout: number, + connectionTimeout: number, + maxLifetime: number, + useUnnamedPreparedStatements: boolean, + ) => $ZigGeneratedClasses.PostgresSQLConnection; + createQuery: ( + sql: string, + values: unknown[], + pendingValue: SQLResultArray, + columns: string[] | undefined, + bigint: boolean, + simple: boolean, + ) => $ZigGeneratedClasses.PostgresSQLQuery; +} + +const enum SQLCommand { + insert = 0, + update = 1, + updateSet = 2, + where = 3, + whereIn = 4, + none = -1, +} +export type { SQLCommand }; + +function commandToString(command: SQLCommand): string { + switch (command) { + case SQLCommand.insert: + return "INSERT"; + case SQLCommand.updateSet: + case SQLCommand.update: + return "UPDATE"; + case SQLCommand.whereIn: + case SQLCommand.where: + return "WHERE"; + default: + return ""; + } +} + +function detectCommand(query: string): SQLCommand { + const text = query.toLowerCase().trim(); + const text_len = text.length; + + let token = ""; + let command = SQLCommand.none; + let quoted = false; + for (let i = 0; i < text_len; i++) { + const char = text[i]; + switch (char) { + case " ": // Space + case "\n": // Line feed + case "\t": // Tab character + case "\r": // Carriage return + case "\f": // Form feed + case "\v": { + switch (token) { + case "insert": { + if (command === SQLCommand.none) { + return SQLCommand.insert; + } + return command; + } + case "update": { + if (command === SQLCommand.none) { + command = SQLCommand.update; + token = ""; + continue; // try to find SET + } + return command; + } + case "where": { + command = SQLCommand.where; + token = ""; + continue; // try to find IN + } + case "set": { + if (command === SQLCommand.update) { + command = SQLCommand.updateSet; + token = ""; + continue; // try to find WHERE + } + return command; + } + case "in": { + if (command === SQLCommand.where) { + return SQLCommand.whereIn; + } + return command; + } + default: { + token = ""; + continue; + } + } + } + default: { + // skip quoted commands + if (char === '"') { + quoted = !quoted; + continue; + } + if (!quoted) { + token += char; + } + } + } + } + if (token) { + switch (command) { + case SQLCommand.none: { + switch (token) { + case "insert": + return SQLCommand.insert; + case "update": + return SQLCommand.update; + case "where": + return SQLCommand.where; + default: + return SQLCommand.none; + } + } + case SQLCommand.update: { + if (token === "set") { + return SQLCommand.updateSet; + } + return SQLCommand.update; + } + case SQLCommand.where: { + if (token === "in") { + return SQLCommand.whereIn; + } + return SQLCommand.where; + } + } + } + + return command; +} + +const enum PooledConnectionState { + pending = 0, + connected = 1, + closed = 2, +} + +const enum PooledConnectionFlags { + /// canBeConnected is used to indicate that at least one time we were able to connect to the database + canBeConnected = 1 << 0, + /// reserved is used to indicate that the connection is currently reserved + reserved = 1 << 1, + /// preReserved is used to indicate that the connection will be reserved in the future when queryCount 
drops to 0 + preReserved = 1 << 2, +} + +function onQueryFinish(this: PooledPostgresConnection, onClose: (err: Error) => void) { + this.queries.delete(onClose); + this.adapter.release(this); +} + +class PooledPostgresConnection { + private static async createConnection( + options: Bun.SQL.__internal.DefinedPostgresOptions, + onConnected: (err: Error | null, connection: $ZigGeneratedClasses.PostgresSQLConnection) => void, + onClose: (err: Error | null) => void, + ): Promise<$ZigGeneratedClasses.PostgresSQLConnection | null> { + const { + hostname, + port, + username, + tls, + query, + database, + sslMode, + idleTimeout = 0, + connectionTimeout = 30 * 1000, + maxLifetime = 0, + prepare = true, + + // @ts-expect-error path is currently removed from the types + path, + } = options; + + let password: Bun.MaybePromise | string | undefined | (() => Bun.MaybePromise) = options.password; + + try { + if (typeof password === "function") { + password = password(); + + if (password && $isPromise(password)) { + password = await password; + } + } + + return createPostgresConnection( + hostname, + Number(port), + username || "", + password || "", + database || "", + // > The default value for sslmode is prefer. As is shown in the table, this + // makes no sense from a security point of view, and it only promises + // performance overhead if possible. It is only provided as the default for + // backward compatibility, and is not recommended in secure deployments. + sslMode || SSLMode.disable, + tls || null, + query || "", + path || "", + onConnected, + onClose, + idleTimeout, + connectionTimeout, + maxLifetime, + !prepare, + ); + } catch (e) { + onClose(e as Error); + return null; + } + } + + adapter: PostgresAdapter; + connection: $ZigGeneratedClasses.PostgresSQLConnection | null = null; + state: PooledConnectionState = PooledConnectionState.pending; + storedError: Error | null = null; + queries: Set<(err: Error) => void> = new Set(); + onFinish: ((err: Error | null) => void) | null = null; + connectionInfo: Bun.SQL.__internal.DefinedPostgresOptions; + flags: number = 0; + /// queryCount is used to indicate the number of queries using the connection, if a connection is reserved or if its a transaction queryCount will be 1 independently of the number of queries + queryCount: number = 0; + + #onConnected(err, _) { + const connectionInfo = this.connectionInfo; + if (connectionInfo?.onconnect) { + connectionInfo.onconnect(err); + } + this.storedError = err; + if (!err) { + this.flags |= PooledConnectionFlags.canBeConnected; + } + this.state = err ? 
PooledConnectionState.closed : PooledConnectionState.connected; + const onFinish = this.onFinish; + if (onFinish) { + this.queryCount = 0; + this.flags &= ~PooledConnectionFlags.reserved; + this.flags &= ~PooledConnectionFlags.preReserved; + + // pool is closed, lets finish the connection + // pool is closed, lets finish the connection + if (err) { + onFinish(err); + } else { + this.connection?.close(); + } + return; + } + this.adapter.release(this, true); + } + + #onClose(err) { + const connectionInfo = this.connectionInfo; + if (connectionInfo?.onclose) { + connectionInfo.onclose(err); + } + this.state = PooledConnectionState.closed; + this.connection = null; + this.storedError = err; + + // remove from ready connections if its there + this.adapter.readyConnections.delete(this); + const queries = new Set(this.queries); + this.queries.clear(); + this.queryCount = 0; + this.flags &= ~PooledConnectionFlags.reserved; + + // notify all queries that the connection is closed + for (const onClose of queries) { + onClose(err); + } + const onFinish = this.onFinish; + if (onFinish) { + onFinish(err); + } + + this.adapter.release(this, true); + } + + constructor(connectionInfo: Bun.SQL.__internal.DefinedPostgresOptions, adapter: PostgresAdapter) { + this.state = PooledConnectionState.pending; + this.adapter = adapter; + this.connectionInfo = connectionInfo; + this.#startConnection(); + } + + async #startConnection() { + this.connection = await PooledPostgresConnection.createConnection( + this.connectionInfo, + this.#onConnected.bind(this), + this.#onClose.bind(this), + ); + } + + onClose(onClose: (err: Error) => void) { + this.queries.add(onClose); + } + + bindQuery(query: Query, onClose: (err: Error) => void) { + this.queries.add(onClose); + query.finally(onQueryFinish.bind(this, onClose)); + } + + #doRetry() { + if (this.adapter.closed) { + return; + } + // reset error and state + this.storedError = null; + this.state = PooledConnectionState.pending; + // retry connection + this.#startConnection(); + } + close() { + try { + if (this.state === PooledConnectionState.connected) { + this.connection?.close(); + } + } catch {} + } + flush() { + this.connection?.flush(); + } + retry() { + // if pool is closed, we can't retry + if (this.adapter.closed) { + return false; + } + // we need to reconnect + // lets use a retry strategy + + // we can only retry if one day we are able to connect + if (this.flags & PooledConnectionFlags.canBeConnected) { + this.#doRetry(); + } else { + // analyse type of error to see if we can retry + switch (this.storedError?.code) { + case "ERR_POSTGRES_UNSUPPORTED_AUTHENTICATION_METHOD": + case "ERR_POSTGRES_UNKNOWN_AUTHENTICATION_METHOD": + case "ERR_POSTGRES_TLS_NOT_AVAILABLE": + case "ERR_POSTGRES_TLS_UPGRADE_FAILED": + case "ERR_POSTGRES_INVALID_SERVER_SIGNATURE": + case "ERR_POSTGRES_INVALID_SERVER_KEY": + case "ERR_POSTGRES_AUTHENTICATION_FAILED_PBKDF2": + // we can't retry these are authentication errors + return false; + default: + // we can retry + this.#doRetry(); + } + } + return true; + } +} + +export class PostgresAdapter + implements + DatabaseAdapter< + PooledPostgresConnection, + $ZigGeneratedClasses.PostgresSQLConnection, + $ZigGeneratedClasses.PostgresSQLQuery + > +{ + public readonly connectionInfo: Bun.SQL.__internal.DefinedPostgresOptions; + + public readonly connections: PooledPostgresConnection[]; + public readonly readyConnections: Set; + + public waitingQueue: Array<(err: Error | null, result: any) => void> = []; + public reservedQueue: Array<(err: 
Error | null, result: any) => void> = []; + + public poolStarted: boolean = false; + public closed: boolean = false; + public totalQueries: number = 0; + public onAllQueriesFinished: (() => void) | null = null; + + constructor(connectionInfo: Bun.SQL.__internal.DefinedPostgresOptions) { + this.connectionInfo = connectionInfo; + this.connections = new Array(connectionInfo.max); + this.readyConnections = new Set(); + } + + supportsReservedConnections() { + return true; + } + + getConnectionForQuery(pooledConnection: PooledPostgresConnection) { + return pooledConnection.connection; + } + + attachConnectionCloseHandler(connection: PooledPostgresConnection, handler: () => void): void { + // PostgreSQL pooled connections support onClose handlers + if (connection.onClose) { + connection.onClose(handler); + } + } + + detachConnectionCloseHandler(connection: PooledPostgresConnection, handler: () => void): void { + // PostgreSQL pooled connections track queries + if (connection.queries) { + connection.queries.delete(handler); + } + } + + getTransactionCommands(options?: string): import("./shared").TransactionCommands { + let BEGIN = "BEGIN"; + if (options) { + BEGIN = `BEGIN ${options}`; + } + + return { + BEGIN, + COMMIT: "COMMIT", + ROLLBACK: "ROLLBACK", + SAVEPOINT: "SAVEPOINT", + RELEASE_SAVEPOINT: "RELEASE SAVEPOINT", + ROLLBACK_TO_SAVEPOINT: "ROLLBACK TO SAVEPOINT", + }; + } + + getDistributedTransactionCommands(name: string): import("./shared").TransactionCommands | null { + if (!this.validateDistributedTransactionName(name).valid) { + return null; + } + + return { + BEGIN: "BEGIN", + COMMIT: `PREPARE TRANSACTION '${name}'`, + ROLLBACK: "ROLLBACK", + SAVEPOINT: "SAVEPOINT", + RELEASE_SAVEPOINT: "RELEASE SAVEPOINT", + ROLLBACK_TO_SAVEPOINT: "ROLLBACK TO SAVEPOINT", + BEFORE_COMMIT_OR_ROLLBACK: null, + }; + } + + validateTransactionOptions(_options: string): { valid: boolean; error?: string } { + // PostgreSQL accepts any transaction options + return { valid: true }; + } + + validateDistributedTransactionName(name: string): { valid: boolean; error?: string } { + if (name.indexOf("'") !== -1) { + return { + valid: false, + error: "Distributed transaction name cannot contain single quotes.", + }; + } + return { valid: true }; + } + + getCommitDistributedSQL(name: string): string { + const validation = this.validateDistributedTransactionName(name); + if (!validation.valid) { + throw new Error(validation.error); + } + return `COMMIT PREPARED '${name}'`; + } + + getRollbackDistributedSQL(name: string): string { + const validation = this.validateDistributedTransactionName(name); + if (!validation.valid) { + throw new Error(validation.error); + } + return `ROLLBACK PREPARED '${name}'`; + } + + createQueryHandle(sql: string, values: unknown[], flags: number) { + if (!(flags & SQLQueryFlags.allowUnsafeTransaction)) { + if (this.connectionInfo.max !== 1) { + const upperCaseSqlString = sql.toUpperCase().trim(); + if (upperCaseSqlString.startsWith("BEGIN") || upperCaseSqlString.startsWith("START TRANSACTION")) { + throw new PostgresError("Only use sql.begin, sql.reserved or max: 1", { + code: "ERR_POSTGRES_UNSAFE_TRANSACTION", + }); + } + } + } + + return createPostgresQuery( + sql, + values, + new SQLResultArray(), + undefined, + !!(flags & SQLQueryFlags.bigint), + !!(flags & SQLQueryFlags.simple), + ); + } + + maxDistribution() { + if (!this.waitingQueue.length) return 0; + const result = Math.ceil((this.waitingQueue.length + this.totalQueries) / this.connections.length); + return result ? 
result : 1; + } + + flushConcurrentQueries() { + const maxDistribution = this.maxDistribution(); + if (maxDistribution === 0) { + return; + } + + while (true) { + const nonReservedConnections = Array.from(this.readyConnections).filter( + c => !(c.flags & PooledConnectionFlags.preReserved) && c.queryCount < maxDistribution, + ); + if (nonReservedConnections.length === 0) { + return; + } + const orderedConnections = nonReservedConnections.sort((a, b) => a.queryCount - b.queryCount); + for (const connection of orderedConnections) { + const pending = this.waitingQueue.shift(); + if (!pending) { + return; + } + connection.queryCount++; + this.totalQueries++; + pending(null, connection); + } + } + } + + release(connection: PooledPostgresConnection, connectingEvent: boolean = false) { + if (!connectingEvent) { + connection.queryCount--; + this.totalQueries--; + } + const currentQueryCount = connection.queryCount; + if (currentQueryCount == 0) { + connection.flags &= ~PooledConnectionFlags.reserved; + connection.flags &= ~PooledConnectionFlags.preReserved; + } + if (this.onAllQueriesFinished) { + // we are waiting for all queries to finish, lets check if we can call it + if (!this.hasPendingQueries()) { + this.onAllQueriesFinished(); + } + } + + if (connection.state !== PooledConnectionState.connected) { + // connection is not ready + if (connection.storedError) { + // this connection got a error but maybe we can wait for another + + if (this.hasConnectionsAvailable()) { + return; + } + + const waitingQueue = this.waitingQueue; + const reservedQueue = this.reservedQueue; + + this.waitingQueue = []; + this.reservedQueue = []; + // we have no connections available so lets fails + for (const pending of waitingQueue) { + pending(connection.storedError, connection); + } + for (const pending of reservedQueue) { + pending(connection.storedError, connection); + } + } + return; + } + + if (currentQueryCount == 0) { + // ok we can actually bind reserved queries to it + const pendingReserved = this.reservedQueue.shift(); + if (pendingReserved) { + connection.flags |= PooledConnectionFlags.reserved; + connection.queryCount++; + this.totalQueries++; + // we have a connection waiting for a reserved connection lets prioritize it + pendingReserved(connection.storedError, connection); + return; + } + } + this.readyConnections.add(connection); + this.flushConcurrentQueries(); + } + + hasConnectionsAvailable() { + if (this.readyConnections.size > 0) return true; + if (this.poolStarted) { + const pollSize = this.connections.length; + for (let i = 0; i < pollSize; i++) { + const connection = this.connections[i]; + if (connection.state !== PooledConnectionState.closed) { + // some connection is connecting or connected + return true; + } + } + } + return false; + } + + hasPendingQueries() { + if (this.waitingQueue.length > 0 || this.reservedQueue.length > 0) return true; + if (this.poolStarted) { + return this.totalQueries > 0; + } + return false; + } + isConnected() { + if (this.readyConnections.size > 0) { + return true; + } + if (this.poolStarted) { + const pollSize = this.connections.length; + for (let i = 0; i < pollSize; i++) { + const connection = this.connections[i]; + if (connection.state === PooledConnectionState.connected) { + return true; + } + } + } + return false; + } + flush() { + if (this.closed) { + return; + } + if (this.poolStarted) { + const pollSize = this.connections.length; + for (let i = 0; i < pollSize; i++) { + const connection = this.connections[i]; + if (connection.state === 
PooledConnectionState.connected) { + connection.connection?.flush(); + } + } + } + } + + async #close() { + let pending; + while ((pending = this.waitingQueue.shift())) { + pending(connectionClosedError(), null); + } + while (this.reservedQueue.length > 0) { + const pendingReserved = this.reservedQueue.shift(); + if (pendingReserved) { + pendingReserved(connectionClosedError(), null); + } + } + + const promises: Array> = []; + + if (this.poolStarted) { + this.poolStarted = false; + const pollSize = this.connections.length; + for (let i = 0; i < pollSize; i++) { + const connection = this.connections[i]; + switch (connection.state) { + case PooledConnectionState.pending: + { + const { promise, resolve } = Promise.withResolvers(); + connection.onFinish = resolve; + promises.push(promise); + connection.connection?.close(); + } + break; + + case PooledConnectionState.connected: + { + const { promise, resolve } = Promise.withResolvers(); + connection.onFinish = resolve; + promises.push(promise); + connection.connection?.close(); + } + break; + } + // clean connection reference + // @ts-ignore + this.connections[i] = null; + } + } + + this.readyConnections.clear(); + this.waitingQueue.length = 0; + return Promise.all(promises); + } + + async close(options?: { timeout?: number }) { + if (this.closed) { + return; + } + + let timeout = options?.timeout; + if (timeout) { + timeout = Number(timeout); + if (timeout > 2 ** 31 || timeout < 0 || timeout !== timeout) { + throw $ERR_INVALID_ARG_VALUE("options.timeout", timeout, "must be a non-negative integer less than 2^31"); + } + + this.closed = true; + if (timeout === 0 || !this.hasPendingQueries()) { + // close immediately + await this.#close(); + return; + } + + const { promise, resolve } = Promise.withResolvers(); + const timer = setTimeout(() => { + // timeout is reached, lets close and probably fail some queries + this.#close().finally(resolve); + }, timeout * 1000); + timer.unref(); // dont block the event loop + + this.onAllQueriesFinished = () => { + clearTimeout(timer); + // everything is closed, lets close the pool + this.#close().finally(resolve); + }; + + return promise; + } else { + this.closed = true; + if (!this.hasPendingQueries()) { + // close immediately + await this.#close(); + return; + } + + // gracefully close the pool + const { promise, resolve } = Promise.withResolvers(); + + this.onAllQueriesFinished = () => { + // everything is closed, lets close the pool + this.#close().finally(resolve); + }; + + return promise; + } + } + + /** + * @param {function} onConnected - The callback function to be called when the connection is established. 
+ * @param {boolean} reserved - Whether the connection is reserved, if is reserved the connection will not be released until release is called, if not release will only decrement the queryCount counter + */ + connect(onConnected: (err: Error | null, result: any) => void, reserved: boolean = false) { + if (this.closed) { + return onConnected(connectionClosedError(), null); + } + + if (this.readyConnections.size === 0) { + // no connection ready lets make some + let retry_in_progress = false; + let all_closed = true; + let storedError: Error | null = null; + + if (this.poolStarted) { + // we already started the pool + // lets check if some connection is available to retry + const pollSize = this.connections.length; + for (let i = 0; i < pollSize; i++) { + const connection = this.connections[i]; + // we need a new connection and we have some connections that can retry + if (connection.state === PooledConnectionState.closed) { + if (connection.retry()) { + // lets wait for connection to be released + if (!retry_in_progress) { + // avoid adding to the queue twice, we wanna to retry every available pool connection + retry_in_progress = true; + if (reserved) { + // we are not sure what connection will be available so we dont pre reserve + this.reservedQueue.push(onConnected); + } else { + this.waitingQueue.push(onConnected); + } + } + } else { + // we have some error, lets grab it and fail if unable to start a connection + storedError = connection.storedError; + } + } else { + // we have some pending or open connections + all_closed = false; + } + } + if (!all_closed && !retry_in_progress) { + // is possible to connect because we have some working connections, or we are just without network for some reason + // wait for connection to be released or fail + if (reserved) { + // we are not sure what connection will be available so we dont pre reserve + this.reservedQueue.push(onConnected); + } else { + this.waitingQueue.push(onConnected); + } + } else if (!retry_in_progress) { + // impossible to connect or retry + onConnected(storedError ?? 
connectionClosedError(), null); + } + return; + } + // we never started the pool, lets start it + if (reserved) { + this.reservedQueue.push(onConnected); + } else { + this.waitingQueue.push(onConnected); + } + this.poolStarted = true; + const pollSize = this.connections.length; + // pool is always at least 1 connection + const firstConnection = new PooledPostgresConnection(this.connectionInfo, this); + this.connections[0] = firstConnection; + if (reserved) { + firstConnection.flags |= PooledConnectionFlags.preReserved; // lets pre reserve the first connection + } + for (let i = 1; i < pollSize; i++) { + this.connections[i] = new PooledPostgresConnection(this.connectionInfo, this); + } + return; + } + if (reserved) { + let connectionWithLeastQueries: PooledPostgresConnection | null = null; + let leastQueries = Infinity; + for (const connection of this.readyConnections) { + if (connection.flags & PooledConnectionFlags.preReserved || connection.flags & PooledConnectionFlags.reserved) + continue; + const queryCount = connection.queryCount; + if (queryCount > 0) { + if (queryCount < leastQueries) { + leastQueries = queryCount; + connectionWithLeastQueries = connection; + } + continue; + } + connection.flags |= PooledConnectionFlags.reserved; + connection.queryCount++; + this.totalQueries++; + this.readyConnections.delete(connection); + onConnected(null, connection); + return; + } + + if (connectionWithLeastQueries) { + // lets mark the connection with the least queries as preReserved if any + connectionWithLeastQueries.flags |= PooledConnectionFlags.preReserved; + } + + // no connection available to be reserved lets wait for a connection to be released + this.reservedQueue.push(onConnected); + } else { + this.waitingQueue.push(onConnected); + this.flushConcurrentQueries(); + } + } + + normalizeQuery(strings: string | TemplateStringsArray, values: unknown[], binding_idx = 1): [string, unknown[]] { + if (typeof strings === "string") { + // identifier or unsafe query + return [strings, values || []]; + } + + if (!$isArray(strings)) { + // we should not hit this path + throw new SyntaxError("Invalid query: SQL Fragment cannot be executed or was misused"); + } + + const str_len = strings.length; + if (str_len === 0) { + return ["", []]; + } + + let binding_values: any[] = []; + let query = ""; + + for (let i = 0; i < str_len; i++) { + const string = strings[i]; + + if (typeof string === "string") { + query += string; + + if (values.length > i) { + const value = values[i]; + + if (value instanceof Query) { + const q = value as Query; + const [sub_query, sub_values] = this.normalizeQuery(q[_strings], q[_values], binding_idx); + + query += sub_query; + for (let j = 0; j < sub_values.length; j++) { + binding_values.push(sub_values[j]); + } + binding_idx += sub_values.length; + } else if (value instanceof SQLHelper) { + const command = detectCommand(query); + // only selectIn, insert, update, updateSet are allowed + if (command === SQLCommand.none || command === SQLCommand.where) { + throw new SyntaxError("Helpers are only allowed for INSERT, UPDATE and WHERE IN commands"); + } + const { columns, value: items } = value as SQLHelper; + const columnCount = columns.length; + if (columnCount === 0 && command !== SQLCommand.whereIn) { + throw new SyntaxError(`Cannot ${commandToString(command)} with no columns`); + } + const lastColumnIndex = columns.length - 1; + + if (command === SQLCommand.insert) { + // + // insert into users ${sql(users)} or insert into users ${sql(user)} + // + + query += "("; + for (let 
j = 0; j < columnCount; j++) { + query += escapeIdentifier(columns[j]); + if (j < lastColumnIndex) { + query += ", "; + } + } + query += ") VALUES"; + if ($isArray(items)) { + const itemsCount = items.length; + const lastItemIndex = itemsCount - 1; + for (let j = 0; j < itemsCount; j++) { + query += "("; + const item = items[j]; + for (let k = 0; k < columnCount; k++) { + const column = columns[k]; + const columnValue = item[column]; + query += `$${binding_idx++}${k < lastColumnIndex ? ", " : ""}`; + if (typeof columnValue === "undefined") { + binding_values.push(null); + } else { + binding_values.push(columnValue); + } + } + if (j < lastItemIndex) { + query += "),"; + } else { + query += ") "; // the user can add RETURNING * or RETURNING id + } + } + } else { + query += "("; + const item = items; + for (let j = 0; j < columnCount; j++) { + const column = columns[j]; + const columnValue = item[column]; + query += `$${binding_idx++}${j < lastColumnIndex ? ", " : ""}`; + if (typeof columnValue === "undefined") { + binding_values.push(null); + } else { + binding_values.push(columnValue); + } + } + query += ") "; // the user can add RETURNING * or RETURNING id + } + } else if (command === SQLCommand.whereIn) { + // SELECT * FROM users WHERE id IN (${sql([1, 2, 3])}) + if (!$isArray(items)) { + throw new SyntaxError("An array of values is required for WHERE IN helper"); + } + const itemsCount = items.length; + const lastItemIndex = itemsCount - 1; + query += "("; + for (let j = 0; j < itemsCount; j++) { + query += `$${binding_idx++}${j < lastItemIndex ? ", " : ""}`; + if (columnCount > 0) { + // we must use a key from a object + if (columnCount > 1) { + // we should not pass multiple columns here + throw new SyntaxError("Cannot use WHERE IN helper with multiple columns"); + } + // SELECT * FROM users WHERE id IN (${sql(users, "id")}) + const value = items[j]; + if (typeof value === "undefined") { + binding_values.push(null); + } else { + const value_from_key = value[columns[0]]; + + if (typeof value_from_key === "undefined") { + binding_values.push(null); + } else { + binding_values.push(value_from_key); + } + } + } else { + const value = items[j]; + if (typeof value === "undefined") { + binding_values.push(null); + } else { + binding_values.push(value); + } + } + } + query += ") "; // more conditions can be added after this + } else { + // UPDATE users SET ${sql({ name: "John", age: 31 })} WHERE id = 1 + let item; + if ($isArray(items)) { + if (items.length > 1) { + throw new SyntaxError("Cannot use array of objects for UPDATE"); + } + item = items[0]; + } else { + item = items; + } + // no need to include if is updateSet + if (command === SQLCommand.update) { + query += " SET "; + } + for (let i = 0; i < columnCount; i++) { + const column = columns[i]; + const columnValue = item[column]; + query += `${escapeIdentifier(column)} = $${binding_idx++}${i < lastColumnIndex ? 
", " : ""}`; + if (typeof columnValue === "undefined") { + binding_values.push(null); + } else { + binding_values.push(columnValue); + } + } + query += " "; // the user can add where clause after this + } + } else { + //TODO: handle sql.array parameters + query += `$${binding_idx++} `; + if (typeof value === "undefined") { + binding_values.push(null); + } else { + binding_values.push(value); + } + } + } + } else { + throw new SyntaxError("Invalid query: SQL Fragment cannot be executed or was misused"); + } + } + + return [query, binding_values]; + } +} + +export default { + PostgresAdapter, + SQLCommand, + commandToString, + detectCommand, +}; diff --git a/src/js/internal/sql/query.ts b/src/js/internal/sql/query.ts new file mode 100644 index 0000000000..dedd2016cd --- /dev/null +++ b/src/js/internal/sql/query.ts @@ -0,0 +1,330 @@ +import type { DatabaseAdapter } from "./shared.ts"; +const { escapeIdentifier, notTaggedCallError } = require("internal/sql/utils"); + +const _resolve = Symbol("resolve"); +const _reject = Symbol("reject"); +const _handle = Symbol("handle"); +const _run = Symbol("run"); +const _queryStatus = Symbol("status"); +const _handler = Symbol("handler"); +const _strings = Symbol("strings"); +const _values = Symbol("values"); +const _flags = Symbol("flags"); +const _results = Symbol("results"); +const _adapter = Symbol("adapter"); + +const PublicPromise = Promise; + +export interface BaseQueryHandle { + done?(): void; + cancel?(): void; + setMode(mode: SQLQueryResultMode): void; + run(connection: Connection, query: Query): void | Promise; +} + +export type { Query }; +class Query> extends PublicPromise { + public [_resolve]: (value: T) => void; + public [_reject]: (reason?: Error) => void; + public [_handle]: Handle | null; + public [_handler]: (query: Query, handle: Handle) => T; + public [_queryStatus]: SQLQueryStatus; + public [_strings]: string | TemplateStringsArray; + public [_values]: any[]; + public [_flags]: SQLQueryFlags; + + public readonly [_adapter]: DatabaseAdapter; + + [Symbol.for("nodejs.util.inspect.custom")](): `Query { ${string} }` { + const status = this[_queryStatus]; + + let query = ""; + if ((status & SQLQueryStatus.active) != 0) query += "active "; + if ((status & SQLQueryStatus.cancelled) != 0) query += "cancelled "; + if ((status & SQLQueryStatus.executed) != 0) query += "executed "; + if ((status & SQLQueryStatus.error) != 0) query += "error "; + + return `Query { ${query.trimEnd()} }`; + } + + private getQueryHandle() { + let handle = this[_handle]; + + if (!handle) { + try { + const [sql, values] = this[_adapter].normalizeQuery(this[_strings], this[_values]); + this[_handle] = handle = this[_adapter].createQueryHandle(sql, values, this[_flags]); + } catch (err) { + this[_queryStatus] |= SQLQueryStatus.error | SQLQueryStatus.invalidHandle; + this.reject(err as Error); + } + } + + return handle; + } + + constructor( + strings: string | TemplateStringsArray, + values: any[], + flags: number, + handler, + adapter: DatabaseAdapter, + ) { + let resolve_: (value: T) => void, reject_: (reason?: any) => void; + + super((resolve, reject) => { + resolve_ = resolve; + reject_ = reject; + }); + + this[_adapter] = adapter; + + if (typeof strings === "string") { + if (!(flags & SQLQueryFlags.unsafe)) { + // identifier (cannot be executed in safe mode) + flags |= SQLQueryFlags.notTagged; + strings = escapeIdentifier(strings); + } + } + + this[_resolve] = resolve_!; + this[_reject] = reject_!; + this[_handle] = null; + this[_handler] = handler; + 
this[_queryStatus] = SQLQueryStatus.none; + this[_strings] = strings; + this[_values] = values; + this[_flags] = flags; + + this[_results] = null; + } + + async [_run](async: boolean) { + const { [_handler]: handler, [_queryStatus]: status } = this; + + if ( + status & + (SQLQueryStatus.executed | SQLQueryStatus.error | SQLQueryStatus.cancelled | SQLQueryStatus.invalidHandle) + ) { + return; + } + + if (this[_flags] & SQLQueryFlags.notTagged) { + this.reject(notTaggedCallError()); + return; + } + + this[_queryStatus] |= SQLQueryStatus.executed; + const handle = this.getQueryHandle(); + + if (!handle) { + return this; + } + + if (async) { + // Ensure it's actually async. This sort of forces a tick which prevents an infinite loop. + await (1 as never as Promise); + } + + try { + return handler(this, handle); + } catch (err) { + this[_queryStatus] |= SQLQueryStatus.error; + this.reject(err as Error); + } + } + + get active() { + return (this[_queryStatus] & SQLQueryStatus.active) != 0; + } + + set active(value) { + const status = this[_queryStatus]; + if (status & (SQLQueryStatus.cancelled | SQLQueryStatus.error)) { + return; + } + + if (value) { + this[_queryStatus] |= SQLQueryStatus.active; + } else { + this[_queryStatus] &= ~SQLQueryStatus.active; + } + } + + get cancelled() { + return (this[_queryStatus] & SQLQueryStatus.cancelled) !== 0; + } + + resolve(x: T) { + this[_queryStatus] &= ~SQLQueryStatus.active; + const handle = this.getQueryHandle(); + + if (!handle) { + return this; + } + + handle.done?.(); + + return this[_resolve](x); + } + + reject(x: Error) { + this[_queryStatus] &= ~SQLQueryStatus.active; + this[_queryStatus] |= SQLQueryStatus.error; + + if (!(this[_queryStatus] & SQLQueryStatus.invalidHandle)) { + const handle = this.getQueryHandle(); + + if (!handle) { + return this[_reject](x); + } + + handle.done?.(); + } + + return this[_reject](x); + } + + cancel() { + const status = this[_queryStatus]; + if (status & SQLQueryStatus.cancelled) { + return this; + } + + this[_queryStatus] |= SQLQueryStatus.cancelled; + + if (status & SQLQueryStatus.executed) { + const handle = this.getQueryHandle(); + + if (handle) { + handle.cancel?.(); + } + } + + return this; + } + + execute() { + this[_run](false); + return this; + } + + async run() { + if (this[_flags] & SQLQueryFlags.notTagged) { + throw notTaggedCallError(); + } + + await this[_run](true); + return this; + } + + raw() { + const handle = this.getQueryHandle(); + + if (!handle) { + return this; + } + + handle.setMode(SQLQueryResultMode.raw); + return this; + } + + simple() { + this[_flags] |= SQLQueryFlags.simple; + return this; + } + + values() { + const handle = this.getQueryHandle(); + + if (!handle) { + return this; + } + + handle.setMode(SQLQueryResultMode.values); + return this; + } + + then() { + if (this[_flags] & SQLQueryFlags.notTagged) { + throw notTaggedCallError(); + } + + this[_run](true); + + const result = super.$then.$apply(this, arguments); + $markPromiseAsHandled(result); + + return result; + } + + catch() { + if (this[_flags] & SQLQueryFlags.notTagged) { + throw notTaggedCallError(); + } + + this[_run](true); + + const result = super.catch.$apply(this, arguments); + $markPromiseAsHandled(result); + + return result; + } + + finally(_onfinally?: (() => void) | undefined | null) { + if (this[_flags] & SQLQueryFlags.notTagged) { + throw notTaggedCallError(); + } + + this[_run](true); + + return super.finally.$apply(this, arguments); + } +} + +Object.defineProperty(Query, Symbol.species, { value: PublicPromise 
}); +Object.defineProperty(Query, Symbol.toStringTag, { value: "Query" }); + +const enum SQLQueryResultMode { + objects = 0, + values = 1, + raw = 2, +} + +const enum SQLQueryFlags { + none = 0, + allowUnsafeTransaction = 1 << 0, + unsafe = 1 << 1, + bigint = 1 << 2, + simple = 1 << 3, + notTagged = 1 << 4, +} + +const enum SQLQueryStatus { + none = 0, + active = 1 << 1, + cancelled = 1 << 2, + error = 1 << 3, + executed = 1 << 4, + invalidHandle = 1 << 5, +} + +export default { + Query, + SQLQueryFlags, + SQLQueryResultMode, + SQLQueryStatus, + + symbols: { + _resolve, + _reject, + _handle, + _run, + _queryStatus, + _handler, + _strings, + _values, + _flags, + _results, + }, +}; diff --git a/src/js/internal/sql/shared.ts b/src/js/internal/sql/shared.ts new file mode 100644 index 0000000000..81c7d81545 --- /dev/null +++ b/src/js/internal/sql/shared.ts @@ -0,0 +1,559 @@ +const PublicArray = globalThis.Array; + +declare global { + interface NumberConstructor { + isSafeInteger(number: unknown): number is number; + isNaN(number: number): boolean; + } +} + +export type { SQLResultArray }; +class SQLResultArray extends PublicArray { + public count!: number | null; + public command!: string | null; + public lastInsertRowid!: number | bigint | null; + + static [Symbol.toStringTag] = "SQLResults"; + + constructor(values: T[] = []) { + super(...values); + + // match postgres's result array, in this way for in will not list the + // properties and .map will not return undefined command and count + Object.defineProperties(this, { + count: { value: null, writable: true }, + command: { value: null, writable: true }, + lastInsertRowid: { value: null, writable: true }, + }); + } + + static get [Symbol.species]() { + return Array; + } +} + +function decodeIfValid(value: string | null): string | null { + if (value) { + return decodeURIComponent(value); + } + return null; +} + +const enum SSLMode { + disable = 0, + prefer = 1, + require = 2, + verify_ca = 3, + verify_full = 4, +} +export type { SSLMode }; + +function normalizeSSLMode(value: string): SSLMode { + if (!value) { + return SSLMode.disable; + } + + value = (value + "").toLowerCase(); + switch (value) { + case "disable": + return SSLMode.disable; + case "prefer": + return SSLMode.prefer; + case "require": + case "required": + return SSLMode.require; + case "verify-ca": + case "verify_ca": + return SSLMode.verify_ca; + case "verify-full": + case "verify_full": + return SSLMode.verify_full; + default: { + break; + } + } + + throw $ERR_INVALID_ARG_VALUE("sslmode", value); +} + +export type { SQLHelper }; +class SQLHelper { + public readonly value: T; + public readonly columns: (keyof T)[]; + + constructor(value: T, keys?: (keyof T)[]) { + if (keys !== undefined && keys.length === 0) { + keys = Object.keys(value[0]) as (keyof T)[]; + } + + if (keys !== undefined) { + for (let key of keys) { + if (typeof key === "string") { + const asNumber = Number(key); + if (Number.isNaN(asNumber)) { + continue; + } + key = asNumber as keyof T; + } + + if (typeof key !== "string") { + if (Number.isSafeInteger(key)) { + if (key >= 0 && key <= 64 * 1024) { + continue; + } + } + + throw new Error(`Keys must be strings or numbers: ${String(key)}`); + } + } + } + + this.value = value; + this.columns = keys ?? []; + } +} + +function parseDefinitelySqliteUrl(value: string | URL | null): string | null { + if (value === null) return null; + const str = value instanceof URL ? 
value.toString() : value; + + if (str === ":memory:" || str === "sqlite://:memory:" || str === "sqlite:memory") return ":memory:"; + + // For any URL-like string, just extract the path portion + // Strip the protocol and handle query params + let path: string; + + if (str.startsWith("sqlite://")) { + path = str.slice(9); // "sqlite://".length + } else if (str.startsWith("sqlite:")) { + path = str.slice(7); // "sqlite:".length + } else if (str.startsWith("file://")) { + // For file:// URLs, use Bun's built-in converter for correct platform handling + // This properly handles Windows paths, UNC paths, etc. + try { + return Bun.fileURLToPath(str); + } catch { + // Fallback: just strip the protocol + path = str.slice(7); // "file://".length + } + } else if (str.startsWith("file:")) { + path = str.slice(5); // "file:".length + } else { + // Not a SQLite URL + return null; + } + + // Remove query parameters if present (only looking for ?) + const queryIndex = path.indexOf("?"); + if (queryIndex !== -1) { + path = path.slice(0, queryIndex); + } + + return path; +} + +function parseSQLiteOptionsWithQueryParams( + sqliteOptions: Bun.SQL.__internal.DefinedSQLiteOptions, + urlString: string | URL | null | undefined, +): Bun.SQL.__internal.DefinedSQLiteOptions { + if (!urlString) return sqliteOptions; + + let params: URLSearchParams | null = null; + + if (urlString instanceof URL) { + params = urlString.searchParams; + } else { + const queryIndex = urlString.indexOf("?"); + if (queryIndex === -1) return sqliteOptions; + + const queryString = urlString.slice(queryIndex + 1); + params = new URLSearchParams(queryString); + } + + const mode = params.get("mode"); + + if (mode === "ro") { + sqliteOptions.readonly = true; + } else if (mode === "rw") { + sqliteOptions.readonly = false; + } else if (mode === "rwc") { + sqliteOptions.readonly = false; + sqliteOptions.create = true; + } + + return sqliteOptions; +} + +function isOptionsOfAdapter( + options: Bun.SQL.Options, + adapter: A, +): options is Extract { + return options.adapter === adapter; +} + +function assertIsOptionsOfAdapter( + options: Bun.SQL.Options, + adapter: A, +): asserts options is Extract { + if (!isOptionsOfAdapter(options, adapter)) { + throw new Error(`Expected adapter to be ${adapter}, but got '${options.adapter}'`); + } +} + +function parseOptions( + stringOrUrlOrOptions: Bun.SQL.Options | string | URL | undefined, + definitelyOptionsButMaybeEmpty: Bun.SQL.Options, +): Bun.SQL.__internal.DefinedOptions { + const env = Bun.env; + + let [stringOrUrl = env.POSTGRES_URL || env.DATABASE_URL || env.PGURL || env.PG_URL || null, options]: [ + string | URL | null, + Bun.SQL.Options, + ] = + typeof stringOrUrlOrOptions === "string" || stringOrUrlOrOptions instanceof URL + ? [stringOrUrlOrOptions, definitelyOptionsButMaybeEmpty] + : stringOrUrlOrOptions + ? 
[null, { ...stringOrUrlOrOptions, ...definitelyOptionsButMaybeEmpty }] + : [null, definitelyOptionsButMaybeEmpty]; + + if (options.adapter === undefined && stringOrUrl !== null) { + const sqliteUrl = parseDefinitelySqliteUrl(stringOrUrl); + + if (sqliteUrl !== null) { + const sqliteOptions: Bun.SQL.__internal.DefinedSQLiteOptions = { + ...options, + adapter: "sqlite", + filename: sqliteUrl, + }; + + return parseSQLiteOptionsWithQueryParams(sqliteOptions, stringOrUrl); + } + } + + if (options.adapter === "sqlite") { + let filenameFromOptions = options.filename || stringOrUrl; + + // Parse sqlite:// URLs when adapter is explicitly sqlite + if (typeof filenameFromOptions === "string" || filenameFromOptions instanceof URL) { + const parsed = parseDefinitelySqliteUrl(filenameFromOptions); + if (parsed !== null) { + filenameFromOptions = parsed; + } + } + + const sqliteOptions: Bun.SQL.__internal.DefinedSQLiteOptions = { + ...options, + adapter: "sqlite", + filename: filenameFromOptions || ":memory:", + }; + + return parseSQLiteOptionsWithQueryParams(sqliteOptions, stringOrUrl); + } + + if (options.adapter !== undefined && options.adapter !== "postgres" && options.adapter !== "postgresql") { + options.adapter satisfies never; // This will type error if we support a new adapter in the future, which will let us know to update this check + throw new Error(`Unsupported adapter: ${options.adapter}. Supported adapters: "postgres", "sqlite"`); + } + + // @ts-expect-error Compatibility + if (options.adapter === "postgresql") options.adapter = "postgres"; + if (options.adapter === undefined) options.adapter = "postgres"; + + assertIsOptionsOfAdapter(options, "postgres"); + + let hostname: string | undefined, + port: number | string | undefined, + username: string | null | undefined, + password: string | (() => Bun.MaybePromise) | undefined | null, + database: string | undefined, + tls: Bun.TLSOptions | boolean | undefined, + url: URL | undefined, + query: string, + idleTimeout: number | null | undefined, + connectionTimeout: number | null | undefined, + maxLifetime: number | null | undefined, + onconnect: ((client: Bun.SQL) => void) | undefined, + onclose: ((client: Bun.SQL) => void) | undefined, + max: number | null | undefined, + bigint: boolean | undefined, + path: string | string[]; + + let prepare = true; + let sslMode: SSLMode = SSLMode.disable; + + if (!stringOrUrl || (typeof stringOrUrl === "string" && stringOrUrl.length === 0)) { + let urlString = env.POSTGRES_URL || env.DATABASE_URL || env.PGURL || env.PG_URL; + + if (!urlString) { + urlString = env.TLS_POSTGRES_DATABASE_URL || env.TLS_DATABASE_URL; + if (urlString) { + sslMode = SSLMode.require; + } + } + + if (urlString) { + // Check if it's a SQLite URL before trying to parse as regular URL + const sqliteUrl = parseDefinitelySqliteUrl(urlString); + if (sqliteUrl !== null) { + const sqliteOptions: Bun.SQL.__internal.DefinedSQLiteOptions = { + ...options, + adapter: "sqlite", + filename: sqliteUrl, + }; + return parseSQLiteOptionsWithQueryParams(sqliteOptions, urlString); + } + + url = new URL(urlString); + } + } else if (stringOrUrl && typeof stringOrUrl === "object") { + if (stringOrUrl instanceof URL) { + url = stringOrUrl; + } else if (options?.url) { + const _url = options.url; + if (typeof _url === "string") { + url = new URL(_url); + } else if (_url && typeof _url === "object" && _url instanceof URL) { + url = _url; + } + } + if (options?.tls) { + sslMode = SSLMode.require; + tls = options.tls; + } + } else if (typeof stringOrUrl === 
"string") { + try { + url = new URL(stringOrUrl); + } catch (e) { + throw new Error(`Invalid URL '${stringOrUrl}' for postgres. Did you mean to specify \`{ adapter: "sqlite" }\`?`, { + cause: e, + }); + } + } + query = ""; + + if (url) { + ({ hostname, port, username, password } = options); + // object overrides url + hostname ||= url.hostname; + port ||= url.port; + username ||= decodeIfValid(url.username); + password ||= decodeIfValid(url.password); + + const queryObject = url.searchParams.toJSON(); + for (const key in queryObject) { + if (key.toLowerCase() === "sslmode") { + sslMode = normalizeSSLMode(queryObject[key]); + } else if (key.toLowerCase() === "path") { + path = queryObject[key]; + } else { + // this is valid for postgres for other databases it might not be valid + // check adapter then implement for other databases + // encode string with \0 as finalizer + // must be key\0value\0 + query += `${key}\0${queryObject[key]}\0`; + } + } + query = query.trim(); + } + hostname ||= options.hostname || options.host || env.PGHOST || "localhost"; + + port ||= Number(options.port || env.PGPORT || 5432); + + path ||= (options as { path?: string }).path || ""; + // add /.s.PGSQL.${port} if it doesn't exist + if (path && path?.indexOf("/.s.PGSQL.") === -1) { + path = `${path}/.s.PGSQL.${port}`; + } + + username ||= + options.username || options.user || env.PGUSERNAME || env.PGUSER || env.USER || env.USERNAME || "postgres"; + database ||= + options.database || options.db || decodeIfValid((url?.pathname ?? "").slice(1)) || env.PGDATABASE || username; + password ||= options.password || options.pass || env.PGPASSWORD || ""; + const connection = options.connection; + if (connection && $isObject(connection)) { + for (const key in connection) { + if (connection[key] !== undefined) { + query += `${key}\0${connection[key]}\0`; + } + } + } + + tls ||= options.tls || options.ssl; + max = options.max; + + idleTimeout ??= options.idleTimeout; + idleTimeout ??= options.idle_timeout; + connectionTimeout ??= options.connectionTimeout; + connectionTimeout ??= options.connection_timeout; + connectionTimeout ??= options.connectTimeout; + connectionTimeout ??= options.connect_timeout; + maxLifetime ??= options.maxLifetime; + maxLifetime ??= options.max_lifetime; + bigint ??= options.bigint; + // we need to explicitly set prepare to false if it is false + if (options.prepare === false) { + prepare = false; + } + + onconnect ??= options.onconnect; + onclose ??= options.onclose; + if (onconnect !== undefined) { + if (!$isCallable(onconnect)) { + throw $ERR_INVALID_ARG_TYPE("onconnect", "function", onconnect); + } + } + + if (onclose !== undefined) { + if (!$isCallable(onclose)) { + throw $ERR_INVALID_ARG_TYPE("onclose", "function", onclose); + } + } + + if (idleTimeout != null) { + idleTimeout = Number(idleTimeout); + if (idleTimeout > 2 ** 31 || idleTimeout < 0 || idleTimeout !== idleTimeout) { + throw $ERR_INVALID_ARG_VALUE( + "options.idle_timeout", + idleTimeout, + "must be a non-negative integer less than 2^31", + ); + } + idleTimeout *= 1000; + } + + if (connectionTimeout != null) { + connectionTimeout = Number(connectionTimeout); + if (connectionTimeout > 2 ** 31 || connectionTimeout < 0 || connectionTimeout !== connectionTimeout) { + throw $ERR_INVALID_ARG_VALUE( + "options.connection_timeout", + connectionTimeout, + "must be a non-negative integer less than 2^31", + ); + } + connectionTimeout *= 1000; + } + + if (maxLifetime != null) { + maxLifetime = Number(maxLifetime); + if (maxLifetime > 2 ** 31 || 
maxLifetime < 0 || maxLifetime !== maxLifetime) { + throw $ERR_INVALID_ARG_VALUE( + "options.max_lifetime", + maxLifetime, + "must be a non-negative integer less than 2^31", + ); + } + maxLifetime *= 1000; + } + + if (max != null) { + max = Number(max); + if (max > 2 ** 31 || max < 1 || max !== max) { + throw $ERR_INVALID_ARG_VALUE("options.max", max, "must be a non-negative integer between 1 and 2^31"); + } + } + + if (sslMode !== SSLMode.disable && !tls?.serverName) { + if (hostname) { + tls = { ...tls, serverName: hostname }; + } else if (tls) { + tls = true; + } + } + + if (tls && sslMode === SSLMode.disable) { + sslMode = SSLMode.prefer; + } + port = Number(port); + + if (!Number.isSafeInteger(port) || port < 1 || port > 65535) { + throw $ERR_INVALID_ARG_VALUE("port", port, "must be a non-negative integer between 1 and 65535"); + } + + const ret: Bun.SQL.__internal.DefinedPostgresOptions = { + adapter: "postgres", + hostname, + port, + username, + password, + database, + tls, + prepare, + bigint, + sslMode, + query, + max: max || 10, + }; + + if (idleTimeout != null) { + ret.idleTimeout = idleTimeout; + } + + if (connectionTimeout != null) { + ret.connectionTimeout = connectionTimeout; + } + + if (maxLifetime != null) { + ret.maxLifetime = maxLifetime; + } + + if (onconnect !== undefined) { + ret.onconnect = onconnect; + } + + if (onclose !== undefined) { + ret.onclose = onclose; + } + + return ret; +} + +export type OnConnected = ( + ...args: [error: null, connection: Connection] | [error: Error, connection: null] +) => void; + +export interface TransactionCommands { + BEGIN: string; + COMMIT: string; + ROLLBACK: string; + SAVEPOINT: string; + RELEASE_SAVEPOINT: string | null; + ROLLBACK_TO_SAVEPOINT: string; + BEFORE_COMMIT_OR_ROLLBACK?: string | null; +} + +export interface DatabaseAdapter { + normalizeQuery(strings: string | TemplateStringsArray, values: unknown[]): [sql: string, values: unknown[]]; + createQueryHandle(sql: string, values: unknown[], flags: number): QueryHandle; + connect(onConnected: OnConnected, reserved?: boolean): void; + release(connection: ConnectionHandle, connectingEvent?: boolean): void; + close(options?: { timeout?: number }): Promise; + flush(): void; + isConnected(): boolean; + get closed(): boolean; + + supportsReservedConnections?(): boolean; + getConnectionForQuery?(pooledConnection: Connection): ConnectionHandle | null; + attachConnectionCloseHandler?(connection: Connection, handler: () => void): void; + detachConnectionCloseHandler?(connection: Connection, handler: () => void): void; + + getTransactionCommands(options?: string): TransactionCommands; + getDistributedTransactionCommands?(name: string): TransactionCommands | null; + + validateTransactionOptions?(options: string): { valid: boolean; error?: string }; + validateDistributedTransactionName?(name: string): { valid: boolean; error?: string }; + + getCommitDistributedSQL?(name: string): string; + getRollbackDistributedSQL?(name: string): string; +} + +export default { + parseDefinitelySqliteUrl, + isOptionsOfAdapter, + assertIsOptionsOfAdapter, + parseOptions, + SQLHelper, + SSLMode, + normalizeSSLMode, + SQLResultArray, +}; diff --git a/src/js/internal/sql/sqlite.ts b/src/js/internal/sql/sqlite.ts new file mode 100644 index 0000000000..42b7cc439a --- /dev/null +++ b/src/js/internal/sql/sqlite.ts @@ -0,0 +1,785 @@ +import type * as BunSQLiteModule from "bun:sqlite"; +import type { BaseQueryHandle, Query, SQLQueryResultMode } from "./query"; +import type { DatabaseAdapter, OnConnected, 
SQLHelper, SQLResultArray } from "./shared"; + +const { SQLHelper, SQLResultArray } = require("internal/sql/shared"); +const { + Query, + SQLQueryResultMode, + symbols: { _strings, _values }, +} = require("internal/sql/query"); +const { escapeIdentifier, connectionClosedError } = require("internal/sql/utils"); +const { SQLiteError } = require("internal/sql/errors"); + +let lazySQLiteModule: typeof BunSQLiteModule; +function getSQLiteModule() { + if (!lazySQLiteModule) { + lazySQLiteModule = require("../../bun/sqlite.ts"); + } + return lazySQLiteModule; +} + +const enum SQLCommand { + insert = 0, + update = 1, + updateSet = 2, + where = 3, + whereIn = 4, + none = -1, +} + +interface SQLParsedInfo { + command: SQLCommand; + firstKeyword: string; // SELECT, INSERT, UPDATE, etc. + hasReturning: boolean; +} + +function commandToString(command: SQLCommand): string { + switch (command) { + case SQLCommand.insert: + return "INSERT"; + case SQLCommand.updateSet: + case SQLCommand.update: + return "UPDATE"; + case SQLCommand.whereIn: + case SQLCommand.where: + return "WHERE"; + default: + return ""; + } +} + +function matchAsciiIgnoreCase(str: string, start: number, end: number, target: string): boolean { + if (end - start !== target.length) return false; + for (let i = 0; i < target.length; i++) { + const c = str.charCodeAt(start + i); + const t = target.charCodeAt(i); + + if (c !== t) { + if (c >= 65 && c <= 90) { + if (c + 32 !== t) return false; + } else if (c >= 97 && c <= 122) { + if (c - 32 !== t) return false; + } else { + return false; + } + } + } + + return true; +} + +// Check if character is whitespace or delimiter (anything that's not a letter/digit/underscore) +function isTokenDelimiter(code: number): boolean { + // Quick check for common ASCII whitespace + if (code <= 32) return true; + // Letters A-Z, a-z + if ((code >= 65 && code <= 90) || (code >= 97 && code <= 122)) return false; + // Digits 0-9 + if (code >= 48 && code <= 57) return false; + // Underscore (allowed in SQL identifiers) + if (code === 95) return false; + // Everything else is a delimiter (including Unicode whitespace, punctuation, etc.) 
+ return true; +} + +function parseSQLQuery(query: string): SQLParsedInfo { + const text_len = query.length; + + // Skip leading whitespace/delimiters + let i = 0; + while (i < text_len && isTokenDelimiter(query.charCodeAt(i))) { + i++; + } + + let command = SQLCommand.none; + let firstKeyword = ""; + let hasReturning = false; + let quotedDouble = false; + let tokenStart = i; + + while (i < text_len) { + const char = query[i]; + const charCode = query.charCodeAt(i); + + // Handle quotes BEFORE checking delimiters, since quotes are also delimiters + // Handle single quotes - skip entire string literal + if (!quotedDouble && char === "'") { + // Process any pending token before the quote + if (i > tokenStart) { + // We have a token to process before the quote + // Check what token it is + // Track the first keyword for the command string + if (!firstKeyword) { + if (matchAsciiIgnoreCase(query, tokenStart, i, "select")) { + firstKeyword = "SELECT"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "insert")) { + firstKeyword = "INSERT"; + command = SQLCommand.insert; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "update")) { + firstKeyword = "UPDATE"; + command = SQLCommand.update; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "delete")) { + firstKeyword = "DELETE"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "create")) { + firstKeyword = "CREATE"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "drop")) { + firstKeyword = "DROP"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "alter")) { + firstKeyword = "ALTER"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "pragma")) { + firstKeyword = "PRAGMA"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "explain")) { + firstKeyword = "EXPLAIN"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "with")) { + firstKeyword = "WITH"; + } + } else { + // After we have the first keyword, look for other keywords + if (matchAsciiIgnoreCase(query, tokenStart, i, "where")) { + command = SQLCommand.where; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "set")) { + if (command === SQLCommand.update) { + command = SQLCommand.updateSet; + } + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "in")) { + if (command === SQLCommand.where) { + command = SQLCommand.whereIn; + } + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "returning")) { + hasReturning = true; + } + } + } + + // Now skip the entire string literal + i++; + while (i < text_len) { + if (query[i] === "'") { + // Check for escaped quote + if (i + 1 < text_len && query[i + 1] === "'") { + i += 2; // Skip escaped quote + continue; + } + i++; + break; + } + i++; + } + // After string, skip any whitespace and reset token start + while (i < text_len && isTokenDelimiter(query.charCodeAt(i))) { + i++; + } + tokenStart = i; + continue; + } + + if (char === '"') { + quotedDouble = !quotedDouble; + i++; + continue; + } + + if (quotedDouble) { + i++; + continue; + } + + if (isTokenDelimiter(charCode)) { + if (i > tokenStart) { + // Track the first keyword for the command string + if (!firstKeyword) { + if (matchAsciiIgnoreCase(query, tokenStart, i, "select")) { + firstKeyword = "SELECT"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "insert")) { + firstKeyword = "INSERT"; + command = SQLCommand.insert; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "update")) { + firstKeyword = "UPDATE"; + command = SQLCommand.update; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "delete")) { + firstKeyword 
= "DELETE"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "create")) { + firstKeyword = "CREATE"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "drop")) { + firstKeyword = "DROP"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "alter")) { + firstKeyword = "ALTER"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "pragma")) { + firstKeyword = "PRAGMA"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "explain")) { + firstKeyword = "EXPLAIN"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "with")) { + firstKeyword = "WITH"; + } + } else { + // After we have the first keyword, look for other keywords + if (matchAsciiIgnoreCase(query, tokenStart, i, "where")) { + command = SQLCommand.where; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "set")) { + if (command === SQLCommand.update) { + command = SQLCommand.updateSet; + } + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "in")) { + if (command === SQLCommand.where) { + command = SQLCommand.whereIn; + } + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "returning")) { + hasReturning = true; + } + } + } + + // Skip delimiters but stop at quotes (they need special handling) + while (++i < text_len) { + const nextChar = query[i]; + if (nextChar === "'" || nextChar === '"') { + break; // Stop at quotes, they'll be handled in next iteration + } + if (!isTokenDelimiter(query.charCodeAt(i))) { + break; // Stop at non-delimiter + } + } + tokenStart = i; + continue; + } + i++; + } + + // Handle last token if we reached end of string + if (i >= text_len && i > tokenStart && !quotedDouble) { + // Track the first keyword for the command string + if (!firstKeyword) { + if (matchAsciiIgnoreCase(query, tokenStart, i, "select")) { + firstKeyword = "SELECT"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "insert")) { + firstKeyword = "INSERT"; + command = SQLCommand.insert; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "update")) { + firstKeyword = "UPDATE"; + command = SQLCommand.update; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "delete")) { + firstKeyword = "DELETE"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "create")) { + firstKeyword = "CREATE"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "drop")) { + firstKeyword = "DROP"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "alter")) { + firstKeyword = "ALTER"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "pragma")) { + firstKeyword = "PRAGMA"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "explain")) { + firstKeyword = "EXPLAIN"; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "with")) { + firstKeyword = "WITH"; + } + } else { + // After we have the first keyword, look for other keywords + if (matchAsciiIgnoreCase(query, tokenStart, i, "where")) { + command = SQLCommand.where; + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "set")) { + if (command === SQLCommand.update) { + command = SQLCommand.updateSet; + } + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "in")) { + if (command === SQLCommand.where) { + command = SQLCommand.whereIn; + } + } else if (matchAsciiIgnoreCase(query, tokenStart, i, "returning")) { + hasReturning = true; + } + } + } + + return { command, firstKeyword, hasReturning }; +} + +export class SQLiteQueryHandle implements BaseQueryHandle { + private mode = SQLQueryResultMode.objects; + + private readonly sql: string; + private readonly values: unknown[]; + private readonly parsedInfo: SQLParsedInfo; + + 
public constructor(sql: string, values: unknown[]) {
+    this.sql = sql;
+    this.values = values;
+    // Parse the SQL query once when creating the handle
+    this.parsedInfo = parseSQLQuery(sql);
+  }
+
+  setMode(mode: SQLQueryResultMode) {
+    this.mode = mode;
+  }
+
+  run(db: BunSQLiteModule.Database, query: Query) {
+    if (!db) {
+      throw new SQLiteError("SQLite database not initialized", {
+        code: "SQLITE_CONNECTION_CLOSED",
+        errno: 0,
+      });
+    }
+
+    const { sql, values, mode, parsedInfo } = this;
+
+    try {
+      const command = parsedInfo.firstKeyword;
+
+      // Statements that can return rows (SELECT, PRAGMA, WITH, EXPLAIN, or anything with RETURNING)
+      // must go through a prepared statement; everything else goes through db.run(), which also
+      // handles multiple statements in a single string.
+      if (
+        command === "SELECT" ||
+        command === "PRAGMA" ||
+        command === "WITH" ||
+        command === "EXPLAIN" ||
+        parsedInfo.hasReturning
+      ) {
+        // SELECT queries must use prepared statements for results
+        const stmt = db.prepare(sql);
+        let result: unknown[] | undefined;
+
+        if (mode === SQLQueryResultMode.values) {
+          result = stmt.values.$apply(stmt, values);
+        } else if (mode === SQLQueryResultMode.raw) {
+          result = stmt.raw.$apply(stmt, values);
+        } else {
+          result = stmt.all.$apply(stmt, values);
+        }
+
+        const sqlResult = $isArray(result) ? new SQLResultArray(result) : new SQLResultArray([result]);
+
+        sqlResult.command = command;
+        sqlResult.count = $isArray(result) ? result.length : 1;
+
+        stmt.finalize();
+        query.resolve(sqlResult);
+      } else {
+        // For INSERT/UPDATE/DELETE/CREATE etc., use db.run() which handles multiple statements natively
+        const changes = db.run.$apply(db, [sql].concat(values));
+        const sqlResult = new SQLResultArray();
+
+        sqlResult.command = command;
+        sqlResult.count = changes.changes;
+        sqlResult.lastInsertRowid = changes.lastInsertRowid;
+
+        query.resolve(sqlResult);
+      }
+    } catch (err) {
+      // Convert bun:sqlite errors to SQLiteError
+      if (err && typeof err === "object" && "name" in err && err.name === "SQLiteError") {
+        // Extract SQLite error properties
+        const code = "code" in err ? String(err.code) : "SQLITE_ERROR";
+        const errno = "errno" in err ? Number(err.errno) : 1;
+        const byteOffset = "byteOffset" in err ? Number(err.byteOffset) : undefined;
+        const message = "message" in err ?
String(err.message) : "SQLite error"; + + throw new SQLiteError(message, { code, errno, byteOffset }); + } + // Re-throw if it's not a SQLite error + throw err; + } + } +} + +export class SQLiteAdapter + implements DatabaseAdapter +{ + public readonly connectionInfo: Bun.SQL.__internal.DefinedSQLiteOptions; + public db: BunSQLiteModule.Database | null = null; + public storedError: Error | null = null; + private _closed: boolean = false; + public queries: Set> = new Set(); + + constructor(connectionInfo: Bun.SQL.__internal.DefinedSQLiteOptions) { + this.connectionInfo = connectionInfo; + + try { + const SQLiteModule = getSQLiteModule(); + let { filename } = this.connectionInfo; + + if (filename instanceof URL) { + filename = filename.toString(); + } + + const options: BunSQLiteModule.DatabaseOptions = {}; + + if (this.connectionInfo.readonly) { + options.readonly = true; + } else { + options.create = this.connectionInfo.create !== false; + options.readwrite = true; + } + + if ("safeIntegers" in this.connectionInfo) { + options.safeIntegers = this.connectionInfo.safeIntegers; + } + if ("strict" in this.connectionInfo) { + options.strict = this.connectionInfo.strict; + } + + this.db = new SQLiteModule.Database(filename, options); + + try { + const onconnect = this.connectionInfo.onconnect; + if (onconnect) onconnect(null); + } catch {} + } catch (err) { + // Convert bun:sqlite initialization errors to SQLiteError + if (err && typeof err === "object" && "name" in err && err.name === "SQLiteError") { + const code = "code" in err ? String(err.code) : "SQLITE_ERROR"; + const errno = "errno" in err ? Number(err.errno) : 1; + const byteOffset = "byteOffset" in err ? Number(err.byteOffset) : undefined; + const message = "message" in err ? String(err.message) : "SQLite error"; + + this.storedError = new SQLiteError(message, { code, errno, byteOffset }); + } else { + this.storedError = err as Error; + } + + this.db = null; + try { + const onconnect = this.connectionInfo.onconnect; + if (onconnect) onconnect(this.storedError ?? (err as Error)); + } catch {} + } + } + + createQueryHandle(sql: string, values: unknown[] | undefined | null = []): SQLiteQueryHandle { + return new SQLiteQueryHandle(sql, values ?? 
[]);
+  }
+
+  normalizeQuery(strings: string | TemplateStringsArray, values: unknown[], binding_idx = 1): [string, unknown[]] {
+    if (typeof strings === "string") {
+      // identifier or unsafe query
+      return [strings, values || []];
+    }
+
+    if (!$isArray(strings)) {
+      // we should not hit this path
+      throw new SyntaxError("Invalid query: SQL Fragment cannot be executed or was misused");
+    }
+
+    const str_len = strings.length;
+    if (str_len === 0) {
+      return ["", []];
+    }
+
+    let binding_values: any[] = [];
+    let query = "";
+    let cachedCommand: SQLCommand | null = null;
+
+    for (let i = 0; i < str_len; i++) {
+      const string = strings[i];
+
+      if (typeof string === "string") {
+        query += string;
+
+        if (values.length > i) {
+          const value = values[i];
+
+          if (value instanceof Query) {
+            const q = value as Query;
+            const [sub_query, sub_values] = this.normalizeQuery(q[_strings], q[_values], binding_idx);
+
+            query += sub_query;
+            for (let j = 0; j < sub_values.length; j++) {
+              binding_values.push(sub_values[j]);
+            }
+            binding_idx += sub_values.length;
+          } else if (value instanceof SQLHelper) {
+            if (cachedCommand === null) {
+              const { command } = parseSQLQuery(query);
+              cachedCommand = command;
+            }
+            const command = cachedCommand;
+
+            // helpers are only allowed for INSERT, UPDATE (SET) and WHERE ... IN commands
+            if (command === SQLCommand.none || command === SQLCommand.where) {
+              throw new SyntaxError("Helpers are only allowed for INSERT, UPDATE and WHERE IN commands");
+            }
+            const { columns, value: items } = value as SQLHelper;
+            const columnCount = columns.length;
+            if (columnCount === 0 && command !== SQLCommand.whereIn) {
+              throw new SyntaxError(`Cannot ${commandToString(command)} with no columns`);
+            }
+            const lastColumnIndex = columns.length - 1;
+
+            if (command === SQLCommand.insert) {
+              //
+              // insert into users ${sql(users)} or insert into users ${sql(user)}
+              //
+
+              query += "(";
+              for (let j = 0; j < columnCount; j++) {
+                query += escapeIdentifier(columns[j]);
+                if (j < lastColumnIndex) {
+                  query += ", ";
+                }
+              }
+              query += ") VALUES";
+              if ($isArray(items)) {
+                const itemsCount = items.length;
+                const lastItemIndex = itemsCount - 1;
+                for (let j = 0; j < itemsCount; j++) {
+                  query += "(";
+                  const item = items[j];
+                  for (let k = 0; k < columnCount; k++) {
+                    const column = columns[k];
+                    const columnValue = item[column];
+                    // SQLite uses ? for placeholders, not $1, $2, etc.
+                    query += `?${k < lastColumnIndex ? ", " : ""}`;
+                    if (typeof columnValue === "undefined") {
+                      binding_values.push(null);
+                    } else {
+                      binding_values.push(columnValue);
+                    }
+                  }
+                  if (j < lastItemIndex) {
+                    query += "),";
+                  } else {
+                    query += ") "; // the user can add RETURNING * or RETURNING id
+                  }
+                }
+              } else {
+                query += "(";
+                const item = items;
+                for (let j = 0; j < columnCount; j++) {
+                  const column = columns[j];
+                  const columnValue = item[column];
+                  // SQLite uses ? for placeholders
+                  query += `?${j < lastColumnIndex ? ", " : ""}`;
+                  if (typeof columnValue === "undefined") {
+                    binding_values.push(null);
+                  } else {
+                    binding_values.push(columnValue);
+                  }
+                }
+                query += ") "; // the user can add RETURNING * or RETURNING id
+              }
+            } else if (command === SQLCommand.whereIn) {
+              // SELECT * FROM users WHERE id IN (${sql([1, 2, 3])})
+              if (!$isArray(items)) {
+                throw new SyntaxError("An array of values is required for WHERE IN helper");
+              }
+              const itemsCount = items.length;
+              const lastItemIndex = itemsCount - 1;
+              query += "(";
+              for (let j = 0; j < itemsCount; j++) {
+                // SQLite uses ? 
for placeholders + query += `?${j < lastItemIndex ? ", " : ""}`; + if (columnCount > 0) { + // we must use a key from a object + if (columnCount > 1) { + // we should not pass multiple columns here + throw new SyntaxError("Cannot use WHERE IN helper with multiple columns"); + } + // SELECT * FROM users WHERE id IN (${sql(users, "id")}) + const value = items[j]; + if (typeof value === "undefined") { + binding_values.push(null); + } else { + const value_from_key = value[columns[0]]; + + if (typeof value_from_key === "undefined") { + binding_values.push(null); + } else { + binding_values.push(value_from_key); + } + } + } else { + const value = items[j]; + if (typeof value === "undefined") { + binding_values.push(null); + } else { + binding_values.push(value); + } + } + } + query += ") "; // more conditions can be added after this + } else { + // UPDATE users SET ${sql({ name: "John", age: 31 })} WHERE id = 1 + let item; + if ($isArray(items)) { + if (items.length > 1) { + throw new SyntaxError("Cannot use array of objects for UPDATE"); + } + item = items[0]; + } else { + item = items; + } + // no need to include if is updateSet + if (command === SQLCommand.update) { + query += " SET "; + } + for (let i = 0; i < columnCount; i++) { + const column = columns[i]; + const columnValue = item[column]; + // SQLite uses ? for placeholders + query += `${escapeIdentifier(column)} = ?${i < lastColumnIndex ? ", " : ""}`; + if (typeof columnValue === "undefined") { + binding_values.push(null); + } else { + binding_values.push(columnValue); + } + } + query += " "; // the user can add where clause after this + } + } else { + // SQLite uses ? for placeholders + query += `? `; + if (typeof value === "undefined") { + binding_values.push(null); + } else { + binding_values.push(value); + } + } + } + } else { + throw new SyntaxError("Invalid query: SQL Fragment cannot be executed or was misused"); + } + } + + return [query, binding_values]; + } + + connect(onConnected: OnConnected, reserved?: boolean) { + if (this._closed) { + return onConnected(connectionClosedError(), null); + } + + // SQLite doesn't support reserved connections since it doesn't have a connection pool + // Reserved connections are meant for exclusive use from a pool, which SQLite doesn't have + if (reserved) { + return onConnected(new Error("SQLite doesn't support connection reservation (no connection pool)"), null); + } + + // Since SQLite connection is synchronous, we immediately know the result + if (this.storedError) { + onConnected(this.storedError, null); + } else if (this.db) { + onConnected(null, this.db); + } else { + onConnected(connectionClosedError(), null); + } + } + + release(_connection: BunSQLiteModule.Database, _connectingEvent?: boolean) { + // SQLite doesn't need to release connections since we don't pool. 
We + // shouldn't throw or prevent the user facing API from releasing connections + // so we can just no-op here + } + + async close(_options?: { timeout?: number }) { + if (this._closed) { + return; + } + + this._closed = true; + + this.storedError = new Error("Connection closed"); + + if (this.db) { + try { + this.db.close(); + } catch {} + this.db = null; + } + + try { + const onclose = this.connectionInfo.onclose; + if (onclose) onclose(this.storedError); + } catch {} + } + + flush() { + // SQLite executes queries synchronously, so there's nothing to flush + throw new Error("SQLite doesn't support flush() - queries are executed synchronously"); + } + + isConnected() { + return this.db !== null; + } + + get closed(): boolean { + return this._closed; + } + + supportsReservedConnections(): boolean { + // SQLite doesn't have a connection pool, so it doesn't support reserved connections + return false; + } + + getConnectionForQuery(connection: BunSQLiteModule.Database): BunSQLiteModule.Database { + return connection; + } + + getTransactionCommands(options?: string): import("./shared").TransactionCommands { + let BEGIN = "BEGIN"; + + if (options) { + // SQLite supports DEFERRED, IMMEDIATE, EXCLUSIVE + const upperOptions = options.toUpperCase(); + if (upperOptions === "DEFERRED" || upperOptions === "IMMEDIATE" || upperOptions === "EXCLUSIVE") { + BEGIN = `BEGIN ${upperOptions}`; + } else if (upperOptions === "READONLY" || upperOptions === "READ") { + // SQLite doesn't support readonly transactions + throw new Error(`SQLite doesn't support '${options}' transaction mode. Use DEFERRED, IMMEDIATE, or EXCLUSIVE.`); + } else { + BEGIN = `BEGIN ${options}`; + } + } + + return { + BEGIN, + COMMIT: "COMMIT", + ROLLBACK: "ROLLBACK", + SAVEPOINT: "SAVEPOINT", + RELEASE_SAVEPOINT: "RELEASE SAVEPOINT", + ROLLBACK_TO_SAVEPOINT: "ROLLBACK TO SAVEPOINT", + }; + } + + getDistributedTransactionCommands(_name: string): import("./shared").TransactionCommands | null { + // SQLite doesn't support distributed transactions + return null; + } + + validateTransactionOptions(options: string): { valid: boolean; error?: string } { + if (!options) { + return { valid: true }; + } + + const upperOptions = options.toUpperCase(); + if (upperOptions === "READONLY" || upperOptions === "READ") { + return { + valid: false, + error: `SQLite doesn't support '${options}' transaction mode. 
Use DEFERRED, IMMEDIATE, or EXCLUSIVE.`, + }; + } + + // SQLite will handle validation of other options + return { valid: true }; + } + + validateDistributedTransactionName(): { valid: boolean; error?: string } { + return { + valid: false, + error: "SQLite doesn't support distributed transactions.", + }; + } + + getCommitDistributedSQL(): string { + throw new Error("SQLite doesn't support distributed transactions."); + } + + getRollbackDistributedSQL(): string { + throw new Error("SQLite doesn't support distributed transactions."); + } +} + +export default { + SQLiteAdapter, + SQLCommand, + commandToString, + parseSQLQuery, +}; diff --git a/src/js/internal/sql/utils.ts b/src/js/internal/sql/utils.ts new file mode 100644 index 0000000000..8b2e0b68ad --- /dev/null +++ b/src/js/internal/sql/utils.ts @@ -0,0 +1,26 @@ +const { hideFromStack } = require("../shared.ts"); +const { PostgresError } = require("./errors"); + +function connectionClosedError() { + return new PostgresError("Connection closed", { + code: "ERR_POSTGRES_CONNECTION_CLOSED", + }); +} +hideFromStack(connectionClosedError); + +function notTaggedCallError() { + return new PostgresError("Query not called as a tagged template literal", { + code: "ERR_POSTGRES_NOT_TAGGED_CALL", + }); +} +hideFromStack(notTaggedCallError); + +function escapeIdentifier(str: string) { + return '"' + str.replaceAll('"', '""').replaceAll(".", '"."') + '"'; +} + +export default { + connectionClosedError, + notTaggedCallError, + escapeIdentifier, +}; diff --git a/src/js/private.d.ts b/src/js/private.d.ts index 83085b2656..3f46c32c9f 100644 --- a/src/js/private.d.ts +++ b/src/js/private.d.ts @@ -10,6 +10,31 @@ type BunWatchListener = (event: WatchEventType, filename: T | undefined) => v */ declare function $bundleError(...message: any[]): never; +declare module "bun" { + namespace SQL.__internal { + type Define = T & { + [Key in K | "adapter"]: NonNullable; + } & {}; + + type Adapter = NonNullable; + + /** + * Represents the result of the `parseOptions()` function in the sqlite path + */ + type DefinedSQLiteOptions = Define; + + /** + * Represents the result of the `parseOptions()` function in the postgres path + */ + type DefinedPostgresOptions = Define & { + sslMode: import("internal/sql/shared").SSLMode; + query: string; + }; + + type DefinedOptions = DefinedSQLiteOptions | DefinedPostgresOptions; + } +} + interface BunFSWatcher { /** * Stop watching for changes on the given `BunFSWatcher`. Once stopped, the `BunFSWatcher` object is no longer usable. 
@@ -224,7 +249,6 @@ declare function $newZigFunction any>( declare function $bindgenFn any>(filename: string, symbol: string): T; // NOTE: $debug, $assert, and $isPromiseFulfilled omitted -import "node:net"; declare module "node:net" { export function _normalizeArgs(args: any[]): unknown[]; diff --git a/src/sql/postgres/AnyPostgresError.zig b/src/sql/postgres/AnyPostgresError.zig index 58582814ad..7f79945cea 100644 --- a/src/sql/postgres/AnyPostgresError.zig +++ b/src/sql/postgres/AnyPostgresError.zig @@ -33,37 +33,103 @@ pub const AnyPostgresError = error{ UnknownFormatCode, }; +/// Options for creating a PostgresError +pub const PostgresErrorOptions = struct { + code: []const u8, + errno: ?[]const u8 = null, + detail: ?[]const u8 = null, + hint: ?[]const u8 = null, + severity: ?[]const u8 = null, + position: ?[]const u8 = null, + internalPosition: ?[]const u8 = null, + internalQuery: ?[]const u8 = null, + where: ?[]const u8 = null, + schema: ?[]const u8 = null, + table: ?[]const u8 = null, + column: ?[]const u8 = null, + dataType: ?[]const u8 = null, + constraint: ?[]const u8 = null, + file: ?[]const u8 = null, + line: ?[]const u8 = null, + routine: ?[]const u8 = null, +}; + +pub fn createPostgresError( + globalObject: *jsc.JSGlobalObject, + message: []const u8, + options: PostgresErrorOptions, +) bun.JSError!JSValue { + const bun_ns = (try globalObject.toJSValue().get(globalObject, "Bun")).?; + const sql_constructor = (try bun_ns.get(globalObject, "SQL")).?; + const pg_error_constructor = (try sql_constructor.get(globalObject, "PostgresError")).?; + + const opts_obj = JSValue.createEmptyObject(globalObject, 0); + opts_obj.put(globalObject, jsc.ZigString.static("code"), jsc.ZigString.init(options.code).toJS(globalObject)); + + if (options.errno) |errno| opts_obj.put(globalObject, jsc.ZigString.static("errno"), jsc.ZigString.init(errno).toJS(globalObject)); + if (options.detail) |detail| opts_obj.put(globalObject, jsc.ZigString.static("detail"), jsc.ZigString.init(detail).toJS(globalObject)); + if (options.hint) |hint| opts_obj.put(globalObject, jsc.ZigString.static("hint"), jsc.ZigString.init(hint).toJS(globalObject)); + if (options.severity) |severity| opts_obj.put(globalObject, jsc.ZigString.static("severity"), jsc.ZigString.init(severity).toJS(globalObject)); + if (options.position) |pos| opts_obj.put(globalObject, jsc.ZigString.static("position"), jsc.ZigString.init(pos).toJS(globalObject)); + if (options.internalPosition) |pos| opts_obj.put(globalObject, jsc.ZigString.static("internalPosition"), jsc.ZigString.init(pos).toJS(globalObject)); + if (options.internalQuery) |query| opts_obj.put(globalObject, jsc.ZigString.static("internalQuery"), jsc.ZigString.init(query).toJS(globalObject)); + if (options.where) |w| opts_obj.put(globalObject, jsc.ZigString.static("where"), jsc.ZigString.init(w).toJS(globalObject)); + if (options.schema) |s| opts_obj.put(globalObject, jsc.ZigString.static("schema"), jsc.ZigString.init(s).toJS(globalObject)); + if (options.table) |t| opts_obj.put(globalObject, jsc.ZigString.static("table"), jsc.ZigString.init(t).toJS(globalObject)); + if (options.column) |c| opts_obj.put(globalObject, jsc.ZigString.static("column"), jsc.ZigString.init(c).toJS(globalObject)); + if (options.dataType) |dt| opts_obj.put(globalObject, jsc.ZigString.static("dataType"), jsc.ZigString.init(dt).toJS(globalObject)); + if (options.constraint) |c| opts_obj.put(globalObject, jsc.ZigString.static("constraint"), jsc.ZigString.init(c).toJS(globalObject)); + if (options.file) |f| 
opts_obj.put(globalObject, jsc.ZigString.static("file"), jsc.ZigString.init(f).toJS(globalObject)); + if (options.line) |l| opts_obj.put(globalObject, jsc.ZigString.static("line"), jsc.ZigString.init(l).toJS(globalObject)); + if (options.routine) |r| opts_obj.put(globalObject, jsc.ZigString.static("routine"), jsc.ZigString.init(r).toJS(globalObject)); + + const args = [_]JSValue{ + jsc.ZigString.init(message).toJS(globalObject), + opts_obj, + }; + + const JSC = @import("../../bun.js/javascript_core_c_api.zig"); + var exception: JSC.JSValueRef = null; + const result = JSC.JSObjectCallAsConstructor(globalObject, pg_error_constructor.asObjectRef(), args.len, @ptrCast(&args), &exception); + + if (exception != null) { + return bun.JSError.JSError; + } + + return JSValue.fromRef(result); +} + pub fn postgresErrorToJS(globalObject: *jsc.JSGlobalObject, message: ?[]const u8, err: AnyPostgresError) JSValue { - const error_code: jsc.Error = switch (err) { - error.ConnectionClosed => .POSTGRES_CONNECTION_CLOSED, - error.ExpectedRequest => .POSTGRES_EXPECTED_REQUEST, - error.ExpectedStatement => .POSTGRES_EXPECTED_STATEMENT, - error.InvalidBackendKeyData => .POSTGRES_INVALID_BACKEND_KEY_DATA, - error.InvalidBinaryData => .POSTGRES_INVALID_BINARY_DATA, - error.InvalidByteSequence => .POSTGRES_INVALID_BYTE_SEQUENCE, - error.InvalidByteSequenceForEncoding => .POSTGRES_INVALID_BYTE_SEQUENCE_FOR_ENCODING, - error.InvalidCharacter => .POSTGRES_INVALID_CHARACTER, - error.InvalidMessage => .POSTGRES_INVALID_MESSAGE, - error.InvalidMessageLength => .POSTGRES_INVALID_MESSAGE_LENGTH, - error.InvalidQueryBinding => .POSTGRES_INVALID_QUERY_BINDING, - error.InvalidServerKey => .POSTGRES_INVALID_SERVER_KEY, - error.InvalidServerSignature => .POSTGRES_INVALID_SERVER_SIGNATURE, - error.MultidimensionalArrayNotSupportedYet => .POSTGRES_MULTIDIMENSIONAL_ARRAY_NOT_SUPPORTED_YET, - error.NullsInArrayNotSupportedYet => .POSTGRES_NULLS_IN_ARRAY_NOT_SUPPORTED_YET, - error.Overflow => .POSTGRES_OVERFLOW, - error.PBKDFD2 => .POSTGRES_AUTHENTICATION_FAILED_PBKDF2, - error.SASL_SIGNATURE_MISMATCH => .POSTGRES_SASL_SIGNATURE_MISMATCH, - error.SASL_SIGNATURE_INVALID_BASE64 => .POSTGRES_SASL_SIGNATURE_INVALID_BASE64, - error.TLSNotAvailable => .POSTGRES_TLS_NOT_AVAILABLE, - error.TLSUpgradeFailed => .POSTGRES_TLS_UPGRADE_FAILED, - error.UnexpectedMessage => .POSTGRES_UNEXPECTED_MESSAGE, - error.UNKNOWN_AUTHENTICATION_METHOD => .POSTGRES_UNKNOWN_AUTHENTICATION_METHOD, - error.UNSUPPORTED_AUTHENTICATION_METHOD => .POSTGRES_UNSUPPORTED_AUTHENTICATION_METHOD, - error.UnsupportedByteaFormat => .POSTGRES_UNSUPPORTED_BYTEA_FORMAT, - error.UnsupportedArrayFormat => .POSTGRES_UNSUPPORTED_ARRAY_FORMAT, - error.UnsupportedIntegerSize => .POSTGRES_UNSUPPORTED_INTEGER_SIZE, - error.UnsupportedNumericFormat => .POSTGRES_UNSUPPORTED_NUMERIC_FORMAT, - error.UnknownFormatCode => .POSTGRES_UNKNOWN_FORMAT_CODE, + const code = switch (err) { + error.ConnectionClosed => "ERR_POSTGRES_CONNECTION_CLOSED", + error.ExpectedRequest => "ERR_POSTGRES_EXPECTED_REQUEST", + error.ExpectedStatement => "ERR_POSTGRES_EXPECTED_STATEMENT", + error.InvalidBackendKeyData => "ERR_POSTGRES_INVALID_BACKEND_KEY_DATA", + error.InvalidBinaryData => "ERR_POSTGRES_INVALID_BINARY_DATA", + error.InvalidByteSequence => "ERR_POSTGRES_INVALID_BYTE_SEQUENCE", + error.InvalidByteSequenceForEncoding => "ERR_POSTGRES_INVALID_BYTE_SEQUENCE_FOR_ENCODING", + error.InvalidCharacter => "ERR_POSTGRES_INVALID_CHARACTER", + error.InvalidMessage => "ERR_POSTGRES_INVALID_MESSAGE", + 
error.InvalidMessageLength => "ERR_POSTGRES_INVALID_MESSAGE_LENGTH", + error.InvalidQueryBinding => "ERR_POSTGRES_INVALID_QUERY_BINDING", + error.InvalidServerKey => "ERR_POSTGRES_INVALID_SERVER_KEY", + error.InvalidServerSignature => "ERR_POSTGRES_INVALID_SERVER_SIGNATURE", + error.MultidimensionalArrayNotSupportedYet => "ERR_POSTGRES_MULTIDIMENSIONAL_ARRAY_NOT_SUPPORTED_YET", + error.NullsInArrayNotSupportedYet => "ERR_POSTGRES_NULLS_IN_ARRAY_NOT_SUPPORTED_YET", + error.Overflow => "ERR_POSTGRES_OVERFLOW", + error.PBKDFD2 => "ERR_POSTGRES_AUTHENTICATION_FAILED_PBKDF2", + error.SASL_SIGNATURE_MISMATCH => "ERR_POSTGRES_SASL_SIGNATURE_MISMATCH", + error.SASL_SIGNATURE_INVALID_BASE64 => "ERR_POSTGRES_SASL_SIGNATURE_INVALID_BASE64", + error.TLSNotAvailable => "ERR_POSTGRES_TLS_NOT_AVAILABLE", + error.TLSUpgradeFailed => "ERR_POSTGRES_TLS_UPGRADE_FAILED", + error.UnexpectedMessage => "ERR_POSTGRES_UNEXPECTED_MESSAGE", + error.UNKNOWN_AUTHENTICATION_METHOD => "ERR_POSTGRES_UNKNOWN_AUTHENTICATION_METHOD", + error.UNSUPPORTED_AUTHENTICATION_METHOD => "ERR_POSTGRES_UNSUPPORTED_AUTHENTICATION_METHOD", + error.UnsupportedByteaFormat => "ERR_POSTGRES_UNSUPPORTED_BYTEA_FORMAT", + error.UnsupportedArrayFormat => "ERR_POSTGRES_UNSUPPORTED_ARRAY_FORMAT", + error.UnsupportedIntegerSize => "ERR_POSTGRES_UNSUPPORTED_INTEGER_SIZE", + error.UnsupportedNumericFormat => "ERR_POSTGRES_UNSUPPORTED_NUMERIC_FORMAT", + error.UnknownFormatCode => "ERR_POSTGRES_UNKNOWN_FORMAT_CODE", error.JSError => { return globalObject.takeException(error.JSError); }, @@ -75,13 +141,17 @@ pub fn postgresErrorToJS(globalObject: *jsc.JSGlobalObject, message: ?[]const u8 bun.unreachablePanic("Assertion failed: ShortRead should be handled by the caller in postgres", .{}); }, }; - if (message) |msg| { - return error_code.fmt(globalObject, "{s}", .{msg}); + + const msg = message orelse std.fmt.allocPrint(bun.default_allocator, "Failed to bind query: {s}", .{@errorName(err)}) catch unreachable; + defer { + if (message == null) bun.default_allocator.free(msg); } - return error_code.fmt(globalObject, "Failed to bind query: {s}", .{@errorName(err)}); + + return createPostgresError(globalObject, msg, .{ .code = code }) catch |e| globalObject.takeError(e); } const bun = @import("bun"); +const std = @import("std"); const jsc = bun.jsc; const JSValue = jsc.JSValue; diff --git a/src/sql/postgres/PostgresSQLConnection.zig b/src/sql/postgres/PostgresSQLConnection.zig index fdf05b08ed..483945ceba 100644 --- a/src/sql/postgres/PostgresSQLConnection.zig +++ b/src/sql/postgres/PostgresSQLConnection.zig @@ -213,13 +213,13 @@ pub fn onConnectionTimeout(this: *PostgresSQLConnection) bun.api.Timer.EventLoop switch (this.status) { .connected => { - this.failFmt(.POSTGRES_IDLE_TIMEOUT, "Idle timeout reached after {}", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.idle_timeout_interval_ms) *| std.time.ns_per_ms)}); + this.failFmt("ERR_POSTGRES_IDLE_TIMEOUT", "Idle timeout reached after {}", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.idle_timeout_interval_ms) *| std.time.ns_per_ms)}); }, else => { - this.failFmt(.POSTGRES_CONNECTION_TIMEOUT, "Connection timeout after {}", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.connection_timeout_ms) *| std.time.ns_per_ms)}); + this.failFmt("ERR_POSTGRES_CONNECTION_TIMEOUT", "Connection timeout after {}", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.connection_timeout_ms) *| std.time.ns_per_ms)}); }, .sent_startup_message => { - this.failFmt(.POSTGRES_CONNECTION_TIMEOUT, "Connection timed out after {} (sent startup 
message, but never received response)", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.connection_timeout_ms) *| std.time.ns_per_ms)}); + this.failFmt("ERR_POSTGRES_CONNECTION_TIMEOUT", "Connection timed out after {} (sent startup message, but never received response)", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.connection_timeout_ms) *| std.time.ns_per_ms)}); }, } return .disarm; @@ -229,7 +229,7 @@ pub fn onMaxLifetimeTimeout(this: *PostgresSQLConnection) bun.api.Timer.EventLoo debug("onMaxLifetimeTimeout", .{}); this.max_lifetime_timer.state = .FIRED; if (this.status == .failed) return .disarm; - this.failFmt(.POSTGRES_LIFETIME_TIMEOUT, "Max lifetime timeout reached after {}", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.max_lifetime_interval_ms) *| std.time.ns_per_ms)}); + this.failFmt("ERR_POSTGRES_LIFETIME_TIMEOUT", "Max lifetime timeout reached after {}", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.max_lifetime_interval_ms) *| std.time.ns_per_ms)}); return .disarm; } @@ -332,8 +332,13 @@ pub fn failWithJSValue(this: *PostgresSQLConnection, value: JSValue) void { ) catch |e| this.globalObject.reportActiveExceptionAsUnhandled(e); } -pub fn failFmt(this: *PostgresSQLConnection, comptime error_code: jsc.Error, comptime fmt: [:0]const u8, args: anytype) void { - this.failWithJSValue(error_code.fmt(this.globalObject, fmt, args)); +pub fn failFmt(this: *PostgresSQLConnection, code: []const u8, comptime fmt: [:0]const u8, args: anytype) void { + const message = std.fmt.allocPrint(bun.default_allocator, fmt, args) catch bun.outOfMemory(); + defer bun.default_allocator.free(message); + + const err = createPostgresError(this.globalObject, message, .{ .code = code }) catch |e| this.globalObject.takeError(e); + + this.failWithJSValue(err); } pub fn fail(this: *PostgresSQLConnection, message: []const u8, err: AnyPostgresError) void { @@ -1825,6 +1830,7 @@ const Status = @import("./Status.zig").Status; const TLSStatus = @import("./TLSStatus.zig").TLSStatus; const AnyPostgresError = @import("./AnyPostgresError.zig").AnyPostgresError; +const createPostgresError = @import("./AnyPostgresError.zig").createPostgresError; const postgresErrorToJS = @import("./AnyPostgresError.zig").postgresErrorToJS; const bun = @import("bun"); diff --git a/src/sql/postgres/PostgresSQLQuery.zig b/src/sql/postgres/PostgresSQLQuery.zig index c8503c53f1..c1b3cedbc0 100644 --- a/src/sql/postgres/PostgresSQLQuery.zig +++ b/src/sql/postgres/PostgresSQLQuery.zig @@ -120,7 +120,8 @@ pub fn onJSError(this: *@This(), err: jsc.JSValue, globalObject: *jsc.JSGlobalOb }); } pub fn onError(this: *@This(), err: PostgresSQLStatement.Error, globalObject: *jsc.JSGlobalObject) void { - this.onJSError(err.toJS(globalObject), globalObject); + const e = err.toJS(globalObject) catch return; + this.onJSError(e, globalObject); } pub fn allowGC(thisValue: jsc.JSValue, globalObject: *jsc.JSGlobalObject) void { @@ -377,7 +378,8 @@ pub fn doRun(this: *PostgresSQLQuery, globalObject: *jsc.JSGlobalObject, callfra stmt.deref(); this.deref(); // If the statement failed, we need to throw the error - return globalObject.throwValue(this.statement.?.error_response.?.toJS(globalObject)); + const e = try this.statement.?.error_response.?.toJS(globalObject); + return globalObject.throwValue(e); }, .prepared => { if (!connection.hasQueryRunning() or connection.canPipeline()) { diff --git a/src/sql/postgres/PostgresSQLStatement.zig b/src/sql/postgres/PostgresSQLStatement.zig index 8e850c1bf8..1026d86b22 100644 --- a/src/sql/postgres/PostgresSQLStatement.zig 
+++ b/src/sql/postgres/PostgresSQLStatement.zig @@ -23,13 +23,14 @@ pub const Error = union(enum) { } } - pub fn toJS(this: *const @This(), globalObject: *jsc.JSGlobalObject) JSValue { + pub fn toJS(this: *const @This(), globalObject: *jsc.JSGlobalObject) JSError!JSValue { return switch (this.*) { .protocol => |err| err.toJS(globalObject), .postgres_error => |err| postgresErrorToJS(globalObject, null, err), }; } }; + pub const Status = enum { pending, parsing, @@ -174,6 +175,7 @@ const types = @import("./PostgresTypes.zig"); const int4 = types.int4; const bun = @import("bun"); +const JSError = bun.JSError; const String = bun.String; const jsc = bun.jsc; diff --git a/src/sql/postgres/protocol/ErrorResponse.zig b/src/sql/postgres/protocol/ErrorResponse.zig index bc2bde4e47..ee2d5b7ee4 100644 --- a/src/sql/postgres/protocol/ErrorResponse.zig +++ b/src/sql/postgres/protocol/ErrorResponse.zig @@ -33,7 +33,6 @@ pub fn toJS(this: ErrorResponse, globalObject: *jsc.JSGlobalObject) JSValue { var b = bun.StringBuilder{}; defer b.deinit(bun.default_allocator); - // Pre-calculate capacity to avoid reallocations for (this.messages.items) |*msg| { b.cap += switch (msg.*) { inline else => |m| m.utf8ByteLength(), @@ -41,13 +40,14 @@ pub fn toJS(this: ErrorResponse, globalObject: *jsc.JSGlobalObject) JSValue { } b.allocate(bun.default_allocator) catch {}; - // Build a more structured error message var severity: String = String.dead; var code: String = String.dead; var message: String = String.dead; var detail: String = String.dead; var hint: String = String.dead; var position: String = String.dead; + var internalPosition: String = String.dead; + var internal: String = String.dead; var where: String = String.dead; var schema: String = String.dead; var table: String = String.dead; @@ -66,6 +66,8 @@ pub fn toJS(this: ErrorResponse, globalObject: *jsc.JSGlobalObject) JSValue { .detail => |str| detail = str, .hint => |str| hint = str, .position => |str| position = str, + .internal_position => |str| internalPosition = str, + .internal => |str| internal = str, .where => |str| where = str, .schema => |str| schema = str, .table => |str| table = str, @@ -106,44 +108,49 @@ pub fn toJS(this: ErrorResponse, globalObject: *jsc.JSGlobalObject) JSValue { } } - const possible_fields = .{ - .{ "detail", detail, void }, - .{ "hint", hint, void }, - .{ "column", column, void }, - .{ "constraint", constraint, void }, - .{ "datatype", datatype, void }, - // in the past this was set to i32 but postgres returns a strings lets keep it compatible - .{ "errno", code, void }, - .{ "position", position, i32 }, - .{ "schema", schema, void }, - .{ "table", table, void }, - .{ "where", where, void }, - }; - const error_code: jsc.Error = - // https://www.postgresql.org/docs/8.1/errcodes-appendix.html - if (code.eqlComptime("42601")) - .POSTGRES_SYNTAX_ERROR - else - .POSTGRES_SERVER_ERROR; - const err = error_code.fmt(globalObject, "{s}", .{b.allocatedSlice()[0..b.len]}); + const createPostgresError = @import("../AnyPostgresError.zig").createPostgresError; - inline for (possible_fields) |field| { - if (!field.@"1".isEmpty()) { - const value = brk: { - if (field.@"2" == i32) { - if (field.@"1".toInt32()) |val| { - break :brk jsc.JSValue.jsNumberFromInt32(val); - } - } + const errno = if (!code.isEmpty()) code.byteSlice() else null; + const error_code = if (code.eqlComptime("42601")) // syntax error - https://www.postgresql.org/docs/8.1/errcodes-appendix.html + "ERR_POSTGRES_SYNTAX_ERROR" + else + "ERR_POSTGRES_SERVER_ERROR"; - break :brk 
field.@"1".toJS(globalObject); - }; + const detail_slice = if (detail.isEmpty()) null else detail.byteSlice(); + const hint_slice = if (hint.isEmpty()) null else hint.byteSlice(); + const severity_slice = if (severity.isEmpty()) null else severity.byteSlice(); + const position_slice = if (position.isEmpty()) null else position.byteSlice(); + const internalPosition_slice = if (internalPosition.isEmpty()) null else internalPosition.byteSlice(); + const internalQuery_slice = if (internal.isEmpty()) null else internal.byteSlice(); + const where_slice = if (where.isEmpty()) null else where.byteSlice(); + const schema_slice = if (schema.isEmpty()) null else schema.byteSlice(); + const table_slice = if (table.isEmpty()) null else table.byteSlice(); + const column_slice = if (column.isEmpty()) null else column.byteSlice(); + const dataType_slice = if (datatype.isEmpty()) null else datatype.byteSlice(); + const constraint_slice = if (constraint.isEmpty()) null else constraint.byteSlice(); + const file_slice = if (file.isEmpty()) null else file.byteSlice(); + const line_slice = if (line.isEmpty()) null else line.byteSlice(); + const routine_slice = if (routine.isEmpty()) null else routine.byteSlice(); - err.put(globalObject, jsc.ZigString.static(field.@"0"), value); - } - } - - return err; + return createPostgresError(globalObject, b.allocatedSlice()[0..b.len], .{ + .code = error_code, + .errno = errno, + .detail = detail_slice, + .hint = hint_slice, + .severity = severity_slice, + .position = position_slice, + .internalPosition = internalPosition_slice, + .internalQuery = internalQuery_slice, + .where = where_slice, + .schema = schema_slice, + .table = table_slice, + .column = column_slice, + .dataType = dataType_slice, + .constraint = constraint_slice, + .file = file_slice, + .line = line_slice, + .routine = routine_slice, + }) catch |e| globalObject.takeError(e); } const std = @import("std"); diff --git a/test/integration/bun-types/fixture/sql.ts b/test/integration/bun-types/fixture/sql.ts index 20aab93e96..9128c3a708 100644 --- a/test/integration/bun-types/fixture/sql.ts +++ b/test/integration/bun-types/fixture/sql.ts @@ -249,6 +249,8 @@ expectType(sql([1, 2, 3] as const)).is>(); expectType(sql("users")).is>(); expectType(sql<1>("users")).is>(); +declare const user: { name: "Alice"; email: "alice@example.com" }; + // @ts-expect-error - missing key in object sql(user, "notAKey"); @@ -265,3 +267,9 @@ sql([1, 2, 3], "notAKey"); expectType>(); expectType>(); expectType>(); + +// check some types exist +expectType>; +expectType; +expectType; +expectType>; diff --git a/test/integration/bun-types/fixture/url.ts b/test/integration/bun-types/fixture/url.ts new file mode 100644 index 0000000000..a9ab149d7e --- /dev/null +++ b/test/integration/bun-types/fixture/url.ts @@ -0,0 +1,21 @@ +const myUrl = new URL("hello"); +myUrl.searchParams.toJSON(); + +const mySearchParams = new URLSearchParams("hello"); +mySearchParams.toJSON(); + +import { URL as NodeURL, URLSearchParams as NodeURLSearchParams } from "node:url"; + +const nodeUrl = new NodeURL("hello"); +nodeUrl.searchParams.toJSON(); + +const nodeSearchParams = new NodeURLSearchParams("hello"); +nodeSearchParams.toJSON(); + +import { URL as UrlURL, URLSearchParams as UrlURLSearchParams } from "url"; + +const urlUrl = new UrlURL("hello"); +urlUrl.searchParams.toJSON(); + +const urlSearchParams = new UrlURLSearchParams("hello"); +urlSearchParams.toJSON(); diff --git a/test/integration/bun-types/fixture/utilities.ts 
b/test/integration/bun-types/fixture/utilities.ts index df761817af..609a9188f7 100644 --- a/test/integration/bun-types/fixture/utilities.ts +++ b/test/integration/bun-types/fixture/utilities.ts @@ -12,7 +12,8 @@ export function expectType(): { * expectType().is(); // pass * ``` */ - is(...args: IfEquals extends true ? [] : [expected: X, butGot: T]): void; + is(...args: IfEquals extends true ? [] : [expected: X, but_got: T]): void; + extends(...args: T extends X ? [] : [expected: T, but_got: X]): void; }; export function expectType(arg: T): { /** diff --git a/test/js/sql/__snapshots__/sqlite-sql.test.ts.snap b/test/js/sql/__snapshots__/sqlite-sql.test.ts.snap new file mode 100644 index 0000000000..73cc9c458d --- /dev/null +++ b/test/js/sql/__snapshots__/sqlite-sql.test.ts.snap @@ -0,0 +1,46 @@ +// Bun Snapshot v1, https://bun.sh/docs/test/snapshots + +exports[`Query Normalization Fuzzing Tests Result Modes values() mode works with PRAGMA commands 1`] = ` +[ + [ + 0, + "type", + "TEXT", + 0, + null, + 0, + ], + [ + 1, + "name", + "TEXT", + 0, + null, + 0, + ], + [ + 2, + "tbl_name", + "TEXT", + 0, + null, + 0, + ], + [ + 3, + "rootpage", + "INT", + 0, + null, + 0, + ], + [ + 4, + "sql", + "TEXT", + 0, + null, + 0, + ], +] +`; diff --git a/test/js/sql/adapter-override.test.ts b/test/js/sql/adapter-override.test.ts new file mode 100644 index 0000000000..4cbc3c1a8a --- /dev/null +++ b/test/js/sql/adapter-override.test.ts @@ -0,0 +1,58 @@ +import { SQL } from "bun"; +import { describe, expect, test } from "bun:test"; + +describe("Adapter Override", () => { + test("postgres:// URL with adapter='sqlite' uses SQLite", async () => { + const sql = new SQL("postgres://localhost:5432/testdb", { + adapter: "sqlite", + filename: ":memory:", + }); + + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(":memory:"); + + // Verify it's actually SQLite by running a SQLite-specific query + await sql`CREATE TABLE test (id INTEGER PRIMARY KEY)`; + await sql`INSERT INTO test (id) VALUES (1)`; + const result = await sql`SELECT * FROM test`; + expect(result).toHaveLength(1); + expect(result[0].id).toBe(1); + + await sql.close(); + }); + + test("sqlite:// URL with adapter='sqlite' works", async () => { + const sql = new SQL("sqlite://:memory:", { + adapter: "sqlite", + }); + + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(":memory:"); + + await sql`CREATE TABLE test2 (value TEXT)`; + await sql`INSERT INTO test2 (value) VALUES ('hello')`; + const result = await sql`SELECT * FROM test2`; + expect(result).toHaveLength(1); + expect(result[0].value).toBe("hello"); + + await sql.close(); + }); + + test("no URL with adapter='sqlite' and filename works", async () => { + const sql = new SQL(undefined, { + adapter: "sqlite", + filename: ":memory:", + }); + + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(":memory:"); + + await sql`CREATE TABLE test3 (num REAL)`; + await sql`INSERT INTO test3 (num) VALUES (3.14)`; + const result = await sql`SELECT * FROM test3`; + expect(result).toHaveLength(1); + expect(result[0].num).toBeCloseTo(3.14); + + await sql.close(); + }); +}); diff --git a/test/js/sql/sql.test.ts b/test/js/sql/sql.test.ts index 686d4cd8c0..963935f989 100644 --- a/test/js/sql/sql.test.ts +++ b/test/js/sql/sql.test.ts @@ -2,7 +2,7 @@ import { $, randomUUIDv7, sql, SQL } from "bun"; import { afterAll, describe, expect, mock, test } from "bun:test"; import { bunEnv, bunExe, isCI, isLinux, tempDirWithFiles } from "harness"; 
import path from "path"; -const postgres = (...args) => new sql(...args); +const postgres = (...args) => new SQL(...args); import { exec, execSync } from "child_process"; import net from "net"; @@ -20,18 +20,21 @@ function rel(filename: string) { return path.join(dir, filename); } async function findRandomPort() { - return new Promise((resolve, reject) => { + return new Promise((resolve, reject) => { // Create a server to listen on a random port const server = net.createServer(); server.listen(0, () => { - const port = server.address().port; + const port = (server.address() as import("node:net").AddressInfo).port; server.close(() => resolve(port)); }); server.on("error", reject); }); } -async function waitForPostgres(port) { - for (let i = 0; i < 3; i++) { + +async function waitForPostgres(port: number, count = 10) { + console.log(`Attempting to connect to postgres://postgres@localhost:${port}/postgres`); + + for (let i = 0; i < count; i++) { try { const sql = new SQL(`postgres://postgres@localhost:${port}/postgres`, { idle_timeout: 20, @@ -43,7 +46,10 @@ async function waitForPostgres(port) { console.log("PostgreSQL is ready!"); return true; } catch (error) { - console.log(`Waiting for PostgreSQL... (${i + 1}/3)`); + console.log(`Waiting for PostgreSQL... (${i + 1}/${count})`, error); + if (error && typeof error === "object" && "stack" in error) { + console.log("Error stack:", error.stack); + } await new Promise(resolve => setTimeout(resolve, 1000)); } } @@ -141,24 +147,24 @@ if (isDockerEnabled()) { // --- Expected pg_hba.conf --- process.env.DATABASE_URL = `postgres://bun_sql_test@localhost:${container.port}/bun_sql_test`; - const login = { + const login: Bun.SQL.PostgresOptions = { username: "bun_sql_test", port: container.port, }; - const login_md5 = { + const login_md5: Bun.SQL.PostgresOptions = { username: "bun_sql_test_md5", password: "bun_sql_test_md5", port: container.port, }; - const login_scram = { + const login_scram: Bun.SQL.PostgresOptions = { username: "bun_sql_test_scram", password: "bun_sql_test_scram", port: container.port, }; - const options = { + const options: Bun.SQL.PostgresOptions = { db: "bun_sql_test", username: login.username, password: login.password, @@ -172,6 +178,7 @@ if (isDockerEnabled()) { expect(sql.options.password).toBe("bunbun@bun"); expect(sql.options.database).toBe("bun@bun"); }); + test("Connects with no options", async () => { // we need at least the usename and port await using sql = postgres({ max: 1, port: container.port, username: login.username }); @@ -182,6 +189,9 @@ if (isDockerEnabled()) { }); describe("should work with more than the max inline capacity", () => { + const sql = postgres(options); + afterAll(() => sql.close()); + for (let size of [50, 60, 62, 64, 70, 100]) { for (let duplicated of [true, false]) { test(`${size} ${duplicated ? 
"+ duplicated" : "unique"} fields`, async () => { @@ -219,6 +229,8 @@ if (isDockerEnabled()) { } catch (e) { error = e; } + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); expect(error.code).toBe(`ERR_POSTGRES_CONNECTION_TIMEOUT`); expect(error.message).toContain("Connection timeout after 4s"); expect(onconnect).not.toHaveBeenCalled(); @@ -240,6 +252,8 @@ if (isDockerEnabled()) { } catch (e) { error = e; } + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); expect(error.code).toBe(`ERR_POSTGRES_IDLE_TIMEOUT`); expect(onconnect).toHaveBeenCalled(); expect(onclose).toHaveBeenCalledTimes(1); @@ -261,6 +275,8 @@ if (isDockerEnabled()) { expect(onconnect).toHaveBeenCalledTimes(1); expect(onclose).not.toHaveBeenCalled(); const err = await onClosePromise.promise; + expect(err).toBeInstanceOf(SQL.SQLError); + expect(err).toBeInstanceOf(SQL.PostgresError); expect(err.code).toBe(`ERR_POSTGRES_IDLE_TIMEOUT`); }); @@ -291,6 +307,8 @@ if (isDockerEnabled()) { expect(onclose).toHaveBeenCalledTimes(1); + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); expect(error.code).toBe(`ERR_POSTGRES_LIFETIME_TIMEOUT`); }); @@ -333,6 +351,7 @@ if (isDockerEnabled()) { }); test("query string memory leak test", async () => { + await using sql = postgres(options); Bun.gc(true); const rss = process.memoryUsage.rss(); for (let potato of Array.from({ length: 8 * 1024 }, a => "okkk" + a)) { @@ -362,7 +381,7 @@ if (isDockerEnabled()) { Bun.inspect(result); }); - test("Handles mixed column names", async () => { + test("Basic handles mixed column names", async () => { const result = await sql`select 1 as "1", 2 as "2", 3 as "3", 4 as x`; expect(result).toEqual([{ "1": 1, "2": 2, "3": 3, x: 4 }]); // Sanity check: ensure iterating through the properties doesn't crash. 
@@ -567,20 +586,23 @@ if (isDockerEnabled()) { test("Throws on illegal transactions", async () => { const sql = postgres({ ...options, max: 2, fetch_types: false }); const error = await sql`begin`.catch(e => e); + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); return expect(error.code).toBe("ERR_POSTGRES_UNSAFE_TRANSACTION"); }); test("Transaction throws", async () => { await sql`create table if not exists test (a int)`; try { - expect( - await sql - .begin(async sql => { - await sql`insert into test values(1)`; - await sql`insert into test values('hej')`; - }) - .catch(e => e.errno), - ).toBe("22P02"); + const error = await sql + .begin(async sql => { + await sql`insert into test values(1)`; + await sql`insert into test values('hej')`; + }) + .catch(e => e); + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); + expect(error.errno).toBe("22P02"); } finally { await sql`drop table test`; } @@ -757,11 +779,12 @@ if (isDockerEnabled()) { test("Uncaught transaction request errors bubbles to transaction", async () => { const sql = postgres(options); process.nextTick(() => sql.close({ timeout: 1 })); - expect( - await sql - .begin(sql => [sql`select wat`, sql`select current_setting('bun_sql.test') as x, ${1} as a`]) - .catch(e => e.errno || e), - ).toBe("42703"); + const error = await sql + .begin(sql => [sql`select wat`, sql`select current_setting('bun_sql.test') as x, ${1} as a`]) + .catch(e => e); + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); + expect(error.errno).toBe("42703"); }); test("Fragments in transactions", async () => { @@ -875,9 +898,10 @@ if (isDockerEnabled()) { test("Throw syntax error", async () => { await using sql = postgres({ ...options, max: 1 }); const err = await sql`wat 1`.catch(x => x); + expect(err).toBeInstanceOf(SQL.SQLError); + expect(err).toBeInstanceOf(SQL.PostgresError); expect(err.errno).toBe("42601"); expect(err.code).toBe("ERR_POSTGRES_SYNTAX_ERROR"); - expect(err).toBeInstanceOf(SyntaxError); }); test("Connect using uri", async () => [ @@ -1025,6 +1049,8 @@ if (isDockerEnabled()) { } catch (e) { err = e; } + expect(err).toBeInstanceOf(SQL.SQLError); + expect(err).toBeInstanceOf(SQL.PostgresError); expect(err.code).toBe("ERR_POSTGRES_SERVER_ERROR"); }); @@ -1196,7 +1222,10 @@ if (isDockerEnabled()) { test("Connection ended error", async () => { const sql = postgres(options); await sql.end(); - return expect(await sql``.catch(x => x.code)).toBe("ERR_POSTGRES_CONNECTION_CLOSED"); + const error = await sql``.catch(x => x); + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); + return expect(error.code).toBe("ERR_POSTGRES_CONNECTION_CLOSED"); }); test("Connection end does not cancel query", async () => { @@ -1210,7 +1239,10 @@ if (isDockerEnabled()) { test("Connection destroyed", async () => { const sql = postgres(options); process.nextTick(() => sql.end({ timeout: 0 })); - expect(await sql``.catch(x => x.code)).toBe("ERR_POSTGRES_CONNECTION_CLOSED"); + const error = await sql``.catch(x => x); + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); + expect(error.code).toBe("ERR_POSTGRES_CONNECTION_CLOSED"); }); test("Connection destroyed with query before", async () => { @@ -1621,7 +1653,10 @@ if (isDockerEnabled()) { ); test("only allows one statement", async () => { - expect(await sql`select 1; select 2`.catch(e => 
e.errno)).toBe("42601"); + const error = await sql`select 1; select 2`.catch(e => e); + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); + expect(error.errno).toBe("42601"); }); test("await sql() throws not tagged error", async () => { @@ -1629,6 +1664,8 @@ if (isDockerEnabled()) { await sql("select 1"); expect.unreachable(); } catch (e: any) { + expect(e).toBeInstanceOf(SQL.SQLError); + expect(e).toBeInstanceOf(SQL.PostgresError); expect(e.code).toBe("ERR_POSTGRES_NOT_TAGGED_CALL"); } }); @@ -1640,6 +1677,8 @@ if (isDockerEnabled()) { }); expect.unreachable(); } catch (e: any) { + expect(e).toBeInstanceOf(SQL.SQLError); + expect(e).toBeInstanceOf(SQL.PostgresError); expect(e.code).toBe("ERR_POSTGRES_NOT_TAGGED_CALL"); } }); @@ -1651,6 +1690,8 @@ if (isDockerEnabled()) { }); expect.unreachable(); } catch (e: any) { + expect(e).toBeInstanceOf(SQL.SQLError); + expect(e).toBeInstanceOf(SQL.PostgresError); expect(e.code).toBe("ERR_POSTGRES_NOT_TAGGED_CALL"); } }); @@ -1662,6 +1703,8 @@ if (isDockerEnabled()) { }); expect.unreachable(); } catch (e: any) { + expect(e).toBeInstanceOf(SQL.SQLError); + expect(e).toBeInstanceOf(SQL.PostgresError); expect(e.code).toBe("ERR_POSTGRES_NOT_TAGGED_CALL"); } }); @@ -1690,6 +1733,8 @@ if (isDockerEnabled()) { } catch (err) { error = err; } + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); expect(error.code).toBe("ERR_POSTGRES_CONNECTION_CLOSED"); }); @@ -2332,6 +2377,8 @@ if (isDockerEnabled()) { throw new Error("should not reach"); } catch (e) { expect(e).toBeInstanceOf(Error); + expect(e).toBeInstanceOf(SQL.SQLError); + expect(e).toBeInstanceOf(SQL.PostgresError); expect(e.code).toBe("ERR_POSTGRES_CONNECTION_TIMEOUT"); expect(e.message).toMatch(/Connection timed out after 200ms/); } finally { @@ -4643,16 +4690,18 @@ CREATE TABLE ${table_name} ( test("int2[] - overflow behavior", async () => { await using sql = postgres({ ...options, max: 1 }); - expect( - await sql` + const error1 = await sql` SELECT ARRAY[32768]::int2[] -- One more than maximum int2 - `.catch(e => e.errno), - ).toBe("22003"); //smallint out of range - expect( - await sql` + `.catch(e => e); + expect(error1).toBeInstanceOf(SQL.SQLError); + expect(error1).toBeInstanceOf(SQL.PostgresError); + expect(error1.errno).toBe("22003"); //smallint out of range + const error2 = await sql` SELECT ARRAY[-32769]::int2[] -- One less than minimum int2 - `.catch(e => e.errno), - ).toBe("22003"); //smallint out of range + `.catch(e => e); + expect(error2).toBeInstanceOf(SQL.SQLError); + expect(error2).toBeInstanceOf(SQL.PostgresError); + expect(error2.errno).toBe("22003"); //smallint out of range }); }); // old, deprecated not entire documented but we keep the same behavior as postgres.js @@ -5172,17 +5221,19 @@ CREATE TABLE ${table_name} ( await using sql = postgres({ ...options, max: 1 }); // Invalid unicode escape - expect( - await sql` + const error3 = await sql` SELECT ARRAY[E'\\u123']::text[] as invalid_unicode - `.catch(e => e.errno || e), - ).toBe("22025"); + `.catch(e => e); + expect(error3).toBeInstanceOf(SQL.SQLError); + expect(error3).toBeInstanceOf(SQL.PostgresError); + expect(error3.errno).toBe("22025"); // Invalid octal escape - expect( - await sql` + const error4 = await sql` SELECT ARRAY[E'\\400']::text[] as invalid_octal - `.catch(e => e.errno || e), - ).toBe("22021"); + `.catch(e => e); + expect(error4).toBeInstanceOf(SQL.SQLError); + expect(error4).toBeInstanceOf(SQL.PostgresError); + 
expect(error4.errno).toBe("22021"); // Invalid hex escape expect( await sql` @@ -11109,3 +11160,484 @@ describe("should proper handle connection errors", () => { expect(result.stderr?.toString()).toBeFalsy(); }); }); + +describe("Misc", () => { + test("The Bun.SQL.*Error classes exist", () => { + expect(Bun.SQL.SQLError).toBeDefined(); + expect(Bun.SQL.PostgresError).toBeDefined(); + expect(Bun.SQL.SQLiteError).toBeDefined(); + + expect(Bun.SQL.SQLError.name).toBe("SQLError"); + expect(Bun.SQL.PostgresError.name).toBe("PostgresError"); + expect(Bun.SQL.SQLiteError.name).toBe("SQLiteError"); + + expect(Bun.SQL.SQLError.prototype).toBeInstanceOf(Error); + expect(Bun.SQL.PostgresError.prototype).toBeInstanceOf(Bun.SQL.SQLError); + expect(Bun.SQL.SQLiteError.prototype).toBeInstanceOf(Bun.SQL.SQLError); + }); + + describe("Adapter override URL parsing", () => { + test("explicit adapter='sqlite' overrides postgres:// URL", async () => { + // Even though URL suggests postgres, explicit adapter should win + const sql = new Bun.SQL("postgres://localhost:5432/testdb", { + adapter: "sqlite", + filename: ":memory:", + }); + + // Verify it's actually SQLite by checking the adapter type + expect(sql.options.adapter).toBe("sqlite"); + + // SQLite-specific operation should work + await sql`CREATE TABLE test_adapter (id INTEGER PRIMARY KEY)`; + await sql`INSERT INTO test_adapter (id) VALUES (1)`; + const result = await sql`SELECT * FROM test_adapter`; + expect(result).toHaveLength(1); + + await sql.close(); + }); + + test("explicit adapter='postgres' with sqlite:// URL should throw as invalid url", async () => { + let sql: Bun.SQL | undefined; + let error: unknown; + + try { + sql = new Bun.SQL("sqlite://:memory:", { + adapter: "postgres", + hostname: "localhost", + port: 5432, + username: "postgres", + password: "", + database: "testdb", + max: 1, + }); + + expect(false).toBeTrue(); + } catch (e) { + error = e; + } + + expect(error).toBeInstanceOf(Error); + expect(error.message).toMatchInlineSnapshot( + `"Invalid URL 'sqlite://:memory:' for postgres. 
Did you mean to specify \`{ adapter: "sqlite" }\`?"`, + ); + expect(sql).toBeUndefined(); + }); + + test("explicit adapter='sqlite' with sqlite:// URL works", async () => { + // Both URL and adapter agree on sqlite + const sql = new Bun.SQL("sqlite://:memory:", { + adapter: "sqlite", + }); + + expect(sql.options.adapter).toBe("sqlite"); + + await sql`CREATE TABLE test_consistent (id INTEGER)`; + await sql`INSERT INTO test_consistent VALUES (42)`; + const result = await sql`SELECT * FROM test_consistent`; + expect(result).toHaveLength(1); + expect(result[0].id).toBe(42); + + await sql.close(); + }); + + test("explicit adapter='postgres' with postgres:// URL works", async () => { + // Skip if no postgres available + if (!process.env.DATABASE_URL) { + return; + } + + // Both URL and adapter agree on postgres + const sql = new Bun.SQL(process.env.DATABASE_URL, { + adapter: "postgres", + max: 1, + }); + + expect(sql.options.adapter).toBe("postgres"); + + const randomTable = "test_consistent_" + Math.random().toString(36).substring(7); + await sql`CREATE TEMP TABLE ${sql(randomTable)} (value INT)`; + await sql`INSERT INTO ${sql(randomTable)} VALUES (42)`; + const result = await sql`SELECT * FROM ${sql(randomTable)}`; + expect(result).toHaveLength(1); + expect(result[0].value).toBe(42); + + await sql.close(); + }); + + test("explicit adapter overrides even with conflicting connection string patterns", async () => { + // Test that adapter explicitly set to sqlite works even with postgres-like connection info + const sql = new Bun.SQL(undefined as never, { + adapter: "sqlite", + filename: ":memory:", + hostname: "localhost", // These would normally suggest postgres + port: 5432, + username: "postgres", + password: "password", + database: "testdb", + }); + + expect(sql.options.adapter).toBe("sqlite"); + + // Should still work as SQLite + await sql`CREATE TABLE override_test (name TEXT)`; + await sql`INSERT INTO override_test VALUES ('test')`; + const result = await sql`SELECT * FROM override_test`; + expect(result).toHaveLength(1); + expect(result[0].name).toBe("test"); + + await sql.close(); + }); + }); + + describe("SQL Error Classes", () => { + describe("SQLError base class", () => { + test("SQLError should be a constructor", () => { + expect(typeof SQL.SQLError).toBe("function"); + expect(SQL.SQLError.name).toBe("SQLError"); + }); + + test("SQLError should extend Error", () => { + const error = new SQL.SQLError("Test error"); + expect(error).toBeInstanceOf(Error); + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error.message).toBe("Test error"); + expect(error.name).toBe("SQLError"); + }); + + test("SQLError should have proper stack trace", () => { + const error = new SQL.SQLError("Test error"); + expect(error.stack).toContain("SQLError"); + expect(error.stack).toContain("Test error"); + }); + + test("SQLError should be catchable as base class", () => { + try { + throw new SQL.SQLError("Test error"); + } catch (e) { + expect(e).toBeInstanceOf(SQL.SQLError); + expect(e).toBeInstanceOf(Error); + } + }); + }); + + describe("PostgresError class", () => { + test("PostgresError should be a constructor", () => { + expect(typeof SQL.PostgresError).toBe("function"); + expect(SQL.PostgresError.name).toBe("PostgresError"); + }); + + test("PostgresError should extend SQLError", () => { + const error = new SQL.PostgresError("Postgres error", { + code: "00000", + detail: "", + hint: "", + severity: "ERROR", + }); + expect(error).toBeInstanceOf(Error); + expect(error).toBeInstanceOf(SQL.SQLError); + 
expect(error).toBeInstanceOf(SQL.PostgresError); + expect(error.message).toBe("Postgres error"); + expect(error.name).toBe("PostgresError"); + }); + + test("PostgresError should have Postgres-specific properties", () => { + // Test with common properties that we'll definitely have + const error = new SQL.PostgresError("Postgres error", { + code: "23505", + detail: "Key (id)=(1) already exists.", + hint: "Try using a different ID.", + severity: "ERROR", + }); + + expect(error.code).toBe("23505"); + expect(error.detail).toBe("Key (id)=(1) already exists."); + expect(error.hint).toBe("Try using a different ID."); + expect(error.severity).toBe("ERROR"); + }); + + test("PostgresError should support extended properties when available", () => { + // Test that we can include additional properties when they're provided by Postgres + const error = new SQL.PostgresError("Postgres error", { + code: "23505", + detail: "Duplicate key value", + hint: "", + severity: "ERROR", + schema: "public", + table: "users", + constraint: "users_pkey", + }); + + expect(error.code).toBe("23505"); + expect(error.detail).toBe("Duplicate key value"); + expect(error.schema).toBe("public"); + expect(error.table).toBe("users"); + expect(error.constraint).toBe("users_pkey"); + }); + + test("PostgresError should be catchable as SQLError", () => { + try { + throw new SQL.PostgresError("Postgres error", { + code: "00000", + detail: "", + hint: "", + severity: "ERROR", + }); + } catch (e) { + if (e instanceof SQL.SQLError) { + expect(e).toBeInstanceOf(SQL.PostgresError); + } else { + throw new Error("Should be catchable as SQLError"); + } + } + }); + + test("PostgresError with minimal properties", () => { + const error = new SQL.PostgresError("Connection failed", { + code: "", + detail: "", + hint: "", + severity: "ERROR", + }); + expect(error.message).toBe("Connection failed"); + expect(error.code).toBe(""); + expect(error.detail).toBe(""); + }); + }); + + describe("SQLiteError class", () => { + test("SQLiteError should be a constructor", () => { + expect(typeof SQL.SQLiteError).toBe("function"); + expect(SQL.SQLiteError.name).toBe("SQLiteError"); + }); + + test("SQLiteError should extend SQLError", () => { + const error = new SQL.SQLiteError("SQLite error", { + code: "SQLITE_ERROR", + errno: 1, + }); + expect(error).toBeInstanceOf(Error); + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.SQLiteError); + expect(error.message).toBe("SQLite error"); + expect(error.name).toBe("SQLiteError"); + }); + + test("SQLiteError should have SQLite-specific properties", () => { + const error = new SQL.SQLiteError("UNIQUE constraint failed: users.email", { + code: "SQLITE_CONSTRAINT_UNIQUE", + errno: 2067, + }); + + expect(error.code).toBe("SQLITE_CONSTRAINT_UNIQUE"); + expect(error.errno).toBe(2067); + expect(error.message).toBe("UNIQUE constraint failed: users.email"); + }); + + test("SQLiteError should be catchable as SQLError", () => { + try { + throw new SQL.SQLiteError("SQLite error", { + code: "SQLITE_ERROR", + errno: 1, + }); + } catch (e) { + if (e instanceof SQL.SQLError) { + expect(e).toBeInstanceOf(SQL.SQLiteError); + } else { + throw new Error("Should be catchable as SQLError"); + } + } + }); + + test("SQLiteError with minimal properties", () => { + const error = new SQL.SQLiteError("Database locked", { + code: "SQLITE_BUSY", + errno: 5, + }); + expect(error.message).toBe("Database locked"); + expect(error.code).toBe("SQLITE_BUSY"); + expect(error.errno).toBe(5); + }); + }); + + describe("Error 
hierarchy and instanceof checks", () => { + test("can differentiate between PostgresError and SQLiteError", () => { + const pgError = new SQL.PostgresError("pg error", { + code: "00000", + detail: "", + hint: "", + severity: "ERROR", + }); + const sqliteError = new SQL.SQLiteError("sqlite error", { + code: "SQLITE_ERROR", + errno: 1, + }); + + expect(pgError instanceof SQL.PostgresError).toBe(true); + expect(pgError instanceof SQL.SQLiteError).toBe(false); + expect(pgError instanceof SQL.SQLError).toBe(true); + + expect(sqliteError instanceof SQL.SQLiteError).toBe(true); + expect(sqliteError instanceof SQL.PostgresError).toBe(false); + expect(sqliteError instanceof SQL.SQLError).toBe(true); + }); + + test("can catch all SQL errors with base class", () => { + const errors = [ + new SQL.PostgresError("pg error", { + code: "00000", + detail: "", + hint: "", + severity: "ERROR", + }), + new SQL.SQLiteError("sqlite error", { + code: "SQLITE_ERROR", + errno: 1, + }), + new SQL.SQLError("generic sql error"), + ]; + + for (const error of errors) { + try { + throw error; + } catch (e) { + expect(e).toBeInstanceOf(SQL.SQLError); + } + } + }); + + test("error.toString() returns proper format", () => { + const pgError = new SQL.PostgresError("connection failed", { + code: "08001", + detail: "", + hint: "", + severity: "ERROR", + }); + const sqliteError = new SQL.SQLiteError("database locked", { + code: "SQLITE_BUSY", + errno: 5, + }); + const sqlError = new SQL.SQLError("generic error"); + + expect(pgError.toString()).toContain("PostgresError"); + expect(pgError.toString()).toContain("connection failed"); + + expect(sqliteError.toString()).toContain("SQLiteError"); + expect(sqliteError.toString()).toContain("database locked"); + + expect(sqlError.toString()).toContain("SQLError"); + expect(sqlError.toString()).toContain("generic error"); + }); + }); + + describe("Integration with actual database operations", () => { + describe("SQLite errors", () => { + test("SQLite constraint violation throws SQLiteError", async () => { + const dir = tempDirWithFiles("sqlite-error-test", {}); + const dbPath = path.join(dir, "test.db"); + + const db = new SQL({ filename: dbPath, adapter: "sqlite" }); + + await db` + CREATE TABLE users ( + id INTEGER PRIMARY KEY, + email TEXT UNIQUE NOT NULL + ) + `; + + await db`INSERT INTO users (email) VALUES ('test@example.com')`; + + try { + await db`INSERT INTO users (email) VALUES ('test@example.com')`; + throw new Error("Should have thrown an error"); + } catch (e) { + expect(e).toBeInstanceOf(SQL.SQLiteError); + expect(e).toBeInstanceOf(SQL.SQLError); + expect(e.message).toContain("UNIQUE constraint failed"); + expect(e.code).toContain("SQLITE_CONSTRAINT"); + } + + await db.close(); + }); + + test("SQLite syntax error throws SQLiteError", async () => { + const dir = tempDirWithFiles("sqlite-syntax-error-test", {}); + const dbPath = path.join(dir, "test.db"); + + const db = new SQL({ filename: dbPath, adapter: "sqlite" }); + + try { + await db`SELCT * FROM nonexistent`; + throw new Error("Should have thrown an error"); + } catch (e) { + expect(e).toBeInstanceOf(SQL.SQLiteError); + expect(e).toBeInstanceOf(SQL.SQLError); + expect(e.message).toContain("syntax error"); + expect(e.code).toBe("SQLITE_ERROR"); + } + + await db.close(); + }); + + test("SQLite database locked throws SQLiteError", async () => { + const dir = tempDirWithFiles("sqlite-locked-test", {}); + const dbPath = path.join(dir, "test.db"); + + await using db1 = new SQL({ filename: dbPath, adapter: "sqlite" }); + 
await using db2 = new SQL({ filename: dbPath, adapter: "sqlite" }); + + await db1`CREATE TABLE test (id INTEGER PRIMARY KEY)`; + + await db1`BEGIN EXCLUSIVE TRANSACTION`; + await db1`INSERT INTO test (id) VALUES (1)`; + + try { + await db2`INSERT INTO test (id) VALUES (2)`; + throw new Error("Should have thrown an error"); + } catch (e) { + expect(e).toBeInstanceOf(SQL.SQLiteError); + expect(e).toBeInstanceOf(SQL.SQLError); + expect(e.code).toBe("SQLITE_BUSY"); + } + + await db1`COMMIT`; + }); + }); + }); + + describe("Type guards", () => { + test("can use instanceof for type narrowing", () => { + function handleError(e: unknown) { + if (e instanceof SQL.PostgresError) { + return `PG: ${e.code}`; + } else if (e instanceof SQL.SQLiteError) { + return `SQLite: ${e.errno}`; + } else if (e instanceof SQL.SQLError) { + return `SQL: ${e.message}`; + } + return "Unknown error"; + } + + expect( + handleError( + new SQL.PostgresError("test", { + code: "23505", + detail: "", + hint: "", + severity: "ERROR", + }), + ), + ).toBe("PG: 23505"); + expect( + handleError( + new SQL.SQLiteError("test", { + code: "SQLITE_BUSY", + errno: 5, + }), + ), + ).toBe("SQLite: 5"); + expect(handleError(new SQL.SQLError("test"))).toBe("SQL: test"); + expect(handleError(new Error("test"))).toBe("Unknown error"); + }); + }); + }); +}); diff --git a/test/js/sql/sqlite-sql.test.ts b/test/js/sql/sqlite-sql.test.ts new file mode 100644 index 0000000000..a735e0e221 --- /dev/null +++ b/test/js/sql/sqlite-sql.test.ts @@ -0,0 +1,5145 @@ +import { SQL } from "bun"; +import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, mock, test } from "bun:test"; +import { tempDirWithFiles } from "harness"; +import { existsSync } from "node:fs"; +import { rm, stat } from "node:fs/promises"; +import { join } from "node:path"; +import path from "path"; + +describe("Connection & Initialization", () => { + describe("common default connection strings", () => { + test("should parse common connection strings", () => { + const memory = new SQL(":memory:"); + expect(memory.options.adapter).toBe("sqlite"); + expect(memory.options.filename).toBe(":memory:"); + + const myapp = new SQL("sqlite://myapp.db"); + expect(myapp.options.adapter).toBe("sqlite"); + expect(myapp.options.filename).toBe("myapp.db"); + + const myapp2 = new SQL("myapp.db", { adapter: "sqlite" }); + expect(myapp2.options.adapter).toBe("sqlite"); + expect(myapp2.options.filename).toBe("myapp.db"); + + expect(() => new SQL("myapp.db")).toThrowErrorMatchingInlineSnapshot( + `"Invalid URL 'myapp.db' for postgres. 
Did you mean to specify \`{ adapter: "sqlite" }\`?"`, + ); + + const postgres = new SQL("postgres://user1:pass2@localhost:5432/mydb"); + expect(postgres.options.adapter).not.toBe("sqlite"); + }); + }); + + test("should connect to in-memory SQLite database", async () => { + const sql = new SQL("sqlite://:memory:"); + expect(sql).toBeDefined(); + expect(sql.options.adapter).toBe("sqlite"); + await sql.close(); + }); + + test("should connect to file-based SQLite database", async () => { + const dir = tempDirWithFiles("sqlite-db-test", {}); + const dbPath = path.join(dir, "test.db"); + + const sql = new SQL(`sqlite://${dbPath}`); + expect(sql).toBeDefined(); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(dbPath); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle connection with options object", async () => { + const sql = new SQL({ + adapter: "sqlite", + filename: ":memory:", + }); + + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(":memory:"); + + await sql`CREATE TABLE test (id INTEGER)`; + await sql`INSERT INTO test VALUES (1)`; + + const result = await sql`SELECT * FROM test`; + expect(result).toHaveLength(1); + + await sql.close(); + }); + + test("onconnect and onclose callbacks are invoked for SQLite", async () => { + const onconnect = mock((err?: any) => {}); + const onclose = mock((err?: any) => {}); + + const sql = new SQL({ + adapter: "sqlite", + filename: ":memory:", + onconnect, + onclose, + }); + + // onconnect should run during creation with null error + expect(onconnect).toHaveBeenCalled(); + const onconnectArg = onconnect.mock.calls[0]?.[0]; + expect(onconnectArg === null).toBe(true); + await sql`SELECT 1 as x`; + await sql.close(); + expect(onclose).toHaveBeenCalled(); + const oncloseArg = onclose.mock.calls[0]?.[0]; + expect(oncloseArg instanceof Error).toBe(true); + }); + + test("onconnect receives Error when open fails (readonly non-existent)", async () => { + const dir = tempDirWithFiles("sqlite-onconnect-error", {}); + const dbPath = join(dir, "missing.db"); + + const onconnect = mock((err?: any) => {}); + const onclose = mock((err?: any) => {}); + + const sql = new SQL({ + adapter: "sqlite", + filename: dbPath, + // open in readonly so creation is not allowed + readonly: true, + onconnect, + onclose, + }); + + // Should have been called with an Error + expect(onconnect).toHaveBeenCalled(); + const arg = onconnect.mock.calls[0]?.[0]; + expect(arg instanceof Error).toBe(true); + + await sql.close(); + expect(onclose).toHaveBeenCalled(); + + await rm(dir, { recursive: true }); + }); + + test("should create database file if it doesn't exist", async () => { + const dir = tempDirWithFiles("sqlite-create-test", {}); + const dbPath = path.join(dir, "new.db"); + + const sql = new SQL(`sqlite://${dbPath}`); + await sql`CREATE TABLE test (id INTEGER)`; + + const stats = await stat(dbPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should work with relative paths", async () => { + const dir = tempDirWithFiles("sqlite-test", {}); + const sql = new SQL({ + adapter: "sqlite", + filename: path.join(dir, "test.db"), + }); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(path.join(dir, "test.db")); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + describe("Environment Variable Handling", () => { + let originalEnv: 
NodeJS.ProcessEnv;
+
+    beforeEach(() => {
+      originalEnv = { ...Bun.env };
+    });
+
+    afterEach(() => {
+      // Restore original env vars
+      for (const key in Bun.env) {
+        if (!(key in originalEnv)) {
+          delete Bun.env[key];
+        }
+      }
+      for (const key in originalEnv) {
+        Bun.env[key] = originalEnv[key];
+      }
+    });
+
+    test("should use DATABASE_URL for SQLite when it's a SQLite URL", async () => {
+      const dir = tempDirWithFiles("sqlite-env-test", {});
+      const dbPath = path.join(dir, "env-test.db");
+
+      Bun.env.DATABASE_URL = `sqlite://${dbPath}`;
+
+      const sql = new SQL();
+      expect(sql.options.adapter).toBe("sqlite");
+      expect(sql.options.filename).toBe(dbPath);
+
+      await sql`CREATE TABLE test (id INTEGER)`;
+      const stats = await stat(dbPath);
+      expect(stats.isFile()).toBe(true);
+
+      await sql.close();
+      await rm(dir, { recursive: true });
+    });
+
+    test("should handle DATABASE_URL with :memory:", async () => {
+      Bun.env.DATABASE_URL = "sqlite://:memory:";
+
+      const sql = new SQL();
+      expect(sql.options.adapter).toBe("sqlite");
+      expect(sql.options.filename).toBe(":memory:");
+
+      await sql`CREATE TABLE test (id INTEGER)`;
+      await sql`INSERT INTO test VALUES (1)`;
+      const result = await sql`SELECT * FROM test`;
+      expect(result).toHaveLength(1);
+
+      await sql.close();
+    });
+
+    test("should handle SQLITE_URL env var when specified", async () => {
+      const dir = tempDirWithFiles("sqlite-url-test", {});
+      const dbPath = path.join(dir, "sqlite-url.db");
+
+      // This doesn't exist in the current implementation but testing anyway
+      Bun.env.SQLITE_URL = `sqlite://${dbPath}`;
+      Bun.env.DATABASE_URL = "postgres://localhost/test"; // Should be ignored when adapter is sqlite
+
+      const sql = new SQL("sqlite://:memory:");
+      expect(sql.options.adapter).toBe("sqlite");
+      expect(sql.options.filename).toBe(":memory:");
+
+      await sql.close();
+      await rm(dir, { recursive: true });
+    });
+
+    test("should NOT use POSTGRES_URL for SQLite", async () => {
+      Bun.env.POSTGRES_URL = "postgres://user:pass@localhost:5432/mydb";
+
+      // When explicitly using sqlite adapter, POSTGRES_URL should be ignored
+      const sql = new SQL({ adapter: "sqlite", filename: ":memory:" });
+      expect(sql.options.adapter).toBe("sqlite");
+      expect(sql.options.filename).toBe(":memory:");
+
+      await sql.close();
+    });
+
+    test("should NOT use PGURL for SQLite", async () => {
+      Bun.env.PGURL = "postgres://user:pass@localhost:5432/mydb";
+
+      const sql = new SQL({ adapter: "sqlite", filename: ":memory:" });
+      expect(sql.options.adapter).toBe("sqlite");
+      expect(sql.options.filename).toBe(":memory:");
+
+      await sql.close();
+    });
+
+    test("should NOT use PG_URL for SQLite", async () => {
+      Bun.env.PG_URL = "postgres://user:pass@localhost:5432/mydb";
+
+      const sql = new SQL({ adapter: "sqlite", filename: ":memory:" });
+      expect(sql.options.adapter).toBe("sqlite");
+      expect(sql.options.filename).toBe(":memory:");
+
+      await sql.close();
+    });
+
+    test("should use the postgres adapter when POSTGRES_URL is set without adapter specification", () => {
+      Bun.env.POSTGRES_URL = "postgres://user:pass@localhost:5432/mydb";
+      // Delete rather than assign undefined: assigning undefined to an env var stores the string "undefined"
+      delete Bun.env.DATABASE_URL;
+
+      // This should create a postgres connection, not sqlite
+      const sql = new SQL();
+      expect(sql.options.adapter).toBe("postgres");
+      sql.close();
+    });
+
+    test("should handle multiple env vars with precedence", async () => {
+      // Test precedence: POSTGRES_URL > DATABASE_URL > PGURL > PG_URL
+      Bun.env.PG_URL = "postgres://pg_url@localhost:5432/pg_db";
+      Bun.env.PGURL = "postgres://pgurl@localhost:5432/pgurl_db";
+
Bun.env.DATABASE_URL = "sqlite://:memory:"; + Bun.env.POSTGRES_URL = "postgres://postgres@localhost:5432/postgres_db"; + + const sql = new SQL(); + // POSTGRES_URL takes precedence + expect(sql.options.adapter).toBe("postgres"); + await sql.close(); + + // Remove POSTGRES_URL + delete Bun.env.POSTGRES_URL; + const sql2 = new SQL(); + // DATABASE_URL takes next precedence and it's SQLite (detected via :memory:) + expect(sql2.options.adapter).toBe("sqlite"); + expect(sql2.options.filename).toBe(":memory:"); + await sql2.close(); + }); + }); + + describe("file:// Protocol URLs", () => { + test("should handle file:// URLs", async () => { + const dir = tempDirWithFiles("file-protocol-test", {}); + const dbPath = path.join(dir, "file-test.db"); + + const sql = new SQL(`file://${dbPath}`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(dbPath); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(dbPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle file: URLs without slashes", async () => { + const dir = tempDirWithFiles("file-no-slash-test", {}); + const dbPath = path.join(dir, "file-noslash.db"); + + const sql = new SQL(`file:${dbPath}`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(dbPath); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(dbPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle file:// with :memory:", () => { + const sql = new SQL("file://:memory:"); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(":memory:"); + sql.close(); + }); + }); + + describe("Query Parameters in SQLite URLs", () => { + test("should handle mode=ro (readonly)", async () => { + const dir = tempDirWithFiles("readonly-test", {}); + const dbPath = path.join(dir, "readonly.db"); + + // First create a database + const createSql = new SQL(`sqlite://${dbPath}`); + await createSql`CREATE TABLE test (id INTEGER)`; + await createSql`INSERT INTO test VALUES (1)`; + await createSql.close(); + + // Now open in readonly mode + const sql = new SQL(`sqlite://${dbPath}?mode=ro`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.readonly).toBe(true); + + // Should be able to read + const result = await sql`SELECT * FROM test`; + expect(result).toHaveLength(1); + + expect(sql`INSERT INTO test VALUES (2)`.execute()).rejects.toThrowErrorMatchingInlineSnapshot( + `"attempt to write a readonly database"`, + ); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle mode=rw (read-write)", async () => { + const dir = tempDirWithFiles("readwrite-test", {}); + const dbPath = path.join(dir, "readwrite.db"); + + // Create database first + const createSql = new SQL(`sqlite://${dbPath}`); + await createSql`CREATE TABLE test (id INTEGER)`; + await createSql.close(); + + const sql = new SQL(`sqlite://${dbPath}?mode=rw`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.readonly).toBe(false); + + await sql`INSERT INTO test VALUES (1)`; + const result = await sql`SELECT * FROM test`; + expect(result).toHaveLength(1); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle mode=rwc (read-write-create)", async () => { + const dir = tempDirWithFiles("rwc-test", {}); + const dbPath = path.join(dir, "rwc.db"); + + const sql = new 
SQL(`sqlite://${dbPath}?mode=rwc`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.readonly).toBe(false); + expect(sql.options.create).toBe(true); + + await sql`CREATE TABLE test (id INTEGER)`; + await sql`INSERT INTO test VALUES (1)`; + const result = await sql`SELECT * FROM test`; + expect(result).toHaveLength(1); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle multiple query parameters", async () => { + const dir = tempDirWithFiles("multi-param-test", {}); + const dbPath = path.join(dir, "multi.db"); + + // Note: Only mode is supported for SQLite, other params should be ignored + const sql = new SQL(`sqlite://${dbPath}?mode=rwc&cache=shared&timeout=5000`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.create).toBe(true); + + await sql`CREATE TABLE test (id INTEGER)`; + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should ignore fragment in URL", async () => { + const dir = tempDirWithFiles("fragment-test", {}); + const dbPath = path.join(dir, "test#file.db"); + + // # is a valid filename character in SQLite + const sql = new SQL(`sqlite://${dbPath}`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(dbPath); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(dbPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + }); + + describe("Special Characters in Filenames", () => { + test("should handle spaces in filename", async () => { + const dir = tempDirWithFiles("space-test", {}); + const dbPath = path.join(dir, "test database.db"); + + const sql = new SQL(`sqlite://${dbPath}`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(dbPath); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(dbPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle special characters # and % in filename", async () => { + const dir = tempDirWithFiles("special-chars-test", {}); + const dbPath = path.join(dir, "test#123%data.db"); + + const sql = new SQL(`sqlite://${dbPath}`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(dbPath); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(dbPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle unicode characters in filename", async () => { + const dir = tempDirWithFiles("unicode-test", {}); + const dbPath = path.join(dir, "测试数据库.db"); + + const sql = new SQL(`sqlite://${dbPath}`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(dbPath); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(dbPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle dots in filename", async () => { + const dir = tempDirWithFiles("dots-test", {}); + const dbPath = path.join(dir, "my.app.v2.0.db"); + + const sql = new SQL(`sqlite://${dbPath}`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(dbPath); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(dbPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + }); + + describe("Path Handling", () => { + 
test("should handle absolute paths", async () => { + const dir = tempDirWithFiles("absolute-test", {}); + const dbPath = path.resolve(dir, "absolute.db"); + + const sql = new SQL(`sqlite://${dbPath}`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(dbPath); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(dbPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle relative paths with ./", async () => { + const dir = tempDirWithFiles("relative-dot-test", {}); + const originalCwd = process.cwd(); + + try { + process.chdir(dir); + + const sql = new SQL("sqlite://./test.db"); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe("./test.db"); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(path.join(dir, "test.db")); + expect(stats.isFile()).toBe(true); + + await sql.close(); + } finally { + process.chdir(originalCwd); + await rm(dir, { recursive: true }); + } + }); + + test("should handle paths with ../", async () => { + const parentDir = tempDirWithFiles("parent-test", {}); + const childDir = path.join(parentDir, "child"); + await Bun.$`mkdir -p ${childDir}`; + const originalCwd = process.cwd(); + + try { + process.chdir(childDir); + + const sql = new SQL("sqlite://../parent.db"); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe("../parent.db"); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(path.join(parentDir, "parent.db")); + expect(stats.isFile()).toBe(true); + + await sql.close(); + } finally { + process.chdir(originalCwd); + await rm(parentDir, { recursive: true }); + } + }); + + test("should handle nested paths", async () => { + const dir = tempDirWithFiles("nested-test", {}); + const nestedPath = path.join(dir, "data", "databases", "app.db"); + await Bun.$`mkdir -p ${path.dirname(nestedPath)}`; + + const sql = new SQL(`sqlite://${nestedPath}`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(nestedPath); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(nestedPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + }); + + describe("Empty and Invalid URLs", () => { + test("should handle empty string with adapter specified", () => { + const sql = new SQL("", { adapter: "sqlite" }); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(":memory:"); + sql.close(); + }); + + test("should handle null/undefined with adapter specified", () => { + const sql1 = new SQL(undefined as never, { adapter: "sqlite" }); + expect(sql1.options.adapter).toBe("sqlite"); + expect(sql1.options.filename).toBe(":memory:"); + sql1.close(); + + const sql2 = new SQL({ adapter: "sqlite" }); + expect(sql2.options.adapter).toBe("sqlite"); + expect(sql2.options.filename).toBe(":memory:"); + sql2.close(); + }); + + test("should handle sqlite: without path", () => { + const sql = new SQL("sqlite:"); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(""); + sql.close(); + }); + + test("should handle sqlite:// without path", () => { + const sql = new SQL("sqlite://"); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(""); + sql.close(); + }); + + test("should handle just :memory: without prefix", () => { + const sql = new SQL(":memory:"); + 
expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(":memory:"); + sql.close(); + }); + + test("should handle sqlite:memory without :", () => { + const sql = new SQL("sqlite:memory"); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(":memory:"); + sql.close(); + }); + + test("should throw for invalid URL without adapter", () => { + expect(() => new SQL("not-a-url")).toThrowErrorMatchingInlineSnapshot( + `"Invalid URL 'not-a-url' for postgres. Did you mean to specify \`{ adapter: "sqlite" }\`?"`, + ); + }); + + test("should throw for postgres URL when sqlite adapter is expected", () => { + expect(() => new SQL("myapp.db")).toThrowErrorMatchingInlineSnapshot( + `"Invalid URL 'myapp.db' for postgres. Did you mean to specify \`{ adapter: "sqlite" }\`?"`, + ); + }); + }); + + describe("Mixed Configurations", () => { + test("should prefer explicit URL over env var", async () => { + const dir = tempDirWithFiles("explicit-test", {}); + const envPath = path.join(dir, "env.db"); + const explicitPath = path.join(dir, "explicit.db"); + + Bun.env.DATABASE_URL = `sqlite://${envPath}`; + + const sql = new SQL(`sqlite://${explicitPath}`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(explicitPath); + + await sql`CREATE TABLE test (id INTEGER)`; + expect(existsSync(explicitPath)).toBe(true); + expect(existsSync(envPath)).toBe(false); + + await sql.close(); + delete Bun.env.DATABASE_URL; + await rm(dir, { recursive: true }); + }); + + test("should prefer options object over URL", async () => { + const dir = tempDirWithFiles("options-override-test", {}); + const urlPath = path.join(dir, "url.db"); + const optionsPath = path.join(dir, "options.db"); + + const sql = new SQL(`sqlite://${urlPath}`, { + adapter: "sqlite", + filename: optionsPath, + } as { adapter: "sqlite" }); + + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(optionsPath); + + await sql`CREATE TABLE test (id INTEGER)`; + expect(existsSync(optionsPath)).toBe(true); + expect(existsSync(urlPath)).toBe(false); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("should handle URL in options object", async () => { + const dir = tempDirWithFiles("url-in-options-test", {}); + const dbPath = path.join(dir, "url-options.db"); + + const sql = new SQL({ + adapter: "sqlite", + filename: `sqlite://${dbPath}`, + }); + + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(dbPath); + + await sql`CREATE TABLE test (id INTEGER)`; + const stats = await stat(dbPath); + expect(stats.isFile()).toBe(true); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + }); + + describe("Error Cases", () => { + test("should throw for unsupported adapter", () => { + expect(() => new SQL({ adapter: "mysql" as any })).toThrowErrorMatchingInlineSnapshot( + `"Unsupported adapter: mysql. 
Supported adapters: "postgres", "sqlite""`, + ); + }); + + test("should interpret ambiguous strings as postgres connection", () => { + // This gets interpreted as postgres with hostname "localhost" and database "432/mydb" + const sql = new SQL("localhost:5432/mydb"); + expect(sql.options.adapter).toBe("postgres"); + sql.close(); + }); + + test("SQLite readonly mode creates connection but fails on missing file access", async () => { + const dir = tempDirWithFiles("ro-nonexist-test", {}); + const dbPath = path.join(dir, "nonexistent.db"); + + const sql = new SQL(`sqlite://${dbPath}?mode=ro`); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.readonly).toBe(true); + expect(sql.options.filename).toBe(dbPath); + + expect(sql`SELECT 1`.execute()).rejects.toThrowErrorMatchingInlineSnapshot(`"unable to open database file"`); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + }); +}); + +describe("Data Types & Values", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("handles NULL values", async () => { + await sql`CREATE TABLE nulls (id INTEGER, value TEXT)`; + await sql`INSERT INTO nulls (id, value) VALUES (1, ${null})`; + + const result = await sql`SELECT * FROM nulls`; + expect(result[0].value).toBeNull(); + }); + + test("handles INTEGER values", async () => { + const values = [0, 1, -1, 2147483647, -2147483648]; + await sql`CREATE TABLE integers (value INTEGER)`; + + for (const val of values) { + await sql`INSERT INTO integers VALUES (${val})`; + } + + const results = await sql<{ value: number }[]>`SELECT * FROM integers`; + expect(results.map(r => r.value)).toEqual(values); + }); + + test("INSERT containing literal 'RETURNING' should not be treated as RETURNING", async () => { + await sql`CREATE TABLE insert_literal_returning (name TEXT)`; + const res = await sql`INSERT INTO insert_literal_returning (name) VALUES ('RETURNING')`; + expect(res.command).toBe("INSERT"); + expect(res.count).toBe(1); + + const rows = await sql`SELECT COUNT(*) AS c FROM insert_literal_returning`; + expect(rows[0].c).toBe(1); + }); + + test("handles REAL values", async () => { + const values = [0.0, 1.1, -1.1, 3.14159, Number.MAX_SAFE_INTEGER + 0.1]; + await sql`CREATE TABLE reals (value REAL)`; + + for (const val of values) { + await sql`INSERT INTO reals VALUES (${val})`; + } + + const results = await sql`SELECT * FROM reals`; + results.forEach((r, i) => { + expect(r.value).toBeCloseTo(values[i], 10); + }); + }); + + test("handles TEXT values", async () => { + const values = ["", "hello", "hello world", "unicode: 你好 🌍", "'quotes'", '"double quotes"']; + await sql`CREATE TABLE texts (value TEXT)`; + + for (const val of values) { + await sql`INSERT INTO texts VALUES (${val})`; + } + + const results = await sql`SELECT * FROM texts`; + expect(results.map(r => r.value)).toEqual(values); + }); + + test("handles BLOB values", async () => { + const buffer = Buffer.from([0x00, 0x01, 0x02, 0x03, 0xff]); + await sql`CREATE TABLE blobs (value BLOB)`; + await sql`INSERT INTO blobs VALUES (${buffer})`; + + const result = await sql`SELECT * FROM blobs`; + expect(Buffer.from(result[0].value)).toEqual(buffer); + }); + + test("handles boolean values (stored as INTEGER)", async () => { + await sql`CREATE TABLE bools (value INTEGER)`; + await sql`INSERT INTO bools VALUES (${true}), (${false})`; + + const results = await sql`SELECT * FROM bools`; + expect(results[0].value).toBe(1); + 
expect(results[1].value).toBe(0); + }); + + test("handles Date values (stored as TEXT)", async () => { + const date = new Date("2024-01-01T12:00:00Z"); + await sql`CREATE TABLE dates (value TEXT)`; + await sql`INSERT INTO dates VALUES (${date.toISOString()})`; + + const result = await sql`SELECT * FROM dates`; + expect(new Date(result[0].value)).toEqual(date); + }); + + test("handles JSON values (stored as TEXT)", async () => { + const jsonData = { name: "Test", values: [1, 2, 3], nested: { key: "value" } }; + await sql`CREATE TABLE json_data (value TEXT)`; + await sql`INSERT INTO json_data VALUES (${JSON.stringify(jsonData)})`; + + const result = await sql`SELECT * FROM json_data`; + expect(JSON.parse(result[0].value)).toEqual(jsonData); + }); +}); + +describe("Query Execution", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("CREATE TABLE", async () => { + const result = await sql`CREATE TABLE users ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL, + email TEXT UNIQUE, + age INTEGER CHECK (age >= 0), + created_at TEXT DEFAULT CURRENT_TIMESTAMP + )`; + + expect(result.command).toBe("CREATE"); + }); + + test("INSERT with RETURNING", async () => { + await sql`CREATE TABLE items (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)`; + + const result = await sql`INSERT INTO items (name) VALUES (${"Item1"}) RETURNING *`; + expect(result).toHaveLength(1); + expect(result[0].id).toBe(1); + expect(result[0].name).toBe("Item1"); + expect(result.command).toBe("INSERT"); + }); + + test("UPDATE with affected rows", async () => { + await sql`CREATE TABLE products (id INTEGER PRIMARY KEY, price REAL)`; + await sql`INSERT INTO products VALUES (1, 10.0), (2, 20.0), (3, 30.0)`; + + const result = await sql`UPDATE products SET price = price * 1.1 WHERE price < 25`; + expect(result.count).toBe(2); + expect(result.command).toBe("UPDATE"); + }); + + test("DELETE with affected rows", async () => { + await sql`CREATE TABLE tasks (id INTEGER PRIMARY KEY, done INTEGER)`; + await sql`INSERT INTO tasks VALUES (1, 0), (2, 1), (3, 0), (4, 1)`; + + const result = await sql`DELETE FROM tasks WHERE done = 1`; + expect(result.count).toBe(2); + expect(result.command).toBe("DELETE"); + }); + + test("SELECT with various clauses", async () => { + await sql`CREATE TABLE scores (id INTEGER, player TEXT, score INTEGER, team TEXT)`; + await sql`INSERT INTO scores VALUES + (1, 'Alice', 100, 'Red'), + (2, 'Bob', 85, 'Blue'), + (3, 'Charlie', 95, 'Red'), + (4, 'Diana', 110, 'Blue')`; + + const ordered = await sql`SELECT * FROM scores ORDER BY score DESC`; + expect(ordered[0].player).toBe("Diana"); + + const filtered = await sql`SELECT * FROM scores WHERE score > ${90}`; + expect(filtered).toHaveLength(3); + + const grouped = await sql` + SELECT team, COUNT(*) as count, AVG(score) as avg_score + FROM scores + GROUP BY team + `; + expect(grouped).toHaveLength(2); + + const limited = await sql`SELECT * FROM scores ORDER BY score DESC LIMIT 2 OFFSET 1`; + expect(limited).toHaveLength(2); + expect(limited[0].player).toBe("Alice"); + }); + + test("handles multiple statements with unsafe", async () => { + await sql.unsafe(` + CREATE TABLE multi1 (id INTEGER); + CREATE TABLE multi2 (id INTEGER); + INSERT INTO multi1 VALUES (1); + INSERT INTO multi2 VALUES (2); + `); + + const result1 = await sql`SELECT * FROM multi1`; + const result2 = await sql`SELECT * FROM multi2`; + + expect(result1).toHaveLength(1); + 
expect(result1[0].id).toBe(1); + expect(result2).toHaveLength(1); + expect(result2[0].id).toBe(2); + }); +}); + +describe("Parameterized Queries", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + await sql`CREATE TABLE params_test (id INTEGER, text_val TEXT, num_val REAL)`; + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("converts PostgreSQL $N style to SQLite ? style", async () => { + await sql`INSERT INTO params_test VALUES (${1}, ${"test"}, ${3.14})`; + + const result = await sql`SELECT * FROM params_test WHERE id = ${1}`; + expect(result[0].text_val).toBe("test"); + expect(result[0].num_val).toBeCloseTo(3.14); + }); + + test("handles many parameters", async () => { + const values = Array.from({ length: 20 }, (_, i) => i); + const columns = values.map(i => `col${i} INTEGER`).join(", "); + const tableName = "many_params"; + + await sql.unsafe(`CREATE TABLE ${tableName} (${columns})`); + + const placeholders = values.map(() => "?").join(", "); + await sql.unsafe(`INSERT INTO ${tableName} VALUES (${placeholders})`, values); + + const result = await sql.unsafe(`SELECT * FROM ${tableName}`); + expect(Object.values(result[0])).toEqual(values); + }); + + test("escapes special characters in parameters", async () => { + const specialStrings = [ + "'; DROP TABLE users; --", + '" OR "1"="1', + "\\'; DROP TABLE users; --", + "\x00\x01\x02", + "Robert'); DROP TABLE Students;--", + ]; + + for (const str of specialStrings) { + await sql`INSERT INTO params_test (id, text_val) VALUES (${100}, ${str})`; + const result = await sql`SELECT text_val FROM params_test WHERE id = ${100}`; + expect(result[0].text_val).toBe(str); + await sql`DELETE FROM params_test WHERE id = ${100}`; + } + }); +}); + +describe("Template Literal Security", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("dynamic table names are not allowed in template literals", async () => { + const tableName = "users"; + + expect(sql`CREATE TABLE ${tableName} (id INTEGER)`.execute()).rejects.toThrowErrorMatchingInlineSnapshot( + `"near "?": syntax error"`, + ); + + await sql.unsafe(`CREATE TABLE ${tableName} (id INTEGER)`); + const tables = await sql`SELECT name FROM sqlite_master WHERE type='table' AND name='users'`; + expect(tables).toHaveLength(1); + }); + + test("dynamic column names are not allowed in template literals", async () => { + await sql`CREATE TABLE test_security (id INTEGER, name TEXT)`; + await sql`INSERT INTO test_security VALUES (1, 'test')`; + + const columnName = "name"; + + const result = await sql`SELECT ${columnName} FROM test_security`; + expect(result[0]).toEqual({ "?": "name" }); + + const unsafeResult = await sql.unsafe(`SELECT ${columnName} FROM test_security`); + expect(unsafeResult[0].name).toBe("test"); + }); + + test("dynamic SQL structure is not allowed in template literals", async () => { + const columns = "id INTEGER, name TEXT"; + + expect(sql`CREATE TABLE dynamic_structure (${columns})`.execute()).rejects.toThrowErrorMatchingInlineSnapshot( + `"near "?": syntax error"`, + ); + + await sql.unsafe(`CREATE TABLE dynamic_structure (${columns})`); + const tables = await sql`SELECT name FROM sqlite_master WHERE type='table' AND name='dynamic_structure'`; + expect(tables).toHaveLength(1); + }); + + test("SQL injection is prevented with template literals", async () => { + await sql`CREATE TABLE injection_test (id INTEGER, value TEXT)`; + 
await sql`INSERT INTO injection_test VALUES (1, 'safe')`; + + const maliciousInput = "'; DROP TABLE injection_test; --"; + + await sql`INSERT INTO injection_test VALUES (2, ${maliciousInput})`; + + const result = await sql`SELECT * FROM injection_test WHERE id = 2`; + expect(result[0].value).toBe("'; DROP TABLE injection_test; --"); + + const tables = await sql`SELECT name FROM sqlite_master WHERE type='table' AND name='injection_test'`; + expect(tables).toHaveLength(1); + }); + + test("parameters work correctly for VALUES but not for identifiers", async () => { + await sql`CREATE TABLE identifier_test (id INTEGER, name TEXT)`; + + const id = 1; + const name = "Alice"; + await sql`INSERT INTO identifier_test VALUES (${id}, ${name})`; + + const result = await sql`SELECT * FROM identifier_test WHERE id = ${id}`; + expect(result[0].name).toBe("Alice"); + + const table = "identifier_test"; + expect(sql`SELECT * FROM ${table}`.execute()).rejects.toThrowErrorMatchingInlineSnapshot( + `"near "?": syntax error"`, + ); + }); + + test("sql([...]) helper not allowed when 'where in' appears only in string literal", async () => { + const sql = new SQL("sqlite://:memory:"); + expect( + sql`SELECT 'this has where in inside a string' ${sql([1, 2])}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot(`"Helpers are only allowed for INSERT, UPDATE and WHERE IN commands"`); + await sql.close(); + }); +}); + +describe("Transactions", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + await sql`CREATE TABLE accounts (id INTEGER PRIMARY KEY, balance REAL)`; + await sql`INSERT INTO accounts VALUES (1, 1000), (2, 500)`; + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("successful transaction commits", async () => { + const result = await sql.begin(async tx => { + await tx`UPDATE accounts SET balance = balance - 100 WHERE id = 1`; + await tx`UPDATE accounts SET balance = balance + 100 WHERE id = 2`; + return "success"; + }); + + expect(result).toBe("success"); + + const accounts = await sql`SELECT * FROM accounts ORDER BY id`; + expect(accounts[0].balance).toBe(900); + expect(accounts[1].balance).toBe(600); + }); + + test("failed transaction rolls back", async () => { + try { + await sql.begin(async tx => { + await tx`UPDATE accounts SET balance = balance - 2000 WHERE id = 1`; + await tx`UPDATE accounts SET balance = balance + 2000 WHERE id = 2`; + throw new Error("Insufficient funds"); + }); + } catch (err) { + expect(err).toBeInstanceOf(Error); + expect((err as Error).message).toBe("Insufficient funds"); + } + + const accounts = await sql`SELECT * FROM accounts ORDER BY id`; + expect(accounts[0].balance).toBe(1000); + expect(accounts[1].balance).toBe(500); + }); + + test("nested transactions (savepoints)", async () => { + await sql.begin(async tx => { + await tx`UPDATE accounts SET balance = balance - 100 WHERE id = 1`; + + try { + await tx.savepoint(async sp => { + await sp`UPDATE accounts SET balance = balance - 200 WHERE id = 1`; + throw new Error("Inner transaction failed"); + }); + } catch (err) {} + + await tx`UPDATE accounts SET balance = balance + 100 WHERE id = 2`; + }); + + const accounts = await sql`SELECT * FROM accounts ORDER BY id`; + expect(accounts[0].balance).toBe(900); + expect(accounts[1].balance).toBe(600); + }); + + // SQLite doesn't support read-only transactions via BEGIN syntax + // It only supports DEFERRED (default), IMMEDIATE, and EXCLUSIVE + test("read-only transactions throw appropriate error", async () => { + 
expect( + sql.begin("readonly", async tx => { + return await tx`SELECT * FROM accounts`; + }), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"SQLite doesn't support 'readonly' transaction mode. Use DEFERRED, IMMEDIATE, or EXCLUSIVE."`, + ); + }); + + test("deferred vs immediate transactions", async () => { + await sql.begin("deferred", async tx => { + await tx`SELECT * FROM accounts`; + await tx`UPDATE accounts SET balance = balance + 1`; + }); + + await sql.begin("immediate", async tx => { + await tx`UPDATE accounts SET balance = balance + 1`; + }); + + const accounts = await sql`SELECT * FROM accounts WHERE id = 1`; + expect(accounts[0].balance).toBe(1002); + }); +}); + +describe("SQLite-specific features", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("PRAGMA statements", async () => { + const version = await sql`PRAGMA compile_options`; + expect(version.length).toBeGreaterThan(0); + + const journalMode = await sql`PRAGMA journal_mode`; + expect(journalMode[0].journal_mode).toBeDefined(); + + await sql`PRAGMA synchronous = NORMAL`; + const syncMode = await sql`PRAGMA synchronous`; + expect(syncMode[0].synchronous).toBe(1); + }); + + test("AUTOINCREMENT behavior", async () => { + await sql`CREATE TABLE auto_test ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + value TEXT + )`; + + await sql`INSERT INTO auto_test (value) VALUES ('first')`; + await sql`INSERT INTO auto_test (value) VALUES ('second')`; + await sql`DELETE FROM auto_test WHERE id = 2`; + await sql`INSERT INTO auto_test (value) VALUES ('third')`; + + const results = await sql`SELECT * FROM auto_test ORDER BY id`; + expect(results[0].id).toBe(1); + expect(results[1].id).toBe(3); + }); + + test("last_insert_rowid()", async () => { + await sql`CREATE TABLE rowid_test (id INTEGER PRIMARY KEY, value TEXT)`; + await sql`INSERT INTO rowid_test (value) VALUES ('test')`; + + const result = await sql`SELECT last_insert_rowid() as id`; + expect(result[0].id).toBe(1); + }); + + test("changes() function", async () => { + await sql`CREATE TABLE changes_test (id INTEGER, value TEXT)`; + await sql`INSERT INTO changes_test VALUES (1, 'a'), (2, 'b'), (3, 'c')`; + + await sql`UPDATE changes_test SET value = 'updated' WHERE id > 1`; + const changes = await sql`SELECT changes() as count`; + expect(changes[0].count).toBe(2); + }); + + test("ATTACH DATABASE", async () => { + const dir = tempDirWithFiles("sqlite-attach-test", {}); + const attachPath = path.join(dir, "attached.db"); + + await sql`ATTACH DATABASE ${attachPath} AS attached`; + await sql`CREATE TABLE attached.other_table (id INTEGER)`; + await sql`INSERT INTO attached.other_table VALUES (1)`; + + const result = await sql`SELECT * FROM attached.other_table`; + expect(result).toHaveLength(1); + + await sql`DETACH DATABASE attached`; + await rm(dir, { recursive: true }); + }); + + test("Common Table Expressions (CTEs)", async () => { + await sql`CREATE TABLE employees (id INTEGER, name TEXT, manager_id INTEGER)`; + await sql`INSERT INTO employees VALUES + (1, 'CEO', NULL), + (2, 'VP1', 1), + (3, 'VP2', 1), + (4, 'Manager1', 2), + (5, 'Manager2', 3)`; + + const result = await sql` + WITH RECURSIVE org_chart AS ( + SELECT id, name, manager_id, 0 as level + FROM employees + WHERE manager_id IS NULL + UNION ALL + SELECT e.id, e.name, e.manager_id, oc.level + 1 + FROM employees e + JOIN org_chart oc ON e.manager_id = oc.id + ) + SELECT * FROM org_chart ORDER BY level, id + `; + + 
expect(result).toHaveLength(5); + expect(result[0].level).toBe(0); + expect(result[result.length - 1].level).toBe(2); + }); + + test("Full-text search (FTS5)", async () => { + await sql`CREATE VIRTUAL TABLE docs USING fts5(title, content)`; + + await sql`INSERT INTO docs VALUES + ('First Document', 'This is the content of the first document'), + ('Second Document', 'This document contains different content'), + ('Third Document', 'Another document with unique text')`; + + const results = await sql`SELECT * FROM docs WHERE docs MATCH 'content'`; + expect(results).toHaveLength(2); + + await sql`DROP TABLE docs`; + }); + + test("JSON functions", async () => { + await sql`CREATE TABLE json_test (id INTEGER, data TEXT)`; + + const jsonData = { name: "Test", values: [1, 2, 3] }; + await sql`INSERT INTO json_test VALUES (1, ${JSON.stringify(jsonData)})`; + + const name = await sql`SELECT json_extract(data, '$.name') as name FROM json_test`; + expect(name[0].name).toBe("Test"); + + const arrayLength = await sql`SELECT json_array_length(data, '$.values') as len FROM json_test`; + expect(arrayLength[0].len).toBe(3); + }); +}); + +describe("SQL helpers", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterAll(async () => { + await sql.close(); + }); + + test("bulk insert with sql() helper", async () => { + await sql`CREATE TABLE bulk_test (id INTEGER, name TEXT, value REAL)`; + + const data = [ + { id: 1, name: "Item1", value: 10.5 }, + { id: 2, name: "Item2", value: 20.5 }, + { id: 3, name: "Item3", value: 30.5 }, + ]; + + await sql`INSERT INTO bulk_test ${sql(data)}`; + + const results = await sql`SELECT * FROM bulk_test ORDER BY id`; + expect(results).toHaveLength(3); + expect(results[0].name).toBe("Item1"); + }); + + test("unsafe with parameters", async () => { + await sql`CREATE TABLE unsafe_test (id INTEGER, value TEXT)`; + + const query = "INSERT INTO unsafe_test VALUES (?, ?)"; + await sql.unsafe(query, [1, "test"]); + + const selectQuery = "SELECT * FROM unsafe_test WHERE id = ?"; + const results = await sql.unsafe(selectQuery, [1]); + expect(results[0].value).toBe("test"); + }); + + test("file execution", async () => { + const dir = tempDirWithFiles("sql-files", { + "schema.sql": ` + CREATE TABLE file_test ( + id INTEGER PRIMARY KEY, + created_at TEXT DEFAULT CURRENT_TIMESTAMP + ); + INSERT INTO file_test (id) VALUES (1), (2), (3); + `, + "query.sql": `SELECT COUNT(*) as count FROM file_test`, + }); + + await sql.file(path.join(dir, "schema.sql")); + + const result = await sql.file(path.join(dir, "query.sql")); + expect(result[0].count).toBe(3); + }); + + test("file with parameters", async () => { + const dir = tempDirWithFiles("sql-params", { + "query.sql": `SELECT ? as param1, ? 
as param2`, + }); + + const result = await sql.file(path.join(dir, "query.sql"), ["value1", "value2"]); + expect(result[0].param1).toBe("value1"); + expect(result[0].param2).toBe("value2"); + }); +}); + +describe("Helper argument validation", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + await sql`CREATE TABLE helper_invalid ( + id INTEGER PRIMARY KEY, + text_val TEXT, + blob_val BLOB, + num_val REAL + )`; + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("functions are invalid values in helper", async () => { + const fn = () => 123; + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 1, text_val: fn })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + }); + + test("plain objects (JSON) are invalid values in helper", async () => { + const obj = { a: 1, b: "two" }; + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 2, text_val: obj as any })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + }); + + test("Map and Set are invalid values in helper", async () => { + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 3, text_val: new Map([["k", "v"]]) as any })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 4, text_val: new Set([1, 2, 3]) as any })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + }); + + test("Response, Request, Blob, File are invalid values in helper", async () => { + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 5, text_val: new Response("ok") as any })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 6, text_val: new Request("https://example.com") as any })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 7, blob_val: new Blob(["hello"]) as any })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 8, blob_val: new File(["body"], "a.txt") as any })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + }); + + test("ArrayBuffer (not a view) is invalid in helper", async () => { + const ab = new ArrayBuffer(8); + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 9, blob_val: ab as any })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + }); + + test("Promise, Date, RegExp are invalid in helper", async () => { + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 10, text_val: Promise.resolve("x") as any })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 11, text_val: new Date() as any })}`.execute(), 
+ ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + + expect( + sql`INSERT INTO helper_invalid ${sql({ id: 12, text_val: /abc/ as any })}`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + }); + + test("BigInt values are accepted when in range", async () => { + const id = 42n; + await sql`INSERT INTO helper_invalid ${sql({ id, text_val: "ok" })}`; + const rows = await sql`SELECT id, text_val FROM helper_invalid WHERE id = ${Number(id)}`; + expect(rows[0].text_val).toBe("ok"); + }); + + test("BigInt out of range rejects when safeIntegers is enabled", async () => { + const sqlSafe = new SQL({ adapter: "sqlite", filename: ":memory:", safeIntegers: true }); + await sqlSafe`CREATE TABLE t (id INTEGER PRIMARY KEY, n INTEGER)`; + + const big = BigInt("9223372036854775808"); // 2^63, just out of int64 range + expect(sqlSafe`INSERT INTO t ${sql({ id: 1, n: big })}`.execute()).rejects.toThrowErrorMatchingInlineSnapshot( + `"BigInt value '9223372036854775808' is out of range"`, + ); + await sqlSafe.close(); + }); + + test("invalid keys for helper throw immediately", () => { + const obj = { id: 1, text_val: "x" }; + expect(() => sql`INSERT INTO helper_invalid ${sql(obj, Symbol("k") as any)}`).toThrowErrorMatchingInlineSnapshot( + `"Keys must be strings or numbers: Symbol(k)"`, + ); + expect(() => sql`UPDATE helper_invalid SET ${sql(obj, 1n as any)} WHERE id = 1`).toThrowErrorMatchingInlineSnapshot( + `"Keys must be strings or numbers: 1"`, + ); + expect( + () => sql`INSERT INTO helper_invalid ${sql(obj, function bad() {} as any)}`, + ).toThrowErrorMatchingInlineSnapshot(`"Keys must be strings or numbers: function bad() {}"`); + }); + + test("WHERE IN helper accepts both arrays and single values", async () => { + const result = await sql`SELECT 1 as num WHERE 1 IN ${sql([1, 2, 3])}`.execute(); + expect(result).toBeArray(); + expect(result.length).toBe(1); + expect(result[0]).toEqual({ num: 1 }); + + const singleObj = { id: 1, name: "test" }; + const result2 = await sql`SELECT 1 as num WHERE 1 IN ${sql(singleObj, "id")}`.execute(); + expect(result2).toBeArray(); + expect(result2.length).toBe(1); + expect(result2[0]).toEqual({ num: 1 }); + + const singleObj2 = { id: 2, name: "test2" }; + const result3 = await sql`SELECT 1 as num WHERE 1 IN ${sql(singleObj2, "id")}`.execute(); + expect(result3).toBeArray(); + expect(result3.length).toBe(0); + }); + + test("WHERE IN helper rejects multiple columns", async () => { + const items = [{ a: 1, b: 2 }]; + expect(sql`SELECT 1 WHERE 1 IN ${sql(items, "a", "b")}`.execute()).rejects.toThrowErrorMatchingInlineSnapshot( + `"Cannot use WHERE IN helper with multiple columns"`, + ); + }); + + test("UPDATE helper rejects array of objects", async () => { + const items = [{ text_val: "a" }, { text_val: "b" }]; + expect( + sql`UPDATE helper_invalid SET ${sql(items)} WHERE id = 1`.execute(), + ).rejects.toThrowErrorMatchingInlineSnapshot(`"Cannot use array of objects for UPDATE"`); + }); + + test("invalid values in WHERE IN helper are rejected", async () => { + expect(sql`SELECT 1 WHERE 1 IN ${sql([() => {}])}`.execute()).rejects.toThrowErrorMatchingInlineSnapshot( + `"Binding expected string, TypedArray, boolean, number, bigint or null"`, + ); + }); +}); + +describe("Error handling", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterAll(async () => { + 
await sql.close(); + }); + + test("syntax errors", async () => { + try { + await sql`SELCT * FROM nonexistent`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + expect((err as Error).message).toContain("syntax error"); + } + }); + + test("constraint violations", async () => { + await sql`CREATE TABLE constraints ( + id INTEGER PRIMARY KEY, + value TEXT NOT NULL, + unique_val TEXT UNIQUE + )`; + + try { + await sql`INSERT INTO constraints (id, value) VALUES (1, ${null})`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + expect((err as Error).message).toContain("NOT NULL"); + } + + await sql`INSERT INTO constraints VALUES (1, 'test', 'unique')`; + try { + await sql`INSERT INTO constraints VALUES (2, 'test2', 'unique')`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + expect((err as Error).message).toContain("UNIQUE"); + } + }); + + test("foreign key violations", async () => { + await sql`PRAGMA foreign_keys = ON`; + + await sql`CREATE TABLE parent (id INTEGER PRIMARY KEY)`; + await sql`CREATE TABLE child ( + id INTEGER PRIMARY KEY, + parent_id INTEGER, + FOREIGN KEY (parent_id) REFERENCES parent(id) + )`; + + await sql`INSERT INTO parent VALUES (1)`; + await sql`INSERT INTO child VALUES (1, 1)`; + + try { + await sql`INSERT INTO child VALUES (2, 999)`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + expect((err as Error).message).toContain("FOREIGN KEY"); + } + }); +}); + +describe("Connection management", () => { + test("close() prevents further queries", async () => { + const sql = new SQL("sqlite://:memory:"); + await sql`CREATE TABLE test (id INTEGER)`; + await sql.close(); + + try { + await sql`SELECT * FROM test`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + expect((err as Error).message).toMatchInlineSnapshot(`"Connection closed"`); + } + }); + + test("reserve throws for SQLite", async () => { + const sql = new SQL("sqlite://:memory:"); + + expect(sql.reserve()).rejects.toThrowErrorMatchingInlineSnapshot( + `"This adapter doesn't support connection reservation"`, + ); + + await sql.close(); + }); + + test("distributed transactions throw for SQLite", async () => { + const sql = new SQL("sqlite://:memory:"); + + expect(() => sql.beginDistributed("test-tx", async () => {})).toThrowErrorMatchingInlineSnapshot( + `"This adapter doesn't support distributed transactions."`, + ); + + expect(() => sql.commitDistributed("test-tx")).toThrowErrorMatchingInlineSnapshot( + `"SQLite doesn't support distributed transactions."`, + ); + + expect(() => sql.rollbackDistributed("test-tx")).toThrowErrorMatchingInlineSnapshot( + `"SQLite doesn't support distributed transactions."`, + ); + + await sql.close(); + }); + + test("flush throws for SQLite", async () => { + const sql = new SQL("sqlite://:memory:"); + + expect(() => sql.flush()).toThrowErrorMatchingInlineSnapshot( + `"SQLite doesn't support flush() - queries are executed synchronously"`, + ); + + await sql.close(); + }); +}); + +describe("Performance & Edge Cases", () => { + test("handles large datasets", async () => { + const sql = new SQL("sqlite://:memory:"); + + await sql`CREATE TABLE large (id INTEGER PRIMARY KEY, data TEXT)`; + + const rowCount = 1000; + const data = Buffer.alloc(100, "x").toString(); + + await sql.begin(async tx => { + for (let i = 0; i < rowCount; i++) { + await tx`INSERT INTO large VALUES (${i}, ${data})`; + } + }); + + const count = await 
sql`SELECT COUNT(*) as count FROM large`; + expect(count[0].count).toBe(rowCount); + + await sql.close(); + }); + + test("handles many columns", async () => { + const sql = new SQL(":memory:"); + + const columnCount = 100; + const columns = Array.from({ length: columnCount }, (_, i) => `col${i} INTEGER`).join(", "); + + await sql.unsafe(`CREATE TABLE wide (${columns})`); + + const values = Array.from({ length: columnCount }, (_, i) => i); + const placeholders = values.map(() => "?").join(", "); + + await sql.unsafe(`INSERT INTO wide VALUES (${placeholders})`, values); + + const result = await sql`SELECT * FROM wide`; + expect(Object.keys(result[0])).toHaveLength(columnCount); + + await sql.close(); + }); + + test("handles concurrent queries", async () => { + const sql = new SQL("sqlite://:memory:"); + + await sql`CREATE TABLE concurrent (id INTEGER PRIMARY KEY, value INTEGER)`; + + const promises = Array.from({ length: 10 }, (_, i) => sql`INSERT INTO concurrent VALUES (${i}, ${i * 10})`); + + await Promise.all(promises); + + const count = await sql`SELECT COUNT(*) as count FROM concurrent`; + expect(count[0].count).toBe(10); + + await sql.close(); + }); + + test("handles empty results", async () => { + const sql = new SQL("sqlite://:memory:"); + + await sql`CREATE TABLE empty (id INTEGER)`; + const results = await sql`SELECT * FROM empty`; + + expect(results).toHaveLength(0); + expect(results.command).toBe("SELECT"); + expect(results.count).toBe(0); + + await sql.close(); + }); + + test("handles special table names", async () => { + const sql = new SQL("sqlite://:memory:"); + + const specialNames = ["table-with-dash", "table.with.dots", "table with spaces", "123numeric", "SELECT"]; + + for (const name of specialNames) { + await sql.unsafe(`CREATE TABLE "${name}" (id INTEGER)`); + await sql.unsafe(`INSERT INTO "${name}" VALUES (1)`); + const result = await sql.unsafe(`SELECT * FROM "${name}"`); + expect(result).toHaveLength(1); + await sql.unsafe(`DROP TABLE "${name}"`); + } + + await sql.close(); + }); +}); + +describe("WAL mode and concurrency", () => { + test("can enable WAL mode", async () => { + const dir = tempDirWithFiles("sqlite-wal-test", {}); + const dbPath = path.join(dir, "wal-test.db"); + const sql = new SQL(`sqlite://${dbPath}`); + + await sql`PRAGMA journal_mode = WAL`; + const mode = await sql`PRAGMA journal_mode`; + expect(mode[0].journal_mode).toBe("wal"); + + await sql`CREATE TABLE wal_test (id INTEGER)`; + await sql`INSERT INTO wal_test VALUES (1)`; + + const walPath = `${dbPath}-wal`; + const shmPath = `${dbPath}-shm`; + + const walStats = await stat(walPath); + expect(walStats.isFile()).toBe(true); + expect(walStats.size).toBeGreaterThan(0); + + const shmStats = await stat(shmPath); + expect(shmStats.isFile()).toBe(true); + expect(shmStats.size).toBeGreaterThan(0); + + await sql.close(); + await rm(dir, { recursive: true }); + }); +}); + +describe("Memory and resource management", () => { + test("properly releases resources on close", async () => { + const sql = new SQL("sqlite://:memory:"); + + await sql`CREATE TABLE resource_test (id INTEGER, data TEXT)`; + + for (let i = 0; i < 100; i++) { + await sql`INSERT INTO resource_test VALUES (${i}, ${"x".repeat(1000)})`; + } + + await sql.close(); + + try { + await sql`SELECT * FROM resource_test`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + expect((err as Error).message).toMatchInlineSnapshot(`"Connection closed"`); + } + }); + + test("properly finalizes prepared statements", async 
() => {
+    const sql = new SQL("sqlite://:memory:");
+
+    await sql`CREATE TABLE stmt_test (id INTEGER PRIMARY KEY, value TEXT)`;
+
+    const iterations = 10000;
+
+    for (let i = 0; i < iterations; i++) {
+      await sql`INSERT INTO stmt_test (id, value) VALUES (${i}, ${"test" + i})`;
+
+      if (i % 100 === 0) {
+        const result = await sql`SELECT COUNT(*) as count FROM stmt_test`;
+        expect(result[0].count).toBe(i + 1);
+      }
+    }
+
+    await sql`
+      DELETE FROM stmt_test WHERE id < 100;
+      DELETE FROM stmt_test WHERE id < 200;
+      DELETE FROM stmt_test WHERE id < 300;
+    `;
+
+    const finalCount = await sql`SELECT COUNT(*) as count FROM stmt_test`;
+    expect(finalCount[0].count).toBe(iterations - 300);
+
+    await sql.close();
+  });
+
+  test("handles many concurrent prepared statements", async () => {
+    const sql = new SQL("sqlite://:memory:");
+
+    await sql`CREATE TABLE concurrent_test (id INTEGER, value TEXT)`;
+
+    const promises: Promise<any>[] = [];
+    for (let i = 0; i < 1000; i++) {
+      promises.push(sql`INSERT INTO concurrent_test VALUES (${i}, ${"value" + i})`);
+    }
+
+    await Promise.all(promises);
+
+    const result = await sql`SELECT COUNT(*) as count FROM concurrent_test`;
+    expect(result[0].count).toBe(1000);
+
+    const selectPromises: Promise<any>[] = [];
+    for (let i = 0; i < 100; i++) {
+      selectPromises.push(sql`SELECT * FROM concurrent_test WHERE id = ${i}`);
+    }
+
+    const results = await Promise.all(selectPromises);
+    results.forEach((result, i) => {
+      expect(result).toHaveLength(1);
+      expect(result[0].id).toBe(i);
+    });
+
+    await sql.close();
+  });
+});
+
+describe("Connection URL Edge Cases", () => {
+  test("handles various file:// URL formats", async () => {
+    const dir = tempDirWithFiles("sqlite-url-test", {});
+
+    const dbPath1 = path.join(dir, "test1.db");
+    const sql1 = new SQL(`file://${dbPath1}`);
+    await sql1`CREATE TABLE test (id INTEGER)`;
+    await sql1`INSERT INTO test VALUES (1)`;
+    const result1 = await sql1`SELECT * FROM test`;
+    expect(result1).toHaveLength(1);
+    await sql1.close();
+
+    const dbPath2 = path.join(dir, "test2.db");
+    const sql2 = new SQL(`file:${dbPath2}`);
+    await sql2`CREATE TABLE test (id INTEGER)`;
+    await sql2.close();
+
+    await rm(dir, { recursive: true });
+  });
+
+  test("handles special characters in database paths", async () => {
+    const specialNames = [
+      "test with spaces.db",
+      "test-with-dash.db",
+      "test.with.dots.db",
+      "test_underscore.db",
+      "test@symbol.db",
+      "test#hash.db",
+      "test%percent.db",
+      "test&ampersand.db",
+      "test(parens).db",
+      "test[brackets].db",
+      "test{braces}.db",
+      "test'quote.db",
+    ];
+
+    for (const name of specialNames) {
+      const dir = tempDirWithFiles(`sqlite-special-${Math.random()}`, {});
+      const dbPath = path.join(dir, name);
+
+      const sql = new SQL(`sqlite://${dbPath}`);
+      await sql`CREATE TABLE test (id INTEGER)`;
+      await sql`INSERT INTO test VALUES (1)`;
+
+      const result = await sql`SELECT * FROM test`;
+      expect(result).toHaveLength(1);
+
+      expect(sql.options.filename).toBe(join(dir, name));
+
+      await sql.close();
+      await rm(dir, { recursive: true });
+    }
+  });
+
+  test("handles relative vs absolute paths", async () => {
+    const dir = tempDirWithFiles("sqlite-path-test", {});
+    const originalCwd = process.cwd();
+
+    try {
+      process.chdir(dir);
+
+      const sql1 = new SQL("sqlite://./relative.db");
+      await sql1`CREATE TABLE test (id INTEGER)`;
+      await sql1.close();
+
+      expect(existsSync(path.join(dir, "relative.db"))).toBe(true);
+
+      const absPath = path.join(dir, "absolute.db");
+      const sql2 = new SQL(`sqlite://${absPath}`);
+      await 
sql2`CREATE TABLE test (id INTEGER)`; + await sql2.close(); + + expect(existsSync(absPath)).toBe(true); + } finally { + process.chdir(originalCwd); + await rm(dir, { recursive: true }); + } + }); + + test("handles readonly mode via URL parameters", async () => { + const dir = tempDirWithFiles("sqlite-readonly-test", {}); + const dbPath = path.join(dir, "readonly.db"); + + const sql1 = new SQL(`sqlite://${dbPath}`); + await sql1`CREATE TABLE test (id INTEGER)`; + await sql1`INSERT INTO test VALUES (1)`; + await sql1.close(); + + const sql2 = new SQL(`sqlite://${dbPath}?mode=ro`); + + const result = await sql2`SELECT * FROM test`; + expect(result).toHaveLength(1); + + try { + await sql2`INSERT INTO test VALUES (2)`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + expect((err as Error).message).toContain("readonly"); + } + + await sql2.close(); + await rm(dir, { recursive: true }); + }); + + test("handles URI parameters for cache and other settings", async () => { + const dir = tempDirWithFiles("sqlite-uri-test", {}); + const dbPath = path.join(dir, "uri.db"); + + const sql = new SQL(`sqlite://${dbPath}?cache=shared&mode=rwc`); + + await sql`CREATE TABLE test (id INTEGER)`; + await sql`INSERT INTO test VALUES (1)`; + + const pragmas = await sql`PRAGMA cache_size`; + expect(pragmas).toBeDefined(); + + await sql.close(); + await rm(dir, { recursive: true }); + }); +}); + +describe("BLOB Edge Cases and Binary Data", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("handles zero-length BLOBs", async () => { + await sql`CREATE TABLE blob_test (id INTEGER, data BLOB)`; + + const emptyBuffer = Buffer.alloc(0); + await sql`INSERT INTO blob_test VALUES (1, ${emptyBuffer})`; + + const result = await sql`SELECT * FROM blob_test`; + expect(Buffer.from(result[0].data)).toHaveLength(0); + }); + + test("handles large BLOBs", async () => { + await sql`CREATE TABLE large_blob (id INTEGER, data BLOB)`; + + const sizes = [1024 * 1024, 10 * 1024 * 1024]; + + for (const size of sizes) { + const largeBuffer = Buffer.alloc(size); + + for (let i = 0; i < size; i++) { + largeBuffer[i] = i % 256; + } + + await sql`INSERT INTO large_blob VALUES (${size}, ${largeBuffer})`; + + const result = await sql`SELECT * FROM large_blob WHERE id = ${size}`; + const retrieved = Buffer.from(result[0].data); + + expect(retrieved.length).toBe(size); + + for (let i = 0; i < Math.min(100, size); i++) { + expect(retrieved[i]).toBe(i % 256); + } + } + }); + + test("handles binary data with all byte values", async () => { + await sql`CREATE TABLE binary_test (id INTEGER, data BLOB)`; + + const allBytes = Buffer.alloc(256); + for (let i = 0; i < 256; i++) { + allBytes[i] = i; + } + + await sql`INSERT INTO binary_test VALUES (1, ${allBytes})`; + + const result = await sql`SELECT * FROM binary_test`; + const retrieved = Buffer.from(result[0].data); + + expect(retrieved.length).toBe(256); + for (let i = 0; i < 256; i++) { + expect(retrieved[i]).toBe(i); + } + }); + + test("handles Uint8Array and ArrayBuffer", async () => { + await sql`CREATE TABLE array_test (id INTEGER, data BLOB)`; + + const uint8 = new Uint8Array([1, 2, 3, 4, 5]); + await sql`INSERT INTO array_test VALUES (1, ${uint8})`; + + const arrayBuffer = new ArrayBuffer(8); + const view = new DataView(arrayBuffer); + view.setInt32(0, 0x12345678); + view.setInt32(4, 0x9abcdef0); + await sql`INSERT INTO array_test VALUES (2, 
${Buffer.from(arrayBuffer)})`; + + const results = await sql`SELECT * FROM array_test ORDER BY id`; + expect(Buffer.from(results[0].data)).toEqual(Buffer.from([1, 2, 3, 4, 5])); + expect(Buffer.from(results[1].data).length).toBe(8); + }); +}); + +describe("Special Characters and Escape Sequences", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + await sql`CREATE TABLE special_chars (id INTEGER, text_val TEXT)`; + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("handles various quote types", async () => { + const quotes = [ + `Single ' quote`, + `Double " quote`, + `Both ' and " quotes`, + `Backtick \` quote`, + `'Multiple' "quote" 'types'`, + `It's a "test"`, + `\\'escaped\\' quotes`, + `"""triple quotes"""`, + `'''triple single'''`, + ]; + + for (let i = 0; i < quotes.length; i++) { + await sql`INSERT INTO special_chars VALUES (${i}, ${quotes[i]})`; + const result = await sql`SELECT text_val FROM special_chars WHERE id = ${i}`; + expect(result[0].text_val).toBe(quotes[i]); + } + }); + + test("handles control characters and escape sequences", async () => { + const controls = ["\n\r\t", "\x00\x01\x02", "\b\f\v", "\\n\\r\\t", "\u0000\u001F", "\x1B[31mANSI\x1B[0m"]; + + await sql`CREATE TABLE control_chars (id INTEGER, val TEXT)`; + + for (let i = 0; i < controls.length; i++) { + await sql`INSERT INTO control_chars VALUES (${i}, ${controls[i]})`; + const result = await sql`SELECT val FROM control_chars WHERE id = ${i}`; + expect(result[0].val).toBe(controls[i]); + } + }); + + test("handles Unicode and emoji", async () => { + const unicode = [ + "Hello 世界", + "مرحبا بالعالم", + "שלום עולם", + "Здравствуй мир", + "🚀🎉🌟", + "👨‍👩‍👧‍👦", + "𝓗𝓮𝓵𝓵𝓸", + "A\u0301", + "🏴󠁧󠁢󠁥󠁮󠁧󠁿", + ]; + + await sql`CREATE TABLE unicode_test (id INTEGER, val TEXT)`; + + for (let i = 0; i < unicode.length; i++) { + await sql`INSERT INTO unicode_test VALUES (${i}, ${unicode[i]})`; + const result = await sql`SELECT val FROM unicode_test WHERE id = ${i}`; + expect(result[0].val).toBe(unicode[i]); + } + }); + + test("handles very long strings", async () => { + await sql`CREATE TABLE long_strings (id INTEGER, val TEXT)`; + + const lengths = [1000, 10000, 100000, 1000000]; + + for (const len of lengths) { + const longString = Buffer.alloc(len, "a").toString(); + await sql`INSERT INTO long_strings VALUES (${len}, ${longString})`; + + const result = await sql`SELECT LENGTH(val) as len FROM long_strings WHERE id = ${len}`; + expect(result[0].len).toBe(len); + } + }); +}); + +describe("Triggers and Views", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("CREATE and use TRIGGER", async () => { + await sql`CREATE TABLE audit_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + table_name TEXT, + operation TEXT, + timestamp TEXT DEFAULT CURRENT_TIMESTAMP + )`; + + await sql`CREATE TABLE users ( + id INTEGER PRIMARY KEY, + name TEXT, + updated_at TEXT + )`; + + await sql`CREATE TRIGGER user_update_trigger + AFTER UPDATE ON users + BEGIN + INSERT INTO audit_log (table_name, operation) + VALUES ('users', 'UPDATE'); + UPDATE users SET updated_at = CURRENT_TIMESTAMP + WHERE id = NEW.id; + END`; + + await sql`INSERT INTO users (id, name) VALUES (1, 'Alice')`; + await sql`UPDATE users SET name = 'Alice Updated' WHERE id = 1`; + + const logs = await sql`SELECT * FROM audit_log`; + expect(logs).toHaveLength(1); + expect(logs[0].operation).toBe("UPDATE"); + + const user = await 
sql`SELECT * FROM users WHERE id = 1`; + expect(user[0].updated_at).toBeDefined(); + }); + + test("CREATE and query VIEW", async () => { + await sql`CREATE TABLE orders ( + id INTEGER PRIMARY KEY, + customer_id INTEGER, + amount REAL, + status TEXT + )`; + + await sql`INSERT INTO orders VALUES + (1, 1, 100.0, 'completed'), + (2, 1, 50.0, 'pending'), + (3, 2, 200.0, 'completed'), + (4, 2, 75.0, 'cancelled')`; + + await sql`CREATE VIEW customer_summary AS + SELECT + customer_id, + COUNT(*) as total_orders, + SUM(CASE WHEN status = 'completed' THEN amount ELSE 0 END) as total_spent, + AVG(amount) as avg_order_value + FROM orders + GROUP BY customer_id`; + + const summary = await sql`SELECT * FROM customer_summary ORDER BY customer_id`; + expect(summary).toHaveLength(2); + expect(summary[0].total_orders).toBe(2); + expect(summary[0].total_spent).toBe(100.0); + expect(summary[1].total_orders).toBe(2); + expect(summary[1].total_spent).toBe(200.0); + }); + + test("triggers with WHEN conditions", async () => { + await sql`CREATE TABLE inventory ( + id INTEGER PRIMARY KEY, + product TEXT, + quantity INTEGER, + reorder_level INTEGER DEFAULT 10 + )`; + + await sql`CREATE TABLE reorder_alerts ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + product TEXT, + quantity INTEGER, + created_at TEXT DEFAULT CURRENT_TIMESTAMP + )`; + + await sql`CREATE TRIGGER low_stock_trigger + AFTER UPDATE OF quantity ON inventory + WHEN NEW.quantity < NEW.reorder_level + BEGIN + INSERT INTO reorder_alerts (product, quantity) + VALUES (NEW.product, NEW.quantity); + END`; + + await sql`INSERT INTO inventory VALUES (1, 'Widget', 100, 10)`; + await sql`UPDATE inventory SET quantity = 5 WHERE id = 1`; + + const alerts = await sql`SELECT * FROM reorder_alerts`; + expect(alerts).toHaveLength(1); + expect(alerts[0].product).toBe("Widget"); + expect(alerts[0].quantity).toBe(5); + + await sql`UPDATE inventory SET quantity = 15 WHERE id = 1`; + const alerts2 = await sql`SELECT * FROM reorder_alerts`; + expect(alerts2).toHaveLength(1); + }); +}); + +describe("Indexes and Query Optimization", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("CREATE various types of indexes", async () => { + await sql`CREATE TABLE products ( + id INTEGER PRIMARY KEY, + name TEXT, + category TEXT, + price REAL, + sku TEXT UNIQUE, + description TEXT + )`; + + await sql`CREATE INDEX idx_category ON products(category)`; + + await sql`CREATE INDEX idx_category_price ON products(category, price DESC)`; + + await sql`CREATE UNIQUE INDEX idx_sku ON products(sku)`; + + await sql`CREATE INDEX idx_expensive ON products(price) WHERE price > 100`; + + await sql`CREATE INDEX idx_name_lower ON products(LOWER(name))`; + + for (let i = 1; i <= 100; i++) { + await sql`INSERT INTO products VALUES ( + ${i}, + ${"Product " + i}, + ${"Category " + (i % 10)}, + ${i * 10.5}, + ${"SKU-" + i.toString().padStart(5, "0")}, + ${"Description for product " + i} + )`; + } + + try { + await sql`INSERT INTO products VALUES (101, 'Test', 'Test', 10, 'SKU-00001', 'Duplicate SKU')`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + expect((err as Error).message).toContain("UNIQUE"); + } + + const results = await sql`SELECT * FROM products WHERE category = 'Category 5'`; + expect(results.length).toBeGreaterThan(0); + + const expensive = await sql`SELECT * FROM products WHERE price > 500`; + expect(expensive.length).toBeGreaterThan(0); + }); + + 
test("ANALYZE and query planning", async () => { + await sql`CREATE TABLE stats_test ( + id INTEGER PRIMARY KEY, + type TEXT, + value INTEGER + )`; + + await sql`CREATE INDEX idx_type ON stats_test(type)`; + + for (let i = 1; i <= 1000; i++) { + const type = i <= 900 ? "common" : i <= 990 ? "uncommon" : "rare"; + await sql`INSERT INTO stats_test VALUES (${i}, ${type}, ${i})`; + } + + await sql`ANALYZE`; + + const stats = await sql`SELECT * FROM sqlite_stat1`; + expect(stats.length).toBeGreaterThan(0); + }); + + test("covering indexes", async () => { + await sql`CREATE TABLE users ( + id INTEGER PRIMARY KEY, + email TEXT, + username TEXT, + created_at TEXT + )`; + + await sql`CREATE INDEX idx_email_username ON users(email, username)`; + + for (let i = 1; i <= 100; i++) { + await sql`INSERT INTO users VALUES ( + ${i}, + ${"user" + i + "@example.com"}, + ${"user" + i}, + ${new Date().toISOString()} + )`; + } + + const result = await sql`SELECT email, username FROM users WHERE email LIKE 'user1%'`; + expect(result.length).toBeGreaterThan(0); + }); +}); + +describe("VACUUM and Database Maintenance", () => { + test("VACUUM command", async () => { + const dir = tempDirWithFiles("sqlite-vacuum-test", {}); + const dbPath = path.join(dir, "vacuum.db"); + const sql = new SQL(`sqlite://${dbPath}`); + + await sql`CREATE TABLE vacuum_test (id INTEGER, data TEXT)`; + + for (let i = 0; i < 1000; i++) { + await sql`INSERT INTO vacuum_test VALUES (${i}, ${Buffer.alloc(100, "x").toString()})`; + } + + await sql`DELETE FROM vacuum_test WHERE id % 2 = 0`; + + const statsBefore = await stat(dbPath); + const sizeBefore = statsBefore.size; + + await sql`VACUUM`; + + const statsAfter = await stat(dbPath); + const sizeAfter = statsAfter.size; + + expect(sizeAfter).toBeLessThanOrEqual(sizeBefore); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("incremental VACUUM with auto_vacuum", async () => { + const dir = tempDirWithFiles("sqlite-auto-vacuum-test", {}); + const dbPath = path.join(dir, "auto_vacuum.db"); + const sql = new SQL(`sqlite://${dbPath}`); + + await sql`PRAGMA auto_vacuum = 2`; + + await sql`CREATE TABLE test (id INTEGER, data TEXT)`; + + for (let i = 0; i < 100; i++) { + await sql`INSERT INTO test VALUES (${i}, ${Buffer.alloc(1000, "x").toString()})`; + } + + await sql`DELETE FROM test WHERE id < 50`; + + await sql`PRAGMA incremental_vacuum(10)`; + + const pageCount = await sql`PRAGMA page_count`; + expect(pageCount[0].page_count).toBeGreaterThan(0); + + await sql.close(); + await rm(dir, { recursive: true }); + }); +}); + +describe("Backup and Restore Operations", () => { + test("backup to another file", async () => { + const dir = tempDirWithFiles("sqlite-backup-test", {}); + const sourcePath = path.join(dir, "source.db"); + const backupPath = path.join(dir, "backup.db"); + + const source = new SQL(`sqlite://${sourcePath}`); + + await source`CREATE TABLE backup_test (id INTEGER PRIMARY KEY, data TEXT)`; + for (let i = 1; i <= 10; i++) { + await source`INSERT INTO backup_test VALUES (${i}, ${"Data " + i})`; + } + + await source.unsafe(`VACUUM INTO '${backupPath}'`); + + await source.close(); + + const backup = new SQL(`sqlite://${backupPath}`); + const data = await backup`SELECT * FROM backup_test`; + expect(data).toHaveLength(10); + expect(data[0].data).toBe("Data 1"); + + await backup.close(); + await rm(dir, { recursive: true }); + }); +}); + +describe("Custom Collations and Functions", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new 
SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("case-insensitive collation with NOCASE", async () => { + await sql`CREATE TABLE collation_test ( + id INTEGER PRIMARY KEY, + name TEXT COLLATE NOCASE + )`; + + await sql`INSERT INTO collation_test VALUES + (1, 'Alice'), + (2, 'alice'), + (3, 'ALICE'), + (4, 'Bob')`; + + const result = await sql`SELECT * FROM collation_test WHERE name = 'alice'`; + expect(result).toHaveLength(3); + + expect(result.map(r => r.name).sort()).toEqual(["ALICE", "Alice", "alice"]); + }); + + test("binary collation", async () => { + await sql`CREATE TABLE binary_collation ( + id INTEGER PRIMARY KEY, + data TEXT COLLATE BINARY + )`; + + await sql`INSERT INTO binary_collation VALUES + (1, 'A'), + (2, 'a'), + (3, 'B'), + (4, 'b')`; + + const result = await sql`SELECT * FROM binary_collation ORDER BY data`; + expect(result.map(r => r.data)).toEqual(["A", "B", "a", "b"]); + }); +}); + +describe("Window Functions", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + + await sql`CREATE TABLE sales ( + id INTEGER PRIMARY KEY, + employee TEXT, + department TEXT, + amount REAL, + sale_date TEXT + )`; + + const sales = [ + ["Alice", "Sales", 1000, "2024-01-01"], + ["Alice", "Sales", 1500, "2024-01-02"], + ["Bob", "Sales", 800, "2024-01-01"], + ["Bob", "Sales", 1200, "2024-01-02"], + ["Charlie", "Marketing", 900, "2024-01-01"], + ["Charlie", "Marketing", 1100, "2024-01-02"], + ]; + + for (const [employee, department, amount, date] of sales) { + await sql`INSERT INTO sales (employee, department, amount, sale_date) + VALUES (${employee}, ${department}, ${amount}, ${date})`; + } + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("ROW_NUMBER window function", async () => { + const result = await sql` + SELECT + employee, + amount, + ROW_NUMBER() OVER (ORDER BY amount DESC) as rank + FROM sales + ORDER BY rank + `; + + expect(result[0].rank).toBe(1); + expect(result[0].amount).toBe(1500); + expect(result[result.length - 1].rank).toBe(6); + }); + + test("partition by with window functions", async () => { + const result = await sql` + SELECT + employee, + department, + amount, + SUM(amount) OVER (PARTITION BY department) as dept_total, + AVG(amount) OVER (PARTITION BY employee) as employee_avg + FROM sales + ORDER BY department, employee + `; + + const marketingRows = result.filter(r => r.department === "Marketing"); + expect(marketingRows[0].dept_total).toBe(2000); + + const salesRows = result.filter(r => r.department === "Sales"); + expect(salesRows[0].dept_total).toBe(4500); + }); + + test("running totals with window functions", async () => { + const result = await sql` + SELECT + employee, + sale_date, + amount, + SUM(amount) OVER ( + PARTITION BY employee + ORDER BY sale_date + ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW + ) as running_total + FROM sales + WHERE employee = 'Alice' + ORDER BY sale_date + `; + + expect(result[0].running_total).toBe(1000); + expect(result[1].running_total).toBe(2500); + }); +}); + +describe("Check Constraints and Complex Validations", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("CHECK constraints on columns", async () => { + await sql`CREATE TABLE validated ( + id INTEGER PRIMARY KEY, + age INTEGER CHECK (age >= 0 AND age <= 150), + email TEXT CHECK (email LIKE '%@%.%'), + status TEXT CHECK (status IN ('active', 
'inactive', 'pending')), + percentage REAL CHECK (percentage >= 0 AND percentage <= 100) + )`; + + await sql`INSERT INTO validated VALUES (1, 25, 'test@example.com', 'active', 50.5)`; + + try { + await sql`INSERT INTO validated VALUES (2, -1, 'test@example.com', 'active', 50)`; + expect(true).toBe(false); + } catch (err) { + expect((err as Error).message).toContain("CHECK"); + } + + try { + await sql`INSERT INTO validated VALUES (3, 25, 'notanemail', 'active', 50)`; + expect(true).toBe(false); + } catch (err) { + expect((err as Error).message).toContain("CHECK"); + } + + try { + await sql`INSERT INTO validated VALUES (4, 25, 'test@example.com', 'invalid', 50)`; + expect(true).toBe(false); + } catch (err) { + expect((err as Error).message).toContain("CHECK"); + } + + try { + await sql`INSERT INTO validated VALUES (5, 25, 'test@example.com', 'active', 101)`; + expect(true).toBe(false); + } catch (err) { + expect((err as Error).message).toContain("CHECK"); + } + }); + + test("table-level CHECK constraints", async () => { + await sql`CREATE TABLE orders ( + id INTEGER PRIMARY KEY, + start_date TEXT, + end_date TEXT, + quantity INTEGER, + price REAL, + CHECK (end_date >= start_date), + CHECK (quantity * price >= 0) + )`; + + await sql`INSERT INTO orders VALUES (1, '2024-01-01', '2024-01-31', 10, 9.99)`; + + try { + await sql`INSERT INTO orders VALUES (2, '2024-02-01', '2024-01-01', 10, 9.99)`; + expect(true).toBe(false); + } catch (err) { + expect((err as Error).message).toContain("CHECK"); + } + }); +}); + +describe("Generated Columns", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("GENERATED ALWAYS AS virtual columns", async () => { + await sql`CREATE TABLE products ( + id INTEGER PRIMARY KEY, + price REAL, + tax_rate REAL, + total_price REAL GENERATED ALWAYS AS (price * (1 + tax_rate)) VIRTUAL, + price_category TEXT GENERATED ALWAYS AS ( + CASE + WHEN price < 10 THEN 'cheap' + WHEN price < 100 THEN 'moderate' + ELSE 'expensive' + END + ) VIRTUAL + )`; + + await sql`INSERT INTO products (id, price, tax_rate) VALUES + (1, 5.00, 0.1), + (2, 50.00, 0.2), + (3, 500.00, 0.15)`; + + const results = await sql`SELECT * FROM products ORDER BY id`; + + expect(results[0].total_price).toBeCloseTo(5.5, 2); + expect(results[0].price_category).toBe("cheap"); + + expect(results[1].total_price).toBeCloseTo(60.0, 2); + expect(results[1].price_category).toBe("moderate"); + + expect(results[2].total_price).toBeCloseTo(575.0, 2); + expect(results[2].price_category).toBe("expensive"); + }); + + test("GENERATED ALWAYS AS stored columns", async () => { + await sql`CREATE TABLE rectangles ( + id INTEGER PRIMARY KEY, + width REAL, + height REAL, + area REAL GENERATED ALWAYS AS (width * height) STORED, + perimeter REAL GENERATED ALWAYS AS (2 * (width + height)) STORED + )`; + + await sql`INSERT INTO rectangles (id, width, height) VALUES + (1, 10, 20), + (2, 5.5, 3.2)`; + + const results = await sql`SELECT * FROM rectangles ORDER BY id`; + + expect(results[0].area).toBe(200); + expect(results[0].perimeter).toBe(60); + + expect(results[1].area).toBeCloseTo(17.6, 2); + expect(results[1].perimeter).toBeCloseTo(17.4, 2); + + await sql`UPDATE rectangles SET width = 15 WHERE id = 1`; + const updated = await sql`SELECT * FROM rectangles WHERE id = 1`; + expect(updated[0].area).toBe(300); + expect(updated[0].perimeter).toBe(70); + }); +}); + +describe("Partial Indexes", () => { + let sql: SQL; + + beforeEach(async 
() => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("partial index with WHERE clause", async () => { + await sql`CREATE TABLE tasks ( + id INTEGER PRIMARY KEY, + title TEXT, + status TEXT, + priority INTEGER, + due_date TEXT + )`; + + await sql`CREATE INDEX idx_urgent_tasks + ON tasks(due_date, priority) + WHERE status != 'completed' AND priority > 3`; + + const tasks = [ + ["Task 1", "pending", 5, "2024-01-01"], + ["Task 2", "completed", 5, "2024-01-01"], + ["Task 3", "pending", 2, "2024-01-01"], + ["Task 4", "pending", 4, "2024-01-02"], + ]; + + for (let i = 0; i < tasks.length; i++) { + const [title, status, priority, due_date] = tasks[i]; + await sql`INSERT INTO tasks VALUES (${i + 1}, ${title}, ${status}, ${priority}, ${due_date})`; + } + + const urgent = await sql` + SELECT * FROM tasks + WHERE status != 'completed' AND priority > 3 + ORDER BY due_date, priority + `; + + expect(urgent).toHaveLength(2); + expect(urgent[0].title).toBe("Task 1"); + expect(urgent[1].title).toBe("Task 4"); + }); +}); + +describe("UPSERT Operations", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("INSERT OR REPLACE", async () => { + await sql`CREATE TABLE users ( + id INTEGER PRIMARY KEY, + email TEXT UNIQUE, + name TEXT, + login_count INTEGER DEFAULT 0 + )`; + + await sql`INSERT INTO users VALUES (1, 'alice@example.com', 'Alice', 1)`; + + await sql`INSERT OR REPLACE INTO users VALUES (1, 'alice@example.com', 'Alice Updated', 5)`; + + const result = await sql`SELECT * FROM users WHERE id = 1`; + expect(result[0].name).toBe("Alice Updated"); + expect(result[0].login_count).toBe(5); + }); + + test("INSERT ON CONFLICT DO UPDATE", async () => { + await sql`CREATE TABLE inventory ( + product_id INTEGER PRIMARY KEY, + name TEXT, + quantity INTEGER, + last_updated TEXT + )`; + + await sql`INSERT INTO inventory VALUES (1, 'Widget', 100, '2024-01-01')`; + + await sql` + INSERT INTO inventory VALUES (1, 'Widget', 50, '2024-01-02') + ON CONFLICT(product_id) DO UPDATE SET + quantity = quantity + excluded.quantity, + last_updated = excluded.last_updated + `; + + const result = await sql`SELECT * FROM inventory WHERE product_id = 1`; + expect(result[0].quantity).toBe(150); + expect(result[0].last_updated).toBe("2024-01-02"); + + await sql` + INSERT INTO inventory VALUES (2, 'Gadget', 75, '2024-01-02') + ON CONFLICT(product_id) DO UPDATE SET + quantity = quantity + excluded.quantity + `; + + const all = await sql`SELECT * FROM inventory ORDER BY product_id`; + expect(all).toHaveLength(2); + }); + + test("INSERT ON CONFLICT DO NOTHING", async () => { + await sql`CREATE TABLE settings ( + key TEXT PRIMARY KEY, + value TEXT + )`; + + await sql`INSERT INTO settings VALUES ('theme', 'dark')`; + + const result = await sql` + INSERT INTO settings VALUES ('theme', 'light') + ON CONFLICT(key) DO NOTHING + RETURNING * + `; + + expect(result).toHaveLength(0); + + const setting = await sql`SELECT * FROM settings WHERE key = 'theme'`; + expect(setting[0].value).toBe("dark"); + }); +}); + +describe("WITHOUT ROWID Tables", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("WITHOUT ROWID table with composite primary key", async () => { + await sql`CREATE TABLE sessions ( + user_id INTEGER, + device_id TEXT, + token TEXT, + created_at TEXT, + PRIMARY 
KEY (user_id, device_id) + ) WITHOUT ROWID`; + + await sql`INSERT INTO sessions VALUES + (1, 'phone', 'token1', '2024-01-01'), + (1, 'laptop', 'token2', '2024-01-01'), + (2, 'phone', 'token3', '2024-01-01')`; + + const results = await sql`SELECT * FROM sessions WHERE user_id = 1`; + expect(results).toHaveLength(2); + + try { + await sql`INSERT INTO sessions VALUES (1, 'phone', 'token4', '2024-01-02')`; + expect(true).toBe(false); + } catch (err) { + expect((err as Error).message).toContain("UNIQUE"); + } + }); +}); + +describe("Concurrency and Locking", () => { + test("concurrent reads work correctly", async () => { + const dir = tempDirWithFiles("sqlite-concurrent-test", {}); + const dbPath = path.join(dir, "concurrent.db"); + + const sql1 = new SQL(`sqlite://${dbPath}`); + await sql1`CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)`; + + for (let i = 1; i <= 100; i++) { + await sql1`INSERT INTO test VALUES (${i}, ${"value" + i})`; + } + + const sql2 = new SQL(`sqlite://${dbPath}`); + const sql3 = new SQL(`sqlite://${dbPath}`); + + const [result1, result2, result3] = await Promise.all([ + sql1`SELECT COUNT(*) as count FROM test`, + sql2`SELECT COUNT(*) as count FROM test`, + sql3`SELECT COUNT(*) as count FROM test`, + ]); + + expect(result1[0].count).toBe(100); + expect(result2[0].count).toBe(100); + expect(result3[0].count).toBe(100); + + await sql1.close(); + await sql2.close(); + await sql3.close(); + await rm(dir, { recursive: true }); + }); + + test("write lock prevents concurrent writes", async () => { + const dir = tempDirWithFiles("sqlite-write-lock-test", {}); + const dbPath = path.join(dir, "writelock.db"); + + const sql = new SQL(`sqlite://${dbPath}`); + await sql`CREATE TABLE counter (id INTEGER PRIMARY KEY, value INTEGER)`; + await sql`INSERT INTO counter VALUES (1, 0)`; + + const updatePromise = sql.begin(async tx => { + await tx`UPDATE counter SET value = value + 1 WHERE id = 1`; + + await new Promise(resolve => setTimeout(resolve, 50)); + await tx`UPDATE counter SET value = value + 1 WHERE id = 1`; + return "done"; + }); + + const sql2 = new SQL(`sqlite://${dbPath}`); + + const startTime = Date.now(); + await updatePromise; + const duration = Date.now() - startTime; + + expect(duration).toBeGreaterThanOrEqual(40); + + const final = await sql`SELECT value FROM counter WHERE id = 1`; + expect(final[0].value).toBe(2); + + await sql.close(); + await sql2.close(); + await rm(dir, { recursive: true }); + }); + + test("busy timeout handling", async () => { + const dir = tempDirWithFiles("sqlite-busy-test", {}); + const dbPath = path.join(dir, "busy.db"); + + const sql1 = new SQL(`sqlite://${dbPath}`); + await sql1`CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)`; + + await sql1`PRAGMA busy_timeout = 100`; + + const sql2 = new SQL(`sqlite://${dbPath}`); + await sql2`PRAGMA busy_timeout = 100`; + + const longTransaction = sql1.begin(async tx => { + await tx`INSERT INTO test VALUES (1, 'test')`; + await new Promise(resolve => setTimeout(resolve, 200)); + return "done"; + }); + + try { + await sql2`INSERT INTO test VALUES (2, 'test2')`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + } + + await longTransaction; + + await sql1.close(); + await sql2.close(); + await rm(dir, { recursive: true }); + }); +}); + +describe("Date and Time Functions", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("date and time functions", async 
() => { + await sql`CREATE TABLE timestamps ( + id INTEGER PRIMARY KEY, + created_at TEXT DEFAULT CURRENT_TIMESTAMP, + date_only TEXT DEFAULT (DATE('now')), + time_only TEXT DEFAULT (TIME('now')) + )`; + + await sql`INSERT INTO timestamps (id) VALUES (1)`; + + const result = await sql`SELECT * FROM timestamps`; + expect(result[0].created_at).toBeDefined(); + expect(result[0].date_only).toMatch(/^\d{4}-\d{2}-\d{2}$/); + expect(result[0].time_only).toMatch(/^\d{2}:\d{2}:\d{2}$/); + }); + + test("date arithmetic", async () => { + const results = await sql` + SELECT + DATE('2024-01-15', '+1 month') as next_month, + DATE('2024-01-15', '-7 days') as last_week, + DATE('2024-01-15', '+1 year') as next_year, + julianday('2024-01-15') - julianday('2024-01-01') as days_diff + `; + + expect(results[0].next_month).toBe("2024-02-15"); + expect(results[0].last_week).toBe("2024-01-08"); + expect(results[0].next_year).toBe("2025-01-15"); + expect(results[0].days_diff).toBe(14); + }); + + test("strftime formatting", async () => { + const results = await sql` + SELECT + strftime('%Y-%m-%d', '2024-01-15 14:30:45') as date_only, + strftime('%H:%M:%S', '2024-01-15 14:30:45') as time_only, + strftime('%w', '2024-01-15') as day_of_week, + strftime('%j', '2024-01-15') as day_of_year, + strftime('%s', '2024-01-15 00:00:00') as unix_timestamp + `; + + expect(results[0].date_only).toBe("2024-01-15"); + expect(results[0].time_only).toBe("14:30:45"); + expect(results[0].day_of_week).toBe("1"); + expect(results[0].day_of_year).toBe("015"); + expect(parseInt(results[0].unix_timestamp)).toBeGreaterThan(0); + }); +}); + +describe("Aggregate Functions and Grouping", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + + await sql`CREATE TABLE sales_data ( + id INTEGER PRIMARY KEY, + region TEXT, + product TEXT, + quantity INTEGER, + price REAL, + sale_date TEXT + )`; + + const salesData = [ + ["North", "Widget", 10, 25.5, "2024-01-01"], + ["North", "Widget", 15, 25.5, "2024-01-02"], + ["North", "Gadget", 5, 75.0, "2024-01-01"], + ["South", "Widget", 20, 25.5, "2024-01-01"], + ["South", "Gadget", 8, 75.0, "2024-01-02"], + ["East", "Widget", 12, 25.5, "2024-01-01"], + ["East", "Gadget", 3, 75.0, "2024-01-01"], + ["West", "Widget", 18, 25.5, "2024-01-02"], + ]; + + for (let i = 0; i < salesData.length; i++) { + const [region, product, quantity, price, date] = salesData[i]; + await sql`INSERT INTO sales_data VALUES (${i + 1}, ${region}, ${product}, ${quantity}, ${price}, ${date})`; + } + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("basic aggregate functions", async () => { + const result = await sql` + SELECT + COUNT(*) as total_records, + SUM(quantity) as total_quantity, + AVG(price) as avg_price, + MIN(quantity) as min_quantity, + MAX(quantity) as max_quantity, + GROUP_CONCAT(DISTINCT region) as all_regions + FROM sales_data + `; + + expect(result[0].total_records).toBe(8); + expect(result[0].total_quantity).toBe(91); + expect(result[0].avg_price).toBeCloseTo(44.0625, 2); // (5*25.5 + 3*75.0) / 8 + expect(result[0].min_quantity).toBe(3); + expect(result[0].max_quantity).toBe(20); + expect(result[0].all_regions.split(",")).toHaveLength(4); + }); + + test("GROUP BY with HAVING", async () => { + const result = await sql` + SELECT + region, + SUM(quantity * price) as total_sales, + COUNT(*) as transaction_count + FROM sales_data + GROUP BY region + HAVING SUM(quantity * price) > 500 + ORDER BY total_sales DESC + `; + + expect(result.length).toBeGreaterThan(0); + 
result.forEach(row => { + expect(row.total_sales).toBeGreaterThan(500); + }); + }); + + test("UNION ALL for subtotals (ROLLUP equivalent)", async () => { + const result = await sql` + SELECT + region, + product, + SUM(quantity) as total_quantity + FROM sales_data + GROUP BY region, product + + UNION ALL + + SELECT + region, + NULL as product, + SUM(quantity) as total_quantity + FROM sales_data + GROUP BY region + + UNION ALL + + SELECT + NULL as region, + NULL as product, + SUM(quantity) as total_quantity + FROM sales_data + + ORDER BY region NULLS LAST, product NULLS LAST + `; + + const grandTotal = result.find(r => r.region === null && r.product === null); + expect(grandTotal).toBeDefined(); + expect(grandTotal.total_quantity).toBe(91); + }); +}); + +describe("STRICT Tables", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("STRICT table type enforcement", async () => { + await sql`CREATE TABLE strict_test ( + id INTEGER PRIMARY KEY, + int_col INTEGER, + real_col REAL, + text_col TEXT, + blob_col BLOB, + any_col ANY + ) STRICT`; + + await sql`INSERT INTO strict_test VALUES (1, 42, 3.14, 'text', X'0102', 'anything')`; + + try { + await sql`INSERT INTO strict_test VALUES (2, 'not an int', 3.14, 'text', X'0102', 'anything')`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + } + + try { + await sql`INSERT INTO strict_test VALUES (3, 42, 'not a real', 'text', X'0102', 'anything')`; + expect(true).toBe(false); + } catch (err) { + expect(err).toBeInstanceOf(Error); + } + + await sql`INSERT INTO strict_test VALUES (4, 42, 3.14, 'text', X'0102', 123)`; + await sql`INSERT INTO strict_test VALUES (5, 42, 3.14, 'text', X'0102', X'ABCD')`; + + const results = await sql`SELECT * FROM strict_test ORDER BY id`; + expect(results).toHaveLength(3); + }); +}); + +describe("Virtual Tables (besides FTS)", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("json_each virtual table", async () => { + const jsonArray = JSON.stringify([1, 2, 3, 4, 5]); + + const result = await sql` + SELECT value + FROM json_each(${jsonArray}) + `; + + expect(result).toHaveLength(5); + expect(result.map(r => r.value)).toEqual([1, 2, 3, 4, 5]); + }); + + test("json_tree virtual table", async () => { + const jsonObj = JSON.stringify({ + name: "root", + children: [ + { name: "child1", value: 1 }, + { name: "child2", value: 2 }, + ], + }); + + const result = await sql` + SELECT key, value, type, path + FROM json_tree(${jsonObj}) + WHERE type != 'object' AND type != 'array' + `; + + expect(result.length).toBeGreaterThan(0); + const nameRow = result.find(r => r.key === "name" && r.value === "root"); + expect(nameRow).toBeDefined(); + }); +}); + +describe("Recursive Queries and Complex CTEs", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("factorial using recursive CTE", async () => { + const result = await sql` + WITH RECURSIVE factorial(n, fact) AS ( + SELECT 1, 1 + UNION ALL + SELECT n + 1, fact * (n + 1) + FROM factorial + WHERE n < 10 + ) + SELECT n, fact FROM factorial + `; + + expect(result).toHaveLength(10); + expect(result[0].fact).toBe(1); + expect(result[9].fact).toBe(3628800); + }); + + test("Fibonacci sequence", async () => { + const result = await sql` + 
WITH RECURSIVE fib(n, a, b) AS ( + SELECT 1, 0, 1 + UNION ALL + SELECT n + 1, b, a + b + FROM fib + WHERE n < 10 + ) + SELECT n, a as fibonacci FROM fib + `; + + expect(result).toHaveLength(10); + expect(result[0].fibonacci).toBe(0); + expect(result[9].fibonacci).toBe(34); + }); + + test("tree traversal with path", async () => { + await sql`CREATE TABLE tree ( + id INTEGER PRIMARY KEY, + parent_id INTEGER, + name TEXT + )`; + + await sql`INSERT INTO tree VALUES + (1, NULL, 'root'), + (2, 1, 'branch1'), + (3, 1, 'branch2'), + (4, 2, 'leaf1'), + (5, 2, 'leaf2'), + (6, 3, 'leaf3')`; + + const result = await sql` + WITH RECURSIVE tree_path AS ( + SELECT id, parent_id, name, name as path, 0 as depth + FROM tree + WHERE parent_id IS NULL + UNION ALL + SELECT t.id, t.parent_id, t.name, + tp.path || '/' || t.name as path, + tp.depth + 1 as depth + FROM tree t + JOIN tree_path tp ON t.parent_id = tp.id + ) + SELECT * FROM tree_path + ORDER BY path + `; + + expect(result).toHaveLength(6); + expect(result[0].path).toBe("root"); + expect(result[result.length - 1].depth).toBe(2); + }); +}); + +describe("Mathematical and String Functions", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("mathematical functions", async () => { + const result = await sql` + SELECT + ABS(-42) as abs_val, + ROUND(3.14159, 2) as rounded, + MIN(1, 2, 3) as min_val, + MAX(1, 2, 3) as max_val + `; + + expect(result[0].abs_val).toBe(42); + expect(result[0].rounded).toBe(3.14); + expect(result[0].min_val).toBe(1); + expect(result[0].max_val).toBe(3); + }); + + test("string functions", async () => { + const result = await sql` + SELECT + LENGTH('Hello') as str_length, + UPPER('hello') as uppercase, + LOWER('HELLO') as lowercase, + TRIM(' hello ') as trimmed, + LTRIM(' hello') as left_trimmed, + RTRIM('hello ') as right_trimmed, + SUBSTR('Hello World', 7, 5) as substring, + REPLACE('Hello World', 'World', 'SQLite') as replaced, + INSTR('Hello World', 'World') as position, + PRINTF('%d-%02d-%02d', 2024, 1, 5) as formatted, + HEX('ABC') as hex_val, + CHAR(65, 66, 67) as char_val + `; + + expect(result[0].str_length).toBe(5); + expect(result[0].uppercase).toBe("HELLO"); + expect(result[0].lowercase).toBe("hello"); + expect(result[0].trimmed).toBe("hello"); + expect(result[0].left_trimmed).toBe("hello"); + expect(result[0].right_trimmed).toBe("hello"); + expect(result[0].substring).toBe("World"); + expect(result[0].replaced).toBe("Hello SQLite"); + expect(result[0].position).toBe(7); + expect(result[0].formatted).toBe("2024-01-05"); + expect(result[0].hex_val).toBe("414243"); + expect(result[0].char_val).toBe("ABC"); + }); + + test("pattern matching with GLOB", async () => { + await sql`CREATE TABLE patterns (id INTEGER, text TEXT)`; + await sql`INSERT INTO patterns VALUES + (1, 'hello'), + (2, 'Hello'), + (3, 'HELLO'), + (4, 'hELLo'), + (5, 'world')`; + + const globResult = await sql`SELECT * FROM patterns WHERE text GLOB 'h*'`; + expect(globResult).toHaveLength(2); + + const likeResult = await sql`SELECT * FROM patterns WHERE text LIKE 'h%'`; + expect(likeResult).toHaveLength(4); + }); +}); + +describe("Edge Cases for NULL handling", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("NULL in arithmetic operations", async () => { + const result = await sql` + SELECT + NULL + 5 as null_add, + NULL * 10 as 
null_multiply, + NULL || 'text' as null_concat, + COALESCE(NULL, NULL, 'default') as coalesced, + IFNULL(NULL, 'replacement') as if_null, + NULLIF(5, 5) as null_if_equal, + NULLIF(5, 3) as null_if_not_equal + `; + + expect(result[0].null_add).toBeNull(); + expect(result[0].null_multiply).toBeNull(); + expect(result[0].null_concat).toBeNull(); // In SQLite, NULL || 'text' returns NULL + expect(result[0].coalesced).toBe("default"); + expect(result[0].if_null).toBe("replacement"); + expect(result[0].null_if_equal).toBeNull(); + expect(result[0].null_if_not_equal).toBe(5); + }); + + test("NULL in comparisons", async () => { + await sql`CREATE TABLE null_test (id INTEGER, value INTEGER)`; + await sql`INSERT INTO null_test VALUES (1, 10), (2, NULL), (3, 20)`; + + const eq = await sql`SELECT * FROM null_test WHERE value = NULL`; + expect(eq).toHaveLength(0); + + const isNull = await sql`SELECT * FROM null_test WHERE value IS NULL`; + expect(isNull).toHaveLength(1); + + const notNull = await sql`SELECT * FROM null_test WHERE value IS NOT NULL`; + expect(notNull).toHaveLength(2); + + const asc = await sql`SELECT * FROM null_test ORDER BY value ASC`; + expect(asc[0].value).toBeNull(); + + const desc = await sql`SELECT * FROM null_test ORDER BY value DESC`; + expect(desc[2].value).toBeNull(); + }); + + test("NULL in aggregates", async () => { + await sql`CREATE TABLE agg_null (id INTEGER, value INTEGER)`; + await sql`INSERT INTO agg_null VALUES (1, 10), (2, NULL), (3, 20), (4, NULL), (5, 30)`; + + const result = await sql` + SELECT + COUNT(*) as count_all, + COUNT(value) as count_values, + SUM(value) as sum_values, + AVG(value) as avg_values, + MAX(value) as max_value, + MIN(value) as min_value + FROM agg_null + `; + + expect(result[0].count_all).toBe(5); + expect(result[0].count_values).toBe(3); + expect(result[0].sum_values).toBe(60); + expect(result[0].avg_values).toBe(20); + expect(result[0].max_value).toBe(30); + expect(result[0].min_value).toBe(10); + }); +}); + +describe("System Tables and Introspection", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + + await sql`CREATE TABLE test_table ( + id INTEGER PRIMARY KEY, + name TEXT NOT NULL, + created_at TEXT DEFAULT CURRENT_TIMESTAMP + )`; + + await sql`CREATE INDEX idx_name ON test_table(name)`; + await sql`CREATE VIEW test_view AS SELECT id, name FROM test_table`; + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("sqlite_master table", async () => { + const objects = await sql` + SELECT type, name, sql + FROM sqlite_master + WHERE type IN ('table', 'index', 'view') + ORDER BY type, name + `; + + expect(objects.length).toBeGreaterThan(0); + + const table = objects.find(o => o.type === "table" && o.name === "test_table"); + expect(table).toBeDefined(); + expect(table.sql).toContain("CREATE TABLE"); + + const index = objects.find(o => o.type === "index" && o.name === "idx_name"); + expect(index).toBeDefined(); + + const view = objects.find(o => o.type === "view" && o.name === "test_view"); + expect(view).toBeDefined(); + }); + + test("pragma table_info", async () => { + const columns = await sql`PRAGMA table_info(test_table)`; + + expect(columns).toHaveLength(3); + + const idCol = columns.find(c => c.name === "id"); + expect(idCol.pk).toBe(1); + expect(idCol.type).toBe("INTEGER"); + + const nameCol = columns.find(c => c.name === "name"); + expect(nameCol.notnull).toBe(1); + expect(nameCol.type).toBe("TEXT"); + + const createdCol = columns.find(c => c.name === "created_at"); + 
expect(createdCol.dflt_value).toBe("CURRENT_TIMESTAMP"); + }); + + test("pragma index_list and index_info", async () => { + const indexes = await sql`PRAGMA index_list(test_table)`; + expect(indexes.length).toBeGreaterThan(0); + + const idx = indexes.find(i => i.name === "idx_name"); + expect(idx).toBeDefined(); + + const indexInfo = await sql`PRAGMA index_info(idx_name)`; + expect(indexInfo).toHaveLength(1); + expect(indexInfo[0].name).toBe("name"); + }); +}); + +describe("Error Recovery and Database Integrity", () => { + test("handles corrupted data gracefully", async () => { + const dir = tempDirWithFiles("sqlite-corrupt-test", {}); + const dbPath = path.join(dir, "test.db"); + const sql = new SQL(`sqlite://${dbPath}`); + + await sql`CREATE TABLE test (id INTEGER PRIMARY KEY, data TEXT)`; + await sql`INSERT INTO test VALUES (1, 'test')`; + + const integrityCheck = await sql`PRAGMA integrity_check`; + expect(integrityCheck[0].integrity_check).toBe("ok"); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("foreign key cascade actions", async () => { + const sql = new SQL("sqlite://:memory:"); + + await sql`PRAGMA foreign_keys = ON`; + + await sql`CREATE TABLE authors ( + id INTEGER PRIMARY KEY, + name TEXT + )`; + + await sql`CREATE TABLE books ( + id INTEGER PRIMARY KEY, + title TEXT, + author_id INTEGER, + FOREIGN KEY (author_id) REFERENCES authors(id) ON DELETE CASCADE + )`; + + await sql`INSERT INTO authors VALUES (1, 'Author 1'), (2, 'Author 2')`; + await sql`INSERT INTO books VALUES + (1, 'Book 1', 1), + (2, 'Book 2', 1), + (3, 'Book 3', 2)`; + + await sql`DELETE FROM authors WHERE id = 1`; + + const remainingBooks = await sql`SELECT * FROM books`; + expect(remainingBooks).toHaveLength(1); + expect(remainingBooks[0].author_id).toBe(2); + + await sql.close(); + }); + + test("deferred foreign key constraints", async () => { + const sql = new SQL("sqlite://:memory:"); + + await sql`PRAGMA foreign_keys = ON`; + + await sql`CREATE TABLE parent (id INTEGER PRIMARY KEY)`; + await sql`CREATE TABLE child ( + id INTEGER PRIMARY KEY, + parent_id INTEGER, + FOREIGN KEY (parent_id) REFERENCES parent(id) DEFERRABLE INITIALLY DEFERRED + )`; + + await sql.begin(async tx => { + await tx`INSERT INTO child VALUES (1, 1)`; + + await tx`INSERT INTO parent VALUES (1)`; + }); + + const result = await sql`SELECT * FROM child`; + expect(result).toHaveLength(1); + + await sql.close(); + }); +}); + +describe("Temp Tables and Attached Databases", () => { + test("temporary tables", async () => { + const sql = new SQL("sqlite://:memory:"); + + await sql`CREATE TEMP TABLE temp_data (id INTEGER, value TEXT)`; + await sql`INSERT INTO temp_data VALUES (1, 'temp')`; + + const result = await sql`SELECT * FROM temp_data`; + expect(result).toHaveLength(1); + + const tempTables = await sql`SELECT name FROM sqlite_temp_master WHERE type = 'table'`; + expect(tempTables.some(t => t.name === "temp_data")).toBe(true); + + const mainTables = await sql`SELECT name FROM sqlite_master WHERE name = 'temp_data'`; + expect(mainTables).toHaveLength(0); + + await sql.close(); + }); + + test("cross-database queries with ATTACH", async () => { + const dir = tempDirWithFiles("sqlite-attach-cross-test", {}); + const mainPath = path.join(dir, "main.db"); + const attachPath = path.join(dir, "attached.db"); + + const mainSql = new SQL(`sqlite://${mainPath}`); + await mainSql`CREATE TABLE main_table (id INTEGER, data TEXT)`; + await mainSql`INSERT INTO main_table VALUES (1, 'main data')`; + + const attachSql = new 
SQL(`sqlite://${attachPath}`); + await attachSql`CREATE TABLE attached_table (id INTEGER, data TEXT)`; + await attachSql`INSERT INTO attached_table VALUES (2, 'attached data')`; + await attachSql.close(); + + await mainSql`ATTACH DATABASE ${attachPath} AS attached_db`; + + const crossQuery = await mainSql` + SELECT m.data as main_data, a.data as attached_data + FROM main_table m, attached_db.attached_table a + WHERE m.id = 1 AND a.id = 2 + `; + + expect(crossQuery).toHaveLength(1); + expect(crossQuery[0].main_data).toBe("main data"); + expect(crossQuery[0].attached_data).toBe("attached data"); + + await mainSql`DETACH DATABASE attached_db`; + await mainSql.close(); + + await rm(dir, { recursive: true }); + }); +}); + +describe("Query Explain and Optimization", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + + await sql`CREATE TABLE large_table ( + id INTEGER PRIMARY KEY, + category TEXT, + value INTEGER, + description TEXT + )`; + + for (let i = 1; i <= 1000; i++) { + await sql`INSERT INTO large_table VALUES ( + ${i}, + ${"category" + (i % 10)}, + ${i * 10}, + ${"description for item " + i} + )`; + } + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("EXPLAIN QUERY PLAN", async () => { + const planWithoutIndex = await sql` + EXPLAIN QUERY PLAN + SELECT * FROM large_table WHERE category = 'category5' + `; + + expect(planWithoutIndex[0]).toMatchObject({ + detail: "SCAN large_table", + id: expect.any(Number), + parent: 0, + }); + + await sql`CREATE INDEX idx_category ON large_table(category)`; + + const planWithIndex = await sql` + EXPLAIN QUERY PLAN + SELECT * FROM large_table WHERE category = 'category5' + `; + + expect(planWithIndex[0]).toMatchObject({ + detail: "SEARCH large_table USING INDEX idx_category (category=?)", + id: expect.any(Number), + parent: 0, + }); + }); +}); + +describe("Query Normalization Fuzzing Tests", () => { + let sql: SQL; + + beforeAll(async () => { + sql = new SQL("sqlite://:memory:"); + + await sql`CREATE TABLE test_table (id INTEGER, name TEXT, value REAL)`; + await sql`CREATE TABLE "weird-table" (col1 TEXT, "col-2" INTEGER)`; + await sql`CREATE TABLE [bracket table] ([col 1] TEXT, [col 2] INTEGER)`; + await sql`CREATE TABLE \`backtick\` (\`col\` TEXT)`; + }); + + afterAll(async () => { + await sql?.close(); + }); + + test("handles CTEs with various syntax styles", async () => { + const cte1 = await sql.unsafe(` + WITH cte AS (SELECT 1 as n) + SELECT * FROM cte + `); + expect(cte1[0].n).toBe(1); + + const cte2 = await sql.unsafe(` + WITH + cte1 AS (SELECT 1 as n), + cte2 AS (SELECT 2 as n), + cte3 AS (SELECT n * 2 as doubled FROM cte1) + SELECT * FROM cte3 + `); + expect(cte2[0].doubled).toBe(2); + + const cte3 = await sql.unsafe(` + WITH RECURSIVE cnt(x) AS ( + SELECT 1 + UNION ALL + SELECT x+1 FROM cnt WHERE x<5 + ) + SELECT * FROM cnt + `); + expect(cte3).toHaveLength(5); + + const cte4 = await sql.unsafe(` + WITH /* comment */ cte AS ( + SELECT + 1 as n -- inline comment + ) SELECT * FROM cte + `); + expect(cte4[0].n).toBe(1); + }); + + test("handles window functions with complex syntax", async () => { + await sql`INSERT INTO test_table VALUES (1, 'a', 10.5), (2, 'b', 20.5), (3, 'a', 30.5)`; + + const win1 = await sql.unsafe(` + SELECT + name, + value, + ROW_NUMBER() OVER (ORDER BY value) as rn + FROM test_table + `); + expect(win1).toHaveLength(3); + + const win2 = await sql.unsafe(` + SELECT + name, + value, + ROW_NUMBER() OVER w1 as rn, + RANK() OVER w1 as rank, + DENSE_RANK() OVER w1 as 
dense_rank, + LAG(value, 1, 0) OVER (ORDER BY id) as prev_value, + LEAD(value) OVER (ORDER BY id) as next_value, + FIRST_VALUE(value) OVER w2 as first_val, + LAST_VALUE(value) OVER w2 as last_val, + NTH_VALUE(value, 2) OVER w2 as second_val + FROM test_table + WINDOW + w1 AS (PARTITION BY name ORDER BY value DESC), + w2 AS (ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) + `); + expect(win2).toHaveLength(3); + + const win3 = await sql.unsafe(` + SELECT + value, + SUM(value) OVER ( + ORDER BY id + ROWS BETWEEN 2 PRECEDING AND CURRENT ROW + ) as rolling_sum, + AVG(value) OVER ( + ORDER BY id + RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW + ) as cumulative_avg + FROM test_table + `); + expect(win3).toHaveLength(3); + }); + + test("handles UPSERT with various conflict resolution strategies", async () => { + await sql`CREATE TABLE upsert_test (id INTEGER PRIMARY KEY, value TEXT UNIQUE, count INTEGER DEFAULT 0)`; + + await sql` + INSERT OR REPLACE INTO upsert_test (id, value) VALUES (1, 'test') + `; + + await sql` + INSERT OR IGNORE INTO upsert_test (id, value) VALUES (1, 'ignored') + `; + + await sql` + INSERT INTO upsert_test (id, value, count) VALUES (1, 'test', 1) + ON CONFLICT(id) DO UPDATE SET + count = excluded.count + upsert_test.count, + value = excluded.value || ' updated' + `; + + await sql` + INSERT INTO upsert_test (id, value, count) VALUES (2, 'test', 5) + ON CONFLICT(value) DO UPDATE SET + count = excluded.count + WHERE excluded.count > upsert_test.count + `; + + try { + await sql`INSERT OR ABORT INTO upsert_test (id) VALUES (1)`; + } catch {} + + try { + await sql`INSERT OR FAIL INTO upsert_test (id) VALUES (1)`; + } catch {} + }); + + test("handles complex JOIN syntax variations", async () => { + const join1 = await sql.unsafe(` + SELECT * FROM test_table + NATURAL JOIN test_table t2 + `); + + const join2 = await sql.unsafe(` + SELECT * FROM test_table t1 + JOIN test_table t2 USING (id) + `); + + const join3 = await sql.unsafe(` + SELECT * FROM test_table t1 + LEFT JOIN test_table t2 ON t1.id = t2.id + RIGHT OUTER JOIN test_table t3 ON t2.id = t3.id + FULL OUTER JOIN test_table t4 ON t3.id = t4.id + CROSS JOIN test_table t5 + INNER JOIN test_table t6 ON 1=1 + `); + + const join4 = await sql.unsafe(` + SELECT * FROM test_table t1 + JOIN test_table t2 ON ( + t1.id = t2.id + AND t1.name = t2.name + OR t1.value > t2.value + AND EXISTS (SELECT 1 FROM test_table WHERE id = t1.id) + ) + `); + }); + + test("handles weird but valid identifier quoting", async () => { + await sql` + SELECT + [bracket table].[col 1], + "weird-table"."col-2", + \`backtick\`.\`col\`, + test_table.id + FROM [bracket table], "weird-table", \`backtick\`, test_table + `; + + await sql`CREATE TABLE "table""with""quotes" ("col""umn" TEXT)`; + await sql`SELECT "col""umn" FROM "table""with""quotes"`; + + await sql`CREATE TABLE "测试表" ("列名" TEXT)`; + await sql`SELECT "列名" FROM "测试表"`; + + await sql`CREATE TABLE "SELECT" ("FROM" TEXT, "WHERE" INTEGER)`; + await sql`SELECT "FROM", "WHERE" FROM "SELECT"`; + }); + + test("handles complex string literals and escaping", async () => { + await sql`SELECT 'It''s a test' as str`; + + await sql`SELECT 'Hello' || ' ' || 'World' as greeting`; + + await sql`SELECT X'48656C6C6F' as hex_string`; + + await sql`SELECT x'0123456789ABCDEF' as blob_data`; + + try { + await sql`SELECT 'Line 1\nLine 2\tTabbed' as escaped`; + } catch {} + + await sql`SELECT '测试' as unicode_str`; + }); + + test("handles PRAGMA statements with various formats", async () => { + await 
sql`PRAGMA table_info(test_table)`; + + await sql`PRAGMA cache_size = 2000`; + + await sql`PRAGMA table_info('test_table')`; + + await sql` + PRAGMA foreign_keys = ON; + PRAGMA journal_mode = WAL; + PRAGMA synchronous = NORMAL; + `; + + await sql`PRAGMA main.table_info('test_table')`; + }); + + test("handles VACUUM and other maintenance commands", async () => { + await sql`VACUUM`; + + const tempDb = `/tmp/test_vacuum_${Date.now()}.db`; + try { + await sql`VACUUM INTO '${tempDb}'`; + } catch {} + + await sql`ANALYZE`; + await sql`ANALYZE test_table`; + await sql`ANALYZE main.test_table`; + + try { + await sql`REINDEX`; + await sql`REINDEX test_table`; + } catch {} + }); + + test("handles triggers with complex syntax", async () => { + await sql` + CREATE TRIGGER IF NOT EXISTS my_trigger + AFTER INSERT ON test_table + BEGIN + SELECT 1; + END + `; + + await sql` + CREATE TRIGGER complex_trigger + BEFORE UPDATE OF name, value ON test_table + FOR EACH ROW + WHEN NEW.value > OLD.value + BEGIN + SELECT NEW.value; + SELECT OLD.value; + UPDATE test_table SET value = NEW.value WHERE id != NEW.id; + END + `; + + await sql`CREATE VIEW test_view AS SELECT * FROM test_table`; + await sql` + CREATE TRIGGER view_trigger + INSTEAD OF INSERT ON test_view + BEGIN + INSERT INTO test_table VALUES (NEW.id, NEW.name, NEW.value); + END + `; + }); + + test("handles RETURNING clause variations", async () => { + const res1 = await sql.unsafe(` + INSERT INTO test_table (name, value) VALUES ('test', 100) + RETURNING * + `); + expect(res1).toHaveLength(1); + + const res2 = await sql.unsafe(` + UPDATE test_table SET value = value * 2 + WHERE name = 'test' + RETURNING id, value as new_value, value/2 as old_value + `); + + const res3 = await sql.unsafe(` + DELETE FROM test_table + WHERE value > 1000 + RETURNING id, name + `); + }); + + test("handles VALUES clause as table constructor", async () => { + const vals1 = await sql.unsafe(` + SELECT 1 as a, 'a' as b + UNION ALL SELECT 2, 'b' + UNION ALL SELECT 3, 'c' + `); + expect(vals1).toHaveLength(3); + + const vals2 = await sql.unsafe(` + WITH t(num, letter) AS ( + SELECT 1, 'x' + UNION ALL SELECT 2, 'y' + UNION ALL SELECT 3, 'z' + ) + SELECT * FROM t + `); + expect(vals2).toHaveLength(3); + + const vals3 = await sql.unsafe(` + SELECT 1 + 1 as col1, UPPER('hello') as col2 + UNION ALL + SELECT 2 * 3, LOWER('WORLD') + UNION ALL + SELECT (SELECT COUNT(*) FROM test_table), 'count' + `); + expect(vals3).toHaveLength(3); + }); + + test("handles complex CASE expressions", async () => { + await sql` + SELECT + CASE name + WHEN 'a' THEN 'Alpha' + WHEN 'b' THEN 'Beta' + ELSE 'Other' + END as name_full + FROM test_table + `; + + await sql` + SELECT + CASE + WHEN value < 10 AND name = 'a' THEN 'Low A' + WHEN value BETWEEN 10 AND 20 THEN 'Medium' + WHEN value > 20 OR name IN ('x', 'y', 'z') THEN 'High or Special' + WHEN EXISTS (SELECT 1 FROM test_table t2 WHERE t2.id > test_table.id) THEN 'Has Greater' + ELSE 'Default' + END as category + FROM test_table + `; + + await sql` + SELECT + CASE + WHEN value > 50 THEN + CASE name + WHEN 'a' THEN 'High A' + ELSE 'High Other' + END + ELSE 'Low' + END as nested_category + FROM test_table + `; + }); + + test("handles complex subqueries and correlated subqueries", async () => { + await sql` + SELECT + name, + (SELECT COUNT(*) FROM test_table t2 WHERE t2.name = t1.name) as name_count, + (SELECT MAX(value) FROM test_table t2 WHERE t2.id < t1.id) as max_before + FROM test_table t1 + `; + + await sql` + SELECT * FROM ( + SELECT * FROM test_table t1 + 
WHERE value > (SELECT AVG(value) FROM test_table t2 WHERE t2.name = t1.name) + ) subq + `; + + await sql` + SELECT * FROM test_table t1 + WHERE EXISTS ( + SELECT 1 FROM test_table t2 + WHERE t2.id != t1.id + AND t2.value > t1.value + ) + AND NOT EXISTS ( + SELECT 1 FROM test_table t3 + WHERE t3.name = t1.name + AND t3.id < t1.id + ) + `; + + await sql` + SELECT * FROM test_table + WHERE id IN (SELECT id FROM test_table WHERE value > 10) + AND name NOT IN (SELECT DISTINCT name FROM test_table WHERE value < 5) + `; + + await sql` + UPDATE test_table SET value = ( + SELECT AVG(value) FROM test_table t2 + WHERE t2.name = test_table.name + ) + WHERE id IN (SELECT id FROM test_table WHERE name = 'a') + `; + }); + + test("handles weird spacing, comments and formatting", async () => { + await sql`SELECT*FROM test_table WHERE id=1 AND name='a'OR value>10`; + + await sql` + SELECT + + + id , + + name + + FROM + + + test_table + + WHERE + + id = 1 + `; + + await sql` + /* start */ SELECT /* mid */ * /* comment */ FROM /* another */ test_table + -- line comment + WHERE id = 1 -- inline comment + /* multi + line + comment */ AND name = 'test' + `; + + await sql` + SELECT + id, -- comment 1 + /* comment 2 */ name, + value -- comment 3 + /* comment 4 */ + FROM test_table + /* WHERE clause comment */ + WHERE /* inline */ id /* another */ = /* more */ 1 + `; + }); + + test("handles special SQLite syntax features", async () => { + try { + await sql` + SELECT * FROM test_table INDEXED BY sqlite_autoindex_test_table_1 + WHERE id = 1 + `; + } catch {} + + await sql` + SELECT * FROM test_table NOT INDEXED + WHERE id = 1 + `; + + await sql` + SELECT * FROM test_table + WHERE name GLOB 'a*' + `; + + try { + await sql` + SELECT * FROM test_table + WHERE name MATCH 'search query' + `; + } catch {} + + await sql` + SELECT * FROM test_table + WHERE (id, name) IN ((1, 'a'), (2, 'b')) + `; + + await sql` + SELECT * FROM test_table + WHERE value IS NOT NULL + AND name IS NOT 'test' + `; + }); + + test("handles table-valued functions", async () => { + await sql` + SELECT * FROM json_each('["a", "b", "c"]') + `; + + await sql` + SELECT * FROM json_tree('{"a": [1, 2], "b": {"c": 3}}') + `; + + try { + await sql` + SELECT value FROM generate_series(1, 10, 2) + `; + } catch {} + + await sql` + SELECT * FROM test_table + JOIN json_each('["a", "b"]') ON test_table.name = json_each.value + `; + }); + + test("handles COLLATE clauses", async () => { + await sql`CREATE TABLE collate_test (name TEXT COLLATE NOCASE)`; + + await sql` + SELECT * FROM test_table + WHERE name = 'A' COLLATE NOCASE + `; + + await sql` + SELECT * FROM test_table + ORDER BY name COLLATE NOCASE DESC + `; + + await sql` + SELECT * FROM test_table + WHERE name COLLATE BINARY = 'a' + ORDER BY name COLLATE NOCASE, value COLLATE RTRIM + `; + }); + + test("handles date/time functions with complex formatting", async () => { + await sql` + SELECT + datetime('now'), + datetime('now', '+1 day', '-1 hour', '+30 minutes'), + date('now', 'start of month', '+1 month', '-1 day'), + time('12:34:56'), + julianday('now'), + strftime('%Y-%m-%d %H:%M:%S', 'now', 'localtime'), + strftime('%s', 'now'), + unixepoch('now') + `; + + await sql` + SELECT * FROM test_table + WHERE datetime('now') > datetime('2023-01-01') + `; + }); + + test("handles savepoints and nested transactions", async () => { + await sql`SAVEPOINT sp1`; + await sql`INSERT INTO test_table VALUES (999, 'savepoint', 999)`; + await sql`SAVEPOINT sp2`; + await sql`UPDATE test_table SET value = 0 WHERE id = 999`; + 
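// ROLLBACK TO undoes the changes made after sp2 but keeps that savepoint on the stack; RELEASE sp1 then ends the outer savepoint +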
await sql`ROLLBACK TO sp2`; + await sql`RELEASE sp1`; + + await sql` + SAVEPOINT outer; + SAVEPOINT inner; + ROLLBACK TO inner; + RELEASE outer; + `; + }); + + test("handles extremely nested queries", async () => { + await sql` + SELECT * FROM ( + SELECT * FROM ( + SELECT * FROM ( + SELECT * FROM ( + SELECT * FROM test_table + ) l4 + ) l3 + ) l2 + ) l1 + `; + + await sql` + SELECT + CASE + WHEN (value + (10 * (20 - (30 / (40 + (50 - 60)))))) > 0 + THEN ((((1 + 2) * 3) - 4) / 5) + ELSE (((((6))))) + END as nested_calc + FROM test_table + `; + + await sql` + SELECT + CASE + WHEN id = 1 THEN + CASE + WHEN value > 10 THEN + CASE + WHEN name = 'a' THEN 'A1>10' + ELSE 'Other1>10' + END + ELSE 'Low1' + END + ELSE 'NotOne' + END as super_nested + FROM test_table + `; + }); + + test("handles FILTER clauses on aggregate functions", async () => { + await sql` + SELECT + COUNT(*) FILTER (WHERE value > 10) as high_count, + SUM(value) FILTER (WHERE name = 'a') as a_sum, + AVG(value) FILTER (WHERE id < 5) as early_avg + FROM test_table + `; + + await sql` + SELECT + SUM(value) FILTER (WHERE name = 'a') OVER (ORDER BY id) as filtered_sum + FROM test_table + `; + + await sql` + SELECT + COUNT(*) FILTER (WHERE value > 10 AND name = 'a') as complex_filter, + MAX(value) FILTER (WHERE id IN (1,2,3)) as id_filter + FROM test_table + GROUP BY name + `; + }); + + test("handles special numeric literals", async () => { + await sql`SELECT 1.23e10, 4.56E-7, .5e2, 9.`; + + await sql`SELECT 0x1234, 0xDEADBEEF, 0xffffffff`; + + await sql`SELECT 1e308 * 10, 0.0 / 0.0`; + + await sql` + SELECT + 999999999999999999999999999999999999999, + 0.000000000000000000000000000000000001 + `; + }); + + test("handles compound SELECT statements", async () => { + expect( + await sql` + SELECT id, name FROM test_table + UNION + SELECT id + 100, 'union' FROM test_table + `.execute(), + ).toMatchInlineSnapshot(` + [ + { + "id": null, + "name": "test", + }, + { + "id": null, + "name": "union", + }, + { + "id": 1, + "name": "a", + }, + { + "id": 2, + "name": "b", + }, + { + "id": 3, + "name": "a", + }, + { + "id": 101, + "name": "union", + }, + { + "id": 102, + "name": "union", + }, + { + "id": 103, + "name": "union", + }, + { + "id": 999, + "name": "savepoint", + }, + { + "id": 1099, + "name": "union", + }, + ] + `); + + expect( + await sql` + SELECT * FROM test_table + UNION ALL + SELECT * FROM test_table + `.execute(), + ).toMatchInlineSnapshot(` + [ + { + "id": 1, + "name": "a", + "value": 20.5, + }, + { + "id": 2, + "name": "b", + "value": 20.5, + }, + { + "id": 3, + "name": "a", + "value": 20.5, + }, + { + "id": null, + "name": "test", + "value": 200, + }, + { + "id": 999, + "name": "savepoint", + "value": 999, + }, + { + "id": 1, + "name": "a", + "value": 20.5, + }, + { + "id": 2, + "name": "b", + "value": 20.5, + }, + { + "id": 3, + "name": "a", + "value": 20.5, + }, + { + "id": null, + "name": "test", + "value": 200, + }, + { + "id": 999, + "name": "savepoint", + "value": 999, + }, + ] + `); + + expect( + await sql` + SELECT name FROM test_table WHERE value > 10 + INTERSECT + SELECT name FROM test_table WHERE id < 5 + `.execute(), + ).toMatchInlineSnapshot(` + [ + { + "name": "a", + }, + { + "name": "b", + }, + ] + `); + + expect( + await sql` + SELECT * FROM test_table + EXCEPT + SELECT * FROM test_table WHERE name = 'excluded' + `.execute(), + ).toMatchInlineSnapshot(` + [ + { + "id": null, + "name": "test", + "value": 200, + }, + { + "id": 1, + "name": "a", + "value": 20.5, + }, + { + "id": 2, + "name": "b", + "value": 20.5, + }, 
+ { + "id": 3, + "name": "a", + "value": 20.5, + }, + { + "id": 999, + "name": "savepoint", + "value": 999, + }, + ] + `); + + expect( + await sql` + SELECT id FROM test_table WHERE value > 20 + UNION + SELECT id FROM test_table WHERE name = 'a' + EXCEPT + SELECT id FROM test_table WHERE id > 100 + INTERSECT + SELECT id FROM test_table WHERE value < 50 + `.execute(), + ).toMatchInlineSnapshot(` + [ + { + "id": 1, + }, + { + "id": 2, + }, + { + "id": 3, + }, + ] + `); + + expect( + await sql` + SELECT * FROM test_table WHERE value > 10 + UNION ALL + SELECT * FROM test_table WHERE value <= 10 + ORDER BY value DESC + LIMIT 5 + `.execute(), + ).toMatchInlineSnapshot(` + [ + { + "id": 999, + "name": "savepoint", + "value": 999, + }, + { + "id": null, + "name": "test", + "value": 200, + }, + { + "id": 1, + "name": "a", + "value": 20.5, + }, + { + "id": 2, + "name": "b", + "value": 20.5, + }, + { + "id": 3, + "name": "a", + "value": 20.5, + }, + ] + `); + }); + + test("handles CREATE TABLE with all constraint types", async () => { + await sql` + CREATE TABLE IF NOT EXISTS complex_constraints ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + email TEXT UNIQUE NOT NULL CHECK(email LIKE '%@%'), + age INTEGER CHECK(age >= 0 AND age <= 150), + status TEXT DEFAULT 'active' CHECK(status IN ('active', 'inactive', 'pending')), + parent_id INTEGER REFERENCES test_table(id) ON DELETE CASCADE ON UPDATE RESTRICT, + created_at TEXT DEFAULT CURRENT_TIMESTAMP, + data JSON CHECK(json_valid(data)), + UNIQUE(email, status), + CHECK(age > 18 OR parent_id IS NOT NULL), + FOREIGN KEY (parent_id) REFERENCES test_table(id) + ) + `; + + await sql` + CREATE TABLE strict_table ( + id INTEGER PRIMARY KEY, + int_col INT, + real_col REAL, + text_col TEXT, + blob_col BLOB, + any_col ANY + ) STRICT + `; + + await sql` + CREATE TABLE without_rowid_table ( + id INTEGER PRIMARY KEY, + value TEXT + ) WITHOUT ROWID + `; + + await sql` + CREATE TABLE generated_cols ( + radius REAL, + area REAL GENERATED ALWAYS AS (3.14159 * radius * radius) STORED, + circumference REAL GENERATED ALWAYS AS (2 * 3.14159 * radius) VIRTUAL + ) + `; + }); + + test("handles exotic but valid SQL patterns", async () => { + await sql`SELECT 'text with; semicolon' as str`; + + await sql` + SELECT + id as "SELECT", + name as "FROM", + value as "WHERE" + FROM test_table + `; + + await sql`SELECT * FROM test_table WHERE 1`; + await sql`SELECT * FROM test_table WHERE 0`; + await sql`SELECT * FROM test_table WHERE NULL`; + + await sql` + SELECT * FROM test_table + WHERE NOT NOT (value > 10) + `; + + await sql` + SELECT (((id))), ((name)), (((((value))))) + FROM (((test_table))) + WHERE ((((id = 1)))) + `; + + await sql`SELECT 1`; + await sql`SELECT 2;`; + + await sql.unsafe(`SELECT 3;;`); + await sql.unsafe(`;SELECT 4`); + await sql.unsafe(`;;SELECT 5;;`); + + await sql`CREATE TABLE weird_cols ("123" TEXT, "!" 
INTEGER, "@#$" REAL)`; + await sql.unsafe(`SELECT "123", "!", "@#$" FROM weird_cols`); + + const longName = "a".repeat(50_000_000); + + await sql.unsafe(`CREATE TABLE "${longName}" (col TEXT)`); + await sql.unsafe(`SELECT * FROM "${longName}"`); + await sql.unsafe(`DROP TABLE "${longName}"`); + }); + + describe("Result Modes", () => { + test("values() mode returns arrays instead of objects", async () => { + const dir = tempDirWithFiles("sqlite-values-mode", {}); + const sql = new SQL(`sqlite://${dir}/test.db`); + + await sql`CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, age INTEGER)`; + await sql`INSERT INTO users (name, age) VALUES ('Alice', 30), ('Bob', 25), ('Charlie', 35)`; + + const objectResults = await sql`SELECT id, name, age FROM users ORDER BY id`; + expect(objectResults).toHaveLength(3); + expect(objectResults[0]).toEqual({ id: 1, name: "Alice", age: 30 }); + expect(objectResults[1]).toEqual({ id: 2, name: "Bob", age: 25 }); + expect(objectResults[2]).toEqual({ id: 3, name: "Charlie", age: 35 }); + + const valuesResults = await sql`SELECT id, name, age FROM users ORDER BY id`.values(); + expect(valuesResults).toHaveLength(3); + expect(valuesResults[0]).toEqual([1, "Alice", 30]); + expect(valuesResults[1]).toEqual([2, "Bob", 25]); + expect(valuesResults[2]).toEqual([3, "Charlie", 35]); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("raw() mode returns buffers for SQLite", async () => { + const dir = tempDirWithFiles("sqlite-raw-mode", {}); + const sql = new SQL(`sqlite://${dir}/test.db`); + + await sql`CREATE TABLE test (id INTEGER, name TEXT, data BLOB, score REAL)`; + await sql`INSERT INTO test VALUES (42, 'hello', ${Buffer.from([1, 2, 3])}, 3.14)`; + + const result = await sql`SELECT * FROM test`.raw(); + expect(result).toBeArray(); + expect(result).toHaveLength(1); + + const row = result[0]; + expect(row).toBeArray(); + expect(row).toHaveLength(4); + + expect(row[0]).toBeInstanceOf(Uint8Array); + expect(row[1]).toBeInstanceOf(Uint8Array); + expect(row[2]).toBeInstanceOf(Uint8Array); + expect(row[3]).toBeInstanceOf(Uint8Array); + + const idBuf = row[0] as Uint8Array; + const idView = new DataView(idBuf.buffer, idBuf.byteOffset, idBuf.byteLength); + expect(idView.getBigInt64(0, true)).toBe(42n); + + const nameBuf = row[1] as Uint8Array; + expect(new TextDecoder().decode(nameBuf)).toBe("hello"); + + const dataBuf = row[2] as Uint8Array; + expect(Array.from(dataBuf)).toEqual([1, 2, 3]); + + const scoreBuf = row[3] as Uint8Array; + const scoreView = new DataView(scoreBuf.buffer, scoreBuf.byteOffset, scoreBuf.byteLength); + expect(scoreView.getFloat64(0, true)).toBe(3.14); + + await sql`INSERT INTO test VALUES (NULL, NULL, NULL, NULL)`; + const resultWithNull = await sql`SELECT * FROM test WHERE id IS NULL`.raw(); + expect(resultWithNull).toHaveLength(1); + const nullRow = resultWithNull[0]; + expect(nullRow[0]).toBeNull(); + expect(nullRow[1]).toBeNull(); + expect(nullRow[2]).toBeNull(); + expect(nullRow[3]).toBeNull(); + + await sql.close(); + await rm(dir, { recursive: true }); + }); + + test("values() mode works with PRAGMA commands", async () => { + const dir = tempDirWithFiles("sqlite-values-pragma", {}); + const sql = new SQL(`sqlite://${dir}/test.db`); + + const pragmaValues = await sql`PRAGMA table_info('sqlite_master')`.values(); + expect(Array.isArray(pragmaValues)).toBe(true); + + if (pragmaValues.length > 0) { + expect(Array.isArray(pragmaValues[0])).toBe(true); + } + + expect(pragmaValues).toMatchSnapshot(); + + await sql.close(); 
+ await rm(dir, { recursive: true }); + }); + }); +}); + +describe("Unicode & Encoding Fuzzing Tests", () => { + let sql: SQL; + + beforeEach(async () => { + sql = new SQL("sqlite://:memory:"); + }); + + afterEach(async () => { + await sql?.close(); + }); + + test("handles extensive Unicode scripts and languages", async () => { + await sql`CREATE TABLE unicode_fuzz (id INTEGER PRIMARY KEY, text_data TEXT, description TEXT)`; + + const unicodeTests = [ + // Japanese (Hiragana, Katakana, Kanji) + { text: "ひらがな", desc: "Hiragana" }, + { text: "カタカナ", desc: "Katakana" }, + { text: "漢字", desc: "Kanji" }, + { text: "日本語の文章です。", desc: "Japanese sentence" }, + { text: "。・゚゚・(>_<)・゚゚・。", desc: "Japanese emoticon" }, + { text: "㊗️㊙️㊟", desc: "Circled ideographs" }, + + // Arabic (RTL) + { text: "مرحبا بالعالم", desc: "Arabic hello world" }, + { text: "السَّلَامُ عَلَيْكُمْ", desc: "Arabic with diacritics" }, + { text: "١٢٣٤٥٦٧٨٩٠", desc: "Arabic-Indic digits" }, + { text: "ﷺ", desc: "Arabic ligature" }, + { text: "ﺍﺏﺕﺙﺝﺡﺥﺩﺫﺭﺯ", desc: "Arabic presentation forms" }, + + // Hebrew (RTL) + { text: "שָׁלוֹם עוֹלָם", desc: "Hebrew with vowel points" }, + { text: "עִבְרִית", desc: "Hebrew word" }, + { text: "א״ב ג״ד", desc: "Hebrew with geresh" }, + + // Cyrillic + { text: "Привет мир", desc: "Russian" }, + { text: "Здравствуйте", desc: "Russian greeting" }, + { text: "ЁЖИК", desc: "Russian caps with Ё" }, + { text: "Ѳѳ Ѵѵ Ѱѱ", desc: "Old Cyrillic" }, + + // Greek + { text: "Γειά σου κόσμε", desc: "Greek hello world" }, + { text: "Ελληνικά", desc: "Greek word" }, + { text: "Α Β Γ Δ Ε Ζ Η Θ", desc: "Greek alphabet" }, + { text: "άέήίόύώ", desc: "Greek with tonos" }, + + // Thai + { text: "สวัสดีชาวโลก", desc: "Thai hello world" }, + { text: "ภาษาไทย", desc: "Thai language" }, + { text: "๏๐๑๒๓๔๕๖๗๘๙", desc: "Thai digits and symbols" }, + + // Korean + { text: "안녕하세요", desc: "Korean greeting" }, + { text: "한글", desc: "Hangul" }, + { text: "ㄱㄴㄷㄹㅁㅂㅅ", desc: "Korean Jamo" }, + + // Chinese + { text: "你好世界", desc: "Chinese simplified" }, + { text: "繁體中文", desc: "Traditional Chinese" }, + { text: "㊀㊁㊂㊃㊄㊅", desc: "Circled Chinese" }, + + // Devanagari (Hindi) + { text: "नमस्ते दुनिया", desc: "Hindi hello world" }, + { text: "अआइईउऊऋॠ", desc: "Devanagari vowels" }, + { text: "०१२३४५६७८९", desc: "Devanagari digits" }, + + // Tamil + { text: "வணக்கம் உலகம்", desc: "Tamil hello world" }, + { text: "தமிழ்", desc: "Tamil word" }, + + // Emoji sequences + { text: "👨‍👩‍👧‍👦", desc: "Family emoji ZWJ sequence" }, + { text: "👨🏻‍💻", desc: "Man technologist with skin tone" }, + { text: "🏳️‍🌈", desc: "Rainbow flag" }, + { text: "🧑‍🤝‍🧑", desc: "People holding hands" }, + { text: "👁️‍🗨️", desc: "Eye in speech bubble" }, + { text: "🏴󠁧󠁢󠁥󠁮󠁧󠁿", desc: "England flag" }, + { text: "🏴󠁧󠁢󠁳󠁣󠁴󠁿", desc: "Scotland flag" }, + { text: "🏴󠁧󠁢󠁷󠁬󠁳󠁿", desc: "Wales flag" }, + + // Mathematical symbols + { text: "∀∃∅∈∉⊂⊃⊆⊇", desc: "Set theory symbols" }, + { text: "∫∬∭∮∯∰", desc: "Integral symbols" }, + { text: "√∛∜", desc: "Root symbols" }, + { text: "𝕳𝖊𝖑𝖑𝖔", desc: "Mathematical bold Fraktur" }, + { text: "𝓗𝓮𝓵𝓵𝓸", desc: "Mathematical bold script" }, + { text: "𝒽ℯ𝓁𝓁ℴ", desc: "Mathematical italic" }, + + // Combining characters + { text: "e\u0301", desc: "e with combining acute" }, + { text: "n\u0303", desc: "n with combining tilde" }, + { text: "a\u0300\u0301\u0302\u0303\u0304", desc: "a with multiple combining marks" }, + { text: "Z̴̧̢̛͔̳̮̤̣̈́̊̄͒a̸̧̨̺̯̟̯̿̈́͊̕l̶̢̜̦̣̇̆̾g̸̨̣̲̈́͊̍̕ȏ̷̧̜̠̣̊", desc: "Zalgo text" }, + + // Zero-width characters + { text: 
"test\u200Bword", desc: "Zero-width space" }, + { text: "test\u200Cword", desc: "Zero-width non-joiner" }, + { text: "test\u200Dword", desc: "Zero-width joiner" }, + { text: "test\uFEFFword", desc: "Zero-width no-break space" }, + + // RTL/LTR mixing + { text: "Hello שלום World", desc: "Mixed LTR/RTL" }, + { text: "العربية English עברית", desc: "Multiple script directions" }, + { text: "\u202Eevil text", desc: "RLO override" }, + { text: "\u202Dforce LTR\u202C", desc: "LTR override with pop" }, + + // Special Unicode blocks + { text: "♠♣♥♦", desc: "Card suits" }, + { text: "☀☁☂☃☄★☆", desc: "Weather symbols" }, + { text: "♈♉♊♋♌♍♎♏", desc: "Zodiac symbols" }, + { text: "⚀⚁⚂⚃⚄⚅", desc: "Dice faces" }, + { text: "❶❷❸❹❺❻❼❽❾❿", desc: "Circled numbers" }, + + // Box drawing + { text: "┌─┬─┐│ ││ │├─┼─┤└─┴─┘", desc: "Box drawing characters" }, + { text: "╔═╦═╗║ ║║ ║╠═╬═╣╚═╩═╝", desc: "Double box drawing" }, + + // Currency symbols + { text: "$€£¥₹₽₩₨₪₫₱", desc: "Currency symbols" }, + + // Superscript/Subscript + { text: "x²y³z⁴", desc: "Superscript" }, + { text: "H₂O", desc: "Subscript" }, + + // Weird UTF-8 edge cases + { text: "\uD800", desc: "High surrogate (invalid alone)" }, + { text: "\uDFFF", desc: "Low surrogate (invalid alone)" }, + { text: "\uFFFD", desc: "Replacement character" }, + { text: "\uFFFE", desc: "Byte order mark inverse" }, + { text: String.fromCodePoint(0x10ffff), desc: "Max valid Unicode" }, + { text: String.fromCodePoint(0x1f4a9), desc: "Pile of poo emoji" }, + + // Various quote marks + { text: `''‚""„`, desc: "Various quotes" }, + { text: "«»‹›", desc: "Guillemets" }, + { text: "「」『』", desc: "CJK quotes" }, + + // Control characters mixed with text + { text: "hello\x00world", desc: "Null in middle" }, + { text: "tab\there", desc: "Tab character" }, + { text: "line\nbreak", desc: "Newline" }, + { text: "carriage\rreturn", desc: "Carriage return" }, + + // Long repetitive Unicode + { text: "🎉".repeat(100), desc: "100 party emojis" }, + { text: "あ".repeat(500), desc: "500 Japanese characters" }, + { text: "۝".repeat(200), desc: "200 Arabic symbols" }, + + // Mixed everything chaos + { text: "Hello世界مرحبا🌍שלום мир🎉", desc: "Multiple scripts and emoji" }, + { text: "a̐éö̲ūï̍œ̃", desc: "Latin with various diacritics" }, + { text: "㊗️エンコーディング🎌テスト✨", desc: "Japanese with emoji" }, + ]; + + // Insert all test cases + for (let i = 0; i < unicodeTests.length; i++) { + const { text, desc } = unicodeTests[i]; + await sql`INSERT INTO unicode_fuzz VALUES (${i}, ${text}, ${desc})`; + } + + // Verify all data was stored and retrieved correctly + for (let i = 0; i < unicodeTests.length; i++) { + const { text, desc } = unicodeTests[i]; + const result = await sql`SELECT text_data, description FROM unicode_fuzz WHERE id = ${i}`; + expect(result).toHaveLength(1); + + // SQLite's actual behavior with problematic Unicode: + // - Lone surrogates (\uD800, \uDFFF) are dropped (become empty string) + // - BOM inverse (\uFFFE) is dropped (becomes empty string) + // - Null characters are preserved (not truncated) + const droppedCharacters = [ + "High surrogate (invalid alone)", + "Low surrogate (invalid alone)", + "Byte order mark inverse", + ]; + + if (droppedCharacters.includes(desc)) { + // SQLite drops these invalid UTF-8 sequences + expect(result[0].text_data).toBe(""); + } else { + // All other characters should be preserved exactly, including null bytes + expect(result[0].text_data).toBe(text); + } + expect(result[0].description).toBe(desc); + } + + // Test searching with Unicode + const 
arabicSearch = await sql`SELECT * FROM unicode_fuzz WHERE text_data LIKE ${"%مرحبا%"}`; + expect(arabicSearch.length).toBeGreaterThan(0); + + const emojiSearch = await sql`SELECT * FROM unicode_fuzz WHERE text_data LIKE ${"%🎉%"}`; + expect(emojiSearch.length).toBeGreaterThan(0); + }); + + test("handles Unicode in column names and table names", async () => { + // Table names with Unicode + await sql`CREATE TABLE "日本語テーブル" (id INTEGER, value TEXT)`; + await sql`INSERT INTO "日本語テーブル" VALUES (1, 'test')`; + const result1 = await sql`SELECT * FROM "日本語テーブル"`; + expect(result1).toHaveLength(1); + + // Column names with Unicode + await sql`CREATE TABLE unicode_cols ("列名" TEXT, "عمود" TEXT, "στήλη" TEXT)`; + await sql`INSERT INTO unicode_cols VALUES ('Japanese', 'Arabic', 'Greek')`; + const result2 = await sql`SELECT * FROM unicode_cols`; + expect(result2[0]["列名"]).toBe("Japanese"); + expect(result2[0]["عمود"]).toBe("Arabic"); + expect(result2[0]["στήλη"]).toBe("Greek"); + }); + + test("handles Unicode in SQL functions", async () => { + await sql`CREATE TABLE unicode_func_test (id INTEGER, text_data TEXT)`; + + const testCases = [ + { text: "HELLO WORLD", expected_lower: "hello world" }, + { text: "ЁЖИК", expected_lower: "ёжик" }, + { text: "ΔΙΑΦΟΡΆ", expected_lower: "διαφορά" }, + ]; + + for (let i = 0; i < testCases.length; i++) { + const { text } = testCases[i]; + await sql`INSERT INTO unicode_func_test VALUES (${i}, ${text})`; + } + + // Test LENGTH with Unicode + await sql`INSERT INTO unicode_func_test VALUES (100, ${"🎉🎊🎈"})`; + const lengthResult = await sql`SELECT LENGTH(text_data) as len FROM unicode_func_test WHERE id = 100`; + // Note: SQLite LENGTH counts characters (code points) for TEXT values, not bytes + expect(lengthResult[0].len).toBeGreaterThan(0); + + // Test SUBSTR with Unicode + await sql`INSERT INTO unicode_func_test VALUES (101, ${"Hello世界"})`; + const substrResult = await sql`SELECT SUBSTR(text_data, 6, 2) as sub FROM unicode_func_test WHERE id = 101`; + expect(substrResult[0].sub).toBe("世界"); + }); + + test("handles Unicode normalization edge cases", async () => { + await sql`CREATE TABLE normalization_test (id INTEGER, text_data TEXT)`; + + // Different Unicode normalizations of "é" + const normalizations = [ + "\u00E9", // NFC: é (single character) + "e\u0301", // NFD: e + combining acute + "\u0065\u0301", // NFD explicit + ]; + + for (let i = 0; i < normalizations.length; i++) { + await sql`INSERT INTO normalization_test VALUES (${i}, ${normalizations[i]})`; + const result = await sql`SELECT text_data FROM normalization_test WHERE id = ${i}`; + expect(result[0].text_data).toBe(normalizations[i]); + } + }); + + test("handles binary data that looks like UTF-8", async () => { + await sql`CREATE TABLE binary_test (id INTEGER, data BLOB)`; + + // Invalid UTF-8 sequences + const invalidSequences = [ + Buffer.from([0xff, 0xfe, 0xfd]), // Invalid UTF-8 start bytes + Buffer.from([0xc0, 0x80]), // Overlong encoding + Buffer.from([0xed, 0xa0, 0x80]), // UTF-16 surrogate + Buffer.from([0xf4, 0x90, 0x80, 0x80]), // Code point > U+10FFFF + Buffer.from([0xc2]), // Incomplete sequence + Buffer.from([0xe0, 0x80, 0x80]), // Overlong 3-byte + Buffer.from([0xf0, 0x80, 0x80, 0x80]), // Overlong 4-byte + ]; + + for (let i = 0; i < invalidSequences.length; i++) { + await sql`INSERT INTO binary_test VALUES (${i}, ${invalidSequences[i]})`; + const result = await sql`SELECT data FROM binary_test WHERE id = ${i}`; + expect(Buffer.from(result[0].data)).toEqual(invalidSequences[i]); + } + }); + + test("handles massive Unicode 
string operations", async () => { + await sql`CREATE TABLE massive_unicode (id INTEGER, text_data TEXT)`; + + // Create a massive string with various Unicode + const components = ["English", "日本語", "العربية", "עברית", "Ελληνικά", "🎉", "👨‍👩‍👧‍👦", "∫∂∇", "№", "™", "©", "®"]; + + const massiveString = components.map(c => c.repeat(100)).join(" "); + + await sql`INSERT INTO massive_unicode VALUES (1, ${massiveString})`; + const result = await sql`SELECT text_data FROM massive_unicode WHERE id = 1`; + expect(result[0].text_data).toBe(massiveString); + + // Test with LIKE on massive Unicode string + const likeResult = await sql`SELECT id FROM massive_unicode WHERE text_data LIKE ${"%日本語%"}`; + expect(likeResult).toHaveLength(1); + }); + + test("handles Unicode in prepared statement parameters", async () => { + await sql`CREATE TABLE param_test (id INTEGER, text_data TEXT)`; + + const unicodeParams = [ + "🚀 Launch", + "مرحبا parameters", + "パラメータ", + "\u0000embedded null", + "tab\there", + "new\nline", + ]; + + // Test with direct parameters + for (let i = 0; i < unicodeParams.length; i++) { + const param = unicodeParams[i]; + await sql`INSERT INTO param_test VALUES (${i}, ${param})`; + } + + // Verify all parameters were handled correctly + for (let i = 0; i < unicodeParams.length; i++) { + const result = await sql`SELECT text_data FROM param_test WHERE id = ${i}`; + expect(result[0].text_data).toBe(unicodeParams[i]); + } + + // Test WHERE clause with Unicode parameter + const whereResult = await sql`SELECT * FROM param_test WHERE text_data = ${"🚀 Launch"}`; + expect(whereResult).toHaveLength(1); + expect(whereResult[0].id).toBe(0); + }); + + test("handles Unicode collation and sorting", async () => { + await sql`CREATE TABLE collation_test (id INTEGER, text_data TEXT)`; + + const sortTestData = [ + "zebra", + "Zebra", + "ZEBRA", + "äpfel", + "Äpfel", + "апельсин", + "Апельсин", + "🍎", + "🍊", + "日本", + "中国", + "한국", + ]; + + for (let i = 0; i < sortTestData.length; i++) { + await sql`INSERT INTO collation_test VALUES (${i}, ${sortTestData[i]})`; + } + + // Test ORDER BY with Unicode + const ordered = await sql`SELECT text_data FROM collation_test ORDER BY text_data`; + expect(ordered).toHaveLength(sortTestData.length); + + // Verify ordering happened (exact order depends on SQLite collation) + expect(ordered[0].text_data).toBeDefined(); + expect(ordered[ordered.length - 1].text_data).toBeDefined(); + }); + + test("handles Unicode in JSON operations", async () => { + await sql`CREATE TABLE json_unicode (id INTEGER, json_data TEXT)`; + + const jsonWithUnicode = { + english: "Hello", + japanese: "こんにちは", + arabic: "مرحبا", + emoji: "🎉🚀", + special: "a\u0301\u0302\u0303", + rtl: "Hello עברית World", + }; + + const jsonString = JSON.stringify(jsonWithUnicode); + await sql`INSERT INTO json_unicode VALUES (1, ${jsonString})`; + + const result = await sql`SELECT json_data FROM json_unicode WHERE id = 1`; + const parsed = JSON.parse(result[0].json_data); + + expect(parsed.japanese).toBe("こんにちは"); + expect(parsed.arabic).toBe("مرحبا"); + expect(parsed.emoji).toBe("🎉🚀"); + }); + + test("handles extreme edge cases and malformed sequences", async () => { + await sql`CREATE TABLE edge_cases (id INTEGER, text_data TEXT, blob_data BLOB)`; + + const edgeCases = [ + // Extremely long strings + { text: "A".repeat(10000) + "🎉".repeat(1000) + "世".repeat(1000), desc: "Very long mixed" }, + + // Boundary values + { text: String.fromCharCode(0), desc: "Null character" }, + { text: String.fromCharCode(0xd7ff), desc: 
"Before surrogates" }, + { text: String.fromCharCode(0xe000), desc: "After surrogates" }, + { text: String.fromCharCode(0xfffd), desc: "Replacement char" }, + + // Mixed direction markers + { text: "\u202A\u202B\u202C\u202D\u202E", desc: "All direction markers" }, + + // Variation selectors + { text: "☃️", desc: "Snowman with variation selector" }, + { text: "☃︎", desc: "Snowman text style" }, + + // Regional indicators (flags) + { text: "🇺🇸🇯🇵🇬🇧🇫🇷🇩🇪", desc: "Multiple flags" }, + + // Skin tone modifiers + { text: "👋🏻👋🏼👋🏽👋🏾👋🏿", desc: "Wave with all skin tones" }, + + // Zero width joiners in text + { text: "पार्थ", desc: "Devanagari with ZWJ" }, + + // Invisible characters + { text: "\u2060\u2061\u2062\u2063", desc: "Invisible math operators" }, + { text: "\u2028\u2029", desc: "Line and paragraph separators" }, + ]; + + for (let i = 0; i < edgeCases.length; i++) { + const { text } = edgeCases[i]; + await sql`INSERT INTO edge_cases VALUES (${i}, ${text}, ${Buffer.from(text)})`; + + const result = await sql`SELECT text_data, blob_data FROM edge_cases WHERE id = ${i}`; + expect(result[0].text_data).toBe(text); + expect(Buffer.from(result[0].blob_data).toString()).toBe(text); + } + }); +}); diff --git a/test/js/sql/sqlite-url-parsing.test.ts b/test/js/sql/sqlite-url-parsing.test.ts new file mode 100644 index 0000000000..9f808e44d8 --- /dev/null +++ b/test/js/sql/sqlite-url-parsing.test.ts @@ -0,0 +1,328 @@ +import { SQL } from "bun"; +import { describe, expect, test } from "bun:test"; + +describe("SQLite URL Parsing Matrix", () => { + const protocols = [ + { prefix: "sqlite://", name: "sqlite://" }, + { prefix: "sqlite:", name: "sqlite:" }, + { prefix: "file://", name: "file://" }, + { prefix: "file:", name: "file:" }, + { prefix: "", name: "no protocol" }, // adapter specified in these ones + ] as const; + + const paths = [ + { input: ":memory:", expected: ":memory:", name: "memory database" }, + { input: "test.db", expected: "test.db", name: "simple filename" }, + { input: "./test.db", expected: "./test.db", name: "relative path" }, + { input: "../test.db", expected: "../test.db", name: "parent path" }, + { input: "path/to/test.db", expected: "path/to/test.db", name: "nested path" }, + { input: "/tmp/test.db", expected: "/tmp/test.db", name: "absolute Unix path" }, + { input: "test with spaces.db", expected: "test with spaces.db", name: "spaces in filename" }, + { input: "test#hash.db", expected: "test#hash.db", name: "hash in filename" }, + { input: "test@symbol.db", expected: "test@symbol.db", name: "@ in filename" }, + { input: "test&.db", expected: "test&.db", name: "ampersand in filename" }, + { input: "test%20encoded.db", expected: "test%20encoded.db", name: "percent encoding" }, + { input: "", expected: "", name: "empty path" }, + ] as const; + + const testMatrix = protocols + .flatMap(protocol => + paths.map(path => ({ + url: protocol.prefix + path.input, + input: path.input, + expected: path.expected, + protocolName: protocol.name, + pathName: path.name, + needsAdapter: protocol.prefix === "", + })), + ) + .filter(test => { + if (test.protocolName === "no protocol" && test.pathName === "memory database") { + return false; // :memory: without protocol is valid + } + + return true; + }); + + describe("Protocol × Path matrix", () => { + test.each(testMatrix)("$protocolName with $pathName: $url", testCase => { + if (testCase.needsAdapter) { + // Test with explicit adapter for no-protocol cases + const sql = new SQL(testCase.url, { adapter: "sqlite" }); + 
expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(testCase.expected || ":memory:"); + sql.close(); + } else { + // Test without adapter (should auto-detect SQLite) + const sql = new SQL(testCase.url); + expect(sql.options.adapter).toBe("sqlite"); + + if (testCase.protocolName === "file://") { + const filename = sql.options.filename; + // The implementation uses Bun.fileURLToPath if valid, else strips "file://" + let expected: string; + try { + expected = Bun.fileURLToPath(testCase.url); + } catch { + // Not a valid file:// URL, so implementation just strips the prefix + expected = testCase.url.slice(7); // "file://".length + } + expect(filename).toBe(expected); + } else { + expect(sql.options.filename).toBe(testCase.expected); + } + sql.close(); + } + }); + }); + + describe("Query parameters matrix", () => { + const protocolsWithQuery = ["sqlite://test.db", "sqlite:test.db", "file://test.db", "file:test.db"]; + + const queryParams = [ + { query: "", readonly: undefined, create: undefined, name: "no params" }, + { query: "?mode=ro", readonly: true, create: undefined, name: "readonly" }, + { query: "?mode=rw", readonly: false, create: undefined, name: "read-write" }, + { query: "?mode=rwc", readonly: false, create: true, name: "read-write-create" }, + { query: "?mode=invalid", readonly: undefined, create: undefined, name: "invalid mode" }, + { query: "?other=param", readonly: undefined, create: undefined, name: "other param" }, + { query: "?mode=ro&cache=shared", readonly: true, create: undefined, name: "multiple params" }, + ]; + + const queryMatrix = protocolsWithQuery.flatMap(base => + queryParams.map(param => ({ + url: base + param.query, + base: base, + ...param, + })), + ); + + test.each(queryMatrix)("$base with $name", testCase => { + const sql = new SQL(testCase.url); + + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.readonly).toBe(testCase.readonly!); + expect(sql.options.create).toBe(testCase.create!); + + if (!testCase.base.startsWith("file://")) { + expect(sql.options.filename).toBe("test.db"); + } + + sql.close(); + }); + }); + + describe("Windows-style paths matrix", () => { + const windowsPaths = [ + { input: "C:/test.db", expected: "C:/test.db", name: "forward slash drive" }, + { input: "C:\\test.db", expected: "C:\\test.db", name: "backslash drive" }, + { input: "D:/path/to/test.db", expected: "D:/path/to/test.db", name: "nested forward slash" }, + { input: "D:\\path\\to\\test.db", expected: "D:\\path\\to\\test.db", name: "nested backslash" }, + { input: "\\\\server\\share\\test.db", expected: "\\\\server\\share\\test.db", name: "UNC path" }, + { input: "C:/path\\mixed/test.db", expected: "C:/path\\mixed/test.db", name: "mixed slashes" }, + ]; + + const windowsProtocols = [ + "sqlite://", + "sqlite:", + "file:///", // Three slashes for file:// + "file:", + ]; + + const windowsMatrix = windowsProtocols.flatMap(protocol => + windowsPaths.map(path => ({ + url: protocol + path.input, + input: path.input, + expected: path.expected, + protocol: protocol, + pathName: path.name, + })), + ); + + test.each(windowsMatrix)("Windows: $protocol with $pathName", testCase => { + const sql = new SQL(testCase.url); + expect(sql.options.adapter).toBe("sqlite"); + + if (testCase.protocol.startsWith("file://")) { + const filename = sql.options.filename; + let expected: string; + try { + expected = Bun.fileURLToPath(testCase.url); + } catch { + expected = testCase.url.slice(testCase.protocol.length); + } + expect(filename).toBe(expected); + } else 
{ + expect(sql.options.filename).toBe(testCase.expected); + } + + sql.close(); + }); + }); + + describe("Unix-style paths matrix", () => { + const unixPaths = [ + { input: "/home/user/test.db", expected: "/home/user/test.db", name: "home directory" }, + { input: "/var/lib/test.db", expected: "/var/lib/test.db", name: "system directory" }, + { input: ".hidden.db", expected: ".hidden.db", name: "hidden file" }, + { input: "~/.config/test.db", expected: "~/.config/test.db", name: "tilde path" }, + { input: "test:colon.db", expected: "test:colon.db", name: "colon in name" }, + ]; + + const unixProtocols = ["sqlite://", "sqlite:", "file://", "file:"]; + + const unixMatrix = unixProtocols.flatMap(protocol => + unixPaths.map(path => ({ + url: protocol + path.input, + input: path.input, + expected: path.expected, + protocol: protocol, + pathName: path.name, + })), + ); + + test.each(unixMatrix)("Unix: $protocol with $pathName", testCase => { + const sql = new SQL(testCase.url); + expect(sql.options.adapter).toBe("sqlite"); + + if (testCase.protocol === "file://") { + const filename = sql.options.filename; + // Same logic as above - try Bun.fileURLToPath, fallback to stripping prefix + let expected: string; + try { + expected = Bun.fileURLToPath(testCase.url); + } catch { + expected = testCase.url.slice(7); // "file://".length + } + expect(filename).toBe(expected); + } else { + expect(sql.options.filename).toBe(testCase.expected); + } + + sql.close(); + }); + }); + + describe("Special characters matrix", () => { + const specialChars = [ + { char: " ", name: "space", encoded: "%20" }, + { char: "#", name: "hash", encoded: "%23" }, + { char: "%", name: "percent", encoded: "%25" }, + { char: "&", name: "ampersand", encoded: "%26" }, + { char: "(", name: "paren open", encoded: "%28" }, + { char: ")", name: "paren close", encoded: "%29" }, + { char: "[", name: "bracket open", encoded: "%5B" }, + { char: "]", name: "bracket close", encoded: "%5D" }, + { char: "{", name: "brace open", encoded: "%7B" }, + { char: "}", name: "brace close", encoded: "%7D" }, + { char: "'", name: "single quote", encoded: "%27" }, + { char: '"', name: "double quote", encoded: "%22" }, + { char: "🎉", name: "emoji", encoded: "%F0%9F%8E%89" }, + { char: "测", name: "chinese", encoded: "%E6%B5%8B" }, + ]; + + const charMatrix = specialChars.flatMap(charInfo => [ + { + url: `sqlite://test${charInfo.char}file.db`, + expected: `test${charInfo.char}file.db`, + description: `sqlite:// with ${charInfo.name} (raw)`, + }, + { + url: `sqlite://test${charInfo.encoded}file.db`, + expected: `test${charInfo.encoded}file.db`, + description: `sqlite:// with ${charInfo.name} (encoded)`, + }, + ]); + + test.each(charMatrix)("$description", testCase => { + const sql = new SQL(testCase.url); + expect(sql.options.adapter).toBe("sqlite"); + expect(sql.options.filename).toBe(testCase.expected); + sql.close(); + }); + }); + + describe("import.meta.resolve() compatibility", () => { + test("handles URLs from import.meta.resolve()", () => { + // Use import.meta.resolve() to get the actual format for the current platform + const resolvedUrl = import.meta.resolve("./test.db"); + + const sql = new SQL(resolvedUrl); + expect(sql.options.adapter).toBe("sqlite"); + + const filename = sql.options.filename; + const expected = Bun.fileURLToPath(resolvedUrl); + expect(filename).toBe(expected); + + sql.close(); + }); + }); + + describe("Edge cases", () => { + test("handles very long paths", () => { + const longFilename = "a".repeat(255) + ".db"; + const longPath = 
`/tmp/${longFilename}`; + const sql = new SQL(`sqlite://${longPath}`); + expect(sql.options.filename).toBe(longPath); + sql.close(); + }); + + test("handles database with .db in middle of name", () => { + // Use a path that won't create a file in the project root + const path = "/tmp/test.db.backup"; + const sql = new SQL(`sqlite://${path}`); + expect(sql.options.filename).toBe(path); + sql.close(); + }); + + test("handles path with multiple dots", () => { + // Use a path that won't create a file in the project root + const path = "/tmp/test...db"; + const sql = new SQL(`sqlite://${path}`); + expect(sql.options.filename).toBe(path); + sql.close(); + }); + + test("empty string with adapter defaults to :memory:", () => { + const sql = new SQL("", { adapter: "sqlite" }); + expect(sql.options.filename).toBe(":memory:"); + sql.close(); + }); + + test("null with adapter defaults to :memory:", () => { + const sql = new SQL(null as never, { adapter: "sqlite" }); + expect(sql.options.filename).toBe(":memory:"); + sql.close(); + }); + + test("undefined with adapter defaults to :memory:", () => { + const sql = new SQL(undefined as never, { adapter: "sqlite" }); + expect(sql.options.filename).toBe(":memory:"); + sql.close(); + }); + }); + + describe("Non-SQLite protocols should use postgres", () => { + const nonSqliteUrls = [ + "http://example.com/test.db", + "https://example.com/test.db", + "ftp://example.com/test.db", + "postgres://user:pass@localhost/db", + "postgresql://user:pass@localhost/db", + ]; + + test.each(nonSqliteUrls)("treats %s as postgres", url => { + const sql = new SQL(url); + expect(sql.options.adapter).toBe("postgres"); + sql.close(); + }); + }); + + describe("Plain filenames without adapter should throw", () => { + test("plain filename without adapter throws", () => { + expect(() => new SQL("myapp.db")).toThrowErrorMatchingInlineSnapshot( + `"Invalid URL 'myapp.db' for postgres. Did you mean to specify \`{ adapter: "sqlite" }\`?"`, + ); + }); + }); +}); diff --git a/test/js/sql/tls-sql.test.ts b/test/js/sql/tls-sql.test.ts index ab8e3bf073..9257b1d3f4 100644 --- a/test/js/sql/tls-sql.test.ts +++ b/test/js/sql/tls-sql.test.ts @@ -9,7 +9,7 @@ for (const options of [ { url: TLS_POSTGRES_DATABASE_URL, tls: true, - adapter: "postgresql", + adapter: "postgres", max: 1, bigint: true, prepare: true, @@ -18,23 +18,22 @@ for (const options of [ { url: PG_TRANSACTION_POOL_SUPABASE_URL, tls: true, - adapter: "postgresql", + adapter: "postgres", max: 1, bigint: true, prepare: false, transactionPool: true, }, - { url: TLS_POSTGRES_DATABASE_URL, tls: true, - adapter: "postgresql", + adapter: "postgres", max: 1, bigint: true, prepare: false, transactionPool: false, }, -]) { +] satisfies (Bun.SQL.Options & { transactionPool?: boolean })[]) { describe(`${options.transactionPool ? "Transaction Pooling" : `Prepared Statements (${options.prepare ? "on" : "off"})`}`, () => { test("default sql", async () => { expect(sql.reserve).toBeDefined(); @@ -79,6 +78,8 @@ for (const options of [ test("Throws on illegal transactions", async () => { await using sql = new SQL({ ...options, max: 2 }); const error = await sql`BEGIN`.catch(e => e); + expect(error).toBeInstanceOf(SQL.SQLError); + expect(error).toBeInstanceOf(SQL.PostgresError); return expect(error.code).toBe("ERR_POSTGRES_UNSAFE_TRANSACTION"); });