diff --git a/docs/api/redis.md b/docs/api/redis.md
new file mode 100644
index 0000000000..0cfc01b076
--- /dev/null
+++ b/docs/api/redis.md
@@ -0,0 +1,503 @@
+Bun provides native bindings for working with Redis databases with a modern, Promise-based API. The interface is designed to be simple and performant, with built-in connection management, fully typed responses, and TLS support.
+
+```ts
+import { redis } from "bun";
+
+// Set a key
+await redis.set("greeting", "Hello from Bun!");
+
+// Get a key
+const greeting = await redis.get("greeting");
+console.log(greeting); // "Hello from Bun!"
+
+// Increment a counter
+await redis.set("counter", 0);
+await redis.incr("counter");
+
+// Check if a key exists
+const exists = await redis.exists("greeting");
+
+// Delete a key
+await redis.del("greeting");
+```
+
+{% features title="Features" %}
+
+{% icon size=20 name="Bolt" /%} Fast native implementation using Zig and JavaScriptCore
+
+{% icon size=20 name="Link" /%} Automatic pipelining for better performance
+
+{% icon size=20 name="EthernetPort" /%} Auto-reconnect with exponential backoff
+
+{% icon size=20 name="Omega" /%} Support for RESP3 protocol
+
+{% icon size=20 name="Lock" /%} TLS support
+
+{% icon size=20 name="Clock" /%} Connection management with configurable timeouts
+
+{% icon size=20 name="IndentDecrease" /%} Offline command queue
+
+{% icon size=20 name="Settings" /%} Automatic configuration with environment variables
+
+{% icon size=20 name="Hash" /%} Support for hash, set, and other Redis data structures
+
+{% /features %}
+
+## Getting Started
+
+To use the Redis client, you first need to create a connection:
+
+```ts
+import { redis, RedisClient } from "bun";
+
+// Using the default client (reads connection info from environment)
+// process.env.REDIS_URL is used by default
+await redis.set("hello", "world");
+const result = await redis.get("hello");
+
+// Creating a custom client
+const client = new RedisClient("redis://username:password@localhost:6379");
+await client.set("counter", "0");
+await client.incr("counter");
+```
+
+By default, the client reads its connection information from the environment, in order of precedence:
+
+- `REDIS_URL`
+- If `REDIS_URL` is not set, the client falls back to `"redis://localhost:6379"` (see the example below)
+
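+For example, with `REDIS_URL` unset, the default `redis` client behaves like an explicit client pointed at the local default port. A small sketch, assuming a Redis server is running locally (the key names are illustrative):
+
+```ts
+import { redis, RedisClient } from "bun";
+
+// Equivalent target when REDIS_URL is not set
+const explicit = new RedisClient("redis://localhost:6379");
+
+// Both clients talk to the same local server in that case
+await redis.set("from-default-client", "1");
+await explicit.set("from-explicit-client", "1");
+```
+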
+### Connection Lifecycle
+
+The Redis client automatically handles connections in the background:
+
+```ts
+// No connection is made until a command is executed
+const client = new RedisClient();
+
+// First command initiates the connection
+await client.set("key", "value");
+
+// Connection remains open for subsequent commands
+await client.get("key");
+
+// Explicitly close the connection when done
+client.disconnect();
+```
+
+You can also manually control the connection lifecycle:
+
+```ts
+const client = new RedisClient();
+
+// Explicitly connect
+await client.connect();
+
+// Run commands
+await client.set("key", "value");
+
+// Disconnect when done
+client.disconnect();
+```
+
+## Basic Operations
+
+### String Operations
+
+```ts
+// Set a key
+await redis.set("user:1:name", "Alice");
+
+// Get a key
+const name = await redis.get("user:1:name");
+
+// Delete a key
+await redis.del("user:1:name");
+
+// Check if a key exists
+const exists = await redis.exists("user:1:name");
+
+// Set expiration (in seconds)
+await redis.set("session:123", "active");
+await redis.expire("session:123", 3600); // expires in 1 hour
+
+// Get time to live (in seconds)
+const ttl = await redis.ttl("session:123");
+```
+
+### Numeric Operations
+
+```ts
+// Set initial value
+await redis.set("counter", "0");
+
+// Increment by 1
+await redis.incr("counter");
+
+// Decrement by 1
+await redis.decr("counter");
+```
+
+### Hash Operations
+
+```ts
+// Set multiple fields in a hash
+await redis.hmset("user:123", [
+ "name",
+ "Alice",
+ "email",
+ "alice@example.com",
+ "active",
+ "true",
+]);
+
+// Get multiple fields from a hash
+const userFields = await redis.hmget("user:123", ["name", "email"]);
+console.log(userFields); // ["Alice", "alice@example.com"]
+
+// Increment a numeric field in a hash
+await redis.hincrby("user:123", "visits", 1);
+
+// Increment a float field in a hash
+await redis.hincrbyfloat("user:123", "score", 1.5);
+```
+
+### Set Operations
+
+```ts
+// Add member to set
+await redis.sadd("tags", "javascript");
+
+// Remove member from set
+await redis.srem("tags", "javascript");
+
+// Check if member exists in set
+const isMember = await redis.sismember("tags", "javascript");
+
+// Get all members of a set
+const allTags = await redis.smembers("tags");
+
+// Get a random member
+const randomTag = await redis.srandmember("tags");
+
+// Pop (remove and return) a random member
+const poppedTag = await redis.spop("tags");
+```
+
+## Advanced Usage
+
+### Command Execution and Pipelining
+
+The client automatically pipelines commands, improving performance by sending multiple commands in a batch and processing responses as they arrive.
+
+```ts
+// These commands are automatically pipelined
+const [infoResult, listResult] = await Promise.all([
+ redis.sendCommand("INFO", []),
+ redis.sendCommand("LRANGE", ["mylist", "0", "-1"]),
+]);
+```
+
+> **Note**: Commands are processed by the server in the order they are received. The results array from `Promise.all()` preserves the order of the input promises, even if some commands take longer than others to complete.
+
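+For example, awaiting each command sequentially still works, but gives up most of the pipelining benefit because each round trip finishes before the next command is written. A small sketch using the default client (key names are illustrative):
+
+```ts
+// Sequential: roughly one round trip per command
+await redis.set("a", "1");
+await redis.set("b", "2");
+
+// Batched: both commands are written before either reply is read
+await Promise.all([redis.set("a", "1"), redis.set("b", "2")]);
+```
+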
+### Raw Commands
+
+When you need to use commands that don't have convenience methods, you can use the `sendCommand` method:
+
+```ts
+// Run any Redis command
+const info = await redis.sendCommand("INFO", []);
+
+// LPUSH to a list
+await redis.sendCommand("LPUSH", ["mylist", "value1", "value2"]);
+
+// Get list range
+const list = await redis.sendCommand("LRANGE", ["mylist", "0", "-1"]);
+```
+
+The `sendCommand` method lets you run any Redis command, including ones that don't have dedicated methods in the client. The first argument is the command name, and the second argument is an array of string arguments.
+
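+For instance, there is no dedicated `zadd` method, so a sorted-set write can be issued through the raw command interface. A sketch; the key and member names are illustrative:
+
+```ts
+// Add a member to a sorted set with a score of 100
+await redis.sendCommand("ZADD", ["leaderboard", "100", "player:1"]);
+
+// Read the whole set back with a raw ZRANGE
+const members = await redis.sendCommand("ZRANGE", ["leaderboard", "0", "-1"]);
+```
+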
+### Connection Events
+
+You can register handlers for connection events:
+
+```ts
+const client = new RedisClient();
+
+// Called when successfully connected to Redis server
+client.onconnect = () => {
+ console.log("Connected to Redis server");
+};
+
+// Called when disconnected from Redis server
+client.onclose = error => {
+ console.error("Disconnected from Redis server:", error);
+};
+
+// Manually connect/disconnect
+await client.connect();
+client.disconnect();
+```
+
+### Connection Status and Monitoring
+
+```ts
+// Check if connected
+console.log(client.connected); // boolean indicating connection status
+
+// Check amount of data buffered (in bytes)
+console.log(client.bufferedAmount);
+```
+
+### Type Conversion
+
+The Redis client handles automatic type conversion for Redis responses (illustrated in the sketch after these lists):
+
+- Integer responses are returned as JavaScript numbers
+- Bulk strings are returned as JavaScript strings
+- Simple strings are returned as JavaScript strings
+- Null bulk strings are returned as `null`
+- Array responses are returned as JavaScript arrays
+- Error responses throw JavaScript errors with appropriate error codes
+- Boolean responses (RESP3) are returned as JavaScript booleans
+- Map responses (RESP3) are returned as JavaScript objects
+- Set responses (RESP3) are returned as JavaScript arrays
+
+Special handling for specific commands:
+
+- `EXISTS` returns a boolean instead of a number (1 becomes true, 0 becomes false)
+- `SISMEMBER` returns a boolean (1 becomes true, 0 becomes false)
+
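+A small sketch of these conversions in practice, assuming a running server and the default client:
+
+```ts
+await redis.set("views", "41");
+
+const views = await redis.incr("views"); // number: 42
+const present = await redis.exists("views"); // boolean: true
+const missing = await redis.get("absent-key"); // null when the key doesn't exist
+
+console.log(typeof views, typeof present, missing); // "number" "boolean" null
+```
+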
+## Connection Options
+
+When creating a client, you can pass various options to configure the connection:
+
+```ts
+const client = new RedisClient("redis://localhost:6379", {
+ // Connection timeout in milliseconds (default: 10000)
+ connectionTimeout: 5000,
+
+ // Socket timeout in milliseconds (default: 0 = no timeout)
+ socketTimeout: 0,
+
+ // Idle timeout in milliseconds (default: 0 = no timeout)
+ idleTimeout: 30000,
+
+ // Whether to automatically reconnect on disconnection (default: true)
+ autoReconnect: true,
+
+ // Maximum number of reconnection attempts (default: 10)
+ maxRetries: 10,
+
+ // Whether to queue commands when disconnected (default: true)
+ enableOfflineQueue: true,
+
+ // TLS options (default: false)
+ tls: true,
+ // Alternatively, provide custom TLS config:
+ // tls: {
+ // rejectUnauthorized: true,
+ // ca: "path/to/ca.pem",
+ // cert: "path/to/cert.pem",
+ // key: "path/to/key.pem",
+ // }
+});
+```
+
+### Reconnection Behavior
+
+When a connection is lost, the client automatically attempts to reconnect with exponential backoff (sketched in the example after this list):
+
+1. The client starts with a small delay (50ms) and doubles it with each attempt
+2. Reconnection delay is capped at 2000ms (2 seconds)
+3. The client attempts to reconnect up to `maxRetries` times (default: 10)
+4. Commands executed during disconnection are:
+ - Queued if `enableOfflineQueue` is true (default)
+ - Rejected immediately if `enableOfflineQueue` is false
+
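+A back-of-the-envelope sketch of the resulting delay schedule, assuming the delay simply doubles from 50ms and is capped at 2000ms (the helper below is hypothetical and the actual implementation may differ in detail):
+
+```ts
+// Hypothetical helper mirroring the documented backoff behavior
+function reconnectDelay(attempt: number): number {
+  // attempt is 1-based: 50ms, 100ms, 200ms, ... capped at 2000ms
+  return Math.min(50 * 2 ** (attempt - 1), 2000);
+}
+
+console.log([1, 2, 3, 4, 5, 6, 7].map(reconnectDelay));
+// [ 50, 100, 200, 400, 800, 1600, 2000 ]
+```
+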
+## Supported URL Formats
+
+The Redis client supports various URL formats:
+
+```ts
+// Standard Redis URL
+new RedisClient("redis://localhost:6379");
+
+// With authentication
+new RedisClient("redis://username:password@localhost:6379");
+
+// With database number
+new RedisClient("redis://localhost:6379/0");
+
+// TLS connections
+new RedisClient("rediss://localhost:6379");
+new RedisClient("redis+tls://localhost:6379");
+
+// Unix socket connections
+new RedisClient("redis+unix:///path/to/socket");
+
+// TLS over Unix socket
+new RedisClient("redis+tls+unix:///path/to/socket");
+```
+
+## Error Handling
+
+The Redis client throws typed errors for different scenarios:
+
+```ts
+try {
+ await redis.get("non-existent-key");
+} catch (error) {
+ if (error.code === "ERR_REDIS_CONNECTION_CLOSED") {
+ console.error("Connection to Redis server was closed");
+ } else if (error.code === "ERR_REDIS_AUTHENTICATION_FAILED") {
+ console.error("Authentication failed");
+ } else {
+ console.error("Unexpected error:", error);
+ }
+}
+```
+
+Common error codes:
+
+- `ERR_REDIS_CONNECTION_CLOSED` - Connection to the server was closed
+- `ERR_REDIS_AUTHENTICATION_FAILED` - Failed to authenticate with the server
+- `ERR_REDIS_INVALID_RESPONSE` - Received an invalid response from the server
+- `ERR_REDIS_INVALID_RESPONSE_TYPE` - Response type not recognized
+- `ERR_REDIS_TLS_NOT_AVAILABLE` - TLS/SSL not available on this connection
+- `ERR_REDIS_TLS_UPGRADE_FAILED` - Failed to upgrade connection to TLS/SSL
+- `ERR_REDIS_INVALID_ARGUMENT` - Invalid argument passed to a command
+- `ERR_REDIS_INVALID_PASSWORD` - Invalid password when authenticating
+- `ERR_REDIS_INVALID_USERNAME` - Invalid username when authenticating
+
+## Example Use Cases
+
+### Caching
+
+```ts
+async function getUserWithCache(userId) {
+ const cacheKey = `user:${userId}`;
+
+ // Try to get from cache first
+ const cachedUser = await redis.get(cacheKey);
+ if (cachedUser) {
+ return JSON.parse(cachedUser);
+ }
+
+ // Not in cache, fetch from database
+ const user = await database.getUser(userId);
+
+ // Store in cache for 1 hour
+ await redis.set(cacheKey, JSON.stringify(user));
+ await redis.expire(cacheKey, 3600);
+
+ return user;
+}
+```
+
+### Rate Limiting
+
+```ts
+async function rateLimit(ip, limit = 100, windowSecs = 3600) {
+ const key = `ratelimit:${ip}`;
+
+ // Increment counter
+ const count = await redis.incr(key);
+
+ // Set expiry if this is the first request in window
+ if (count === 1) {
+ await redis.expire(key, windowSecs);
+ }
+
+ // Check if limit exceeded
+ return {
+ limited: count > limit,
+ remaining: Math.max(0, limit - count),
+ };
+}
+```
+
+### Session Storage
+
+```ts
+async function createSession(userId, data) {
+ const sessionId = crypto.randomUUID();
+ const key = `session:${sessionId}`;
+
+ // Store session with expiration
+ await redis.hmset(key, [
+ "userId",
+ userId.toString(),
+ "created",
+ Date.now().toString(),
+ "data",
+ JSON.stringify(data),
+ ]);
+ await redis.expire(key, 86400); // 24 hours
+
+ return sessionId;
+}
+
+async function getSession(sessionId) {
+ const key = `session:${sessionId}`;
+
+ // Get session data
+ const exists = await redis.exists(key);
+ if (!exists) return null;
+
+ const [userId, created, data] = await redis.hmget(key, [
+ "userId",
+ "created",
+ "data",
+ ]);
+
+ return {
+ userId: Number(userId),
+ created: Number(created),
+ data: JSON.parse(data),
+ };
+}
+```
+
+## Implementation Notes
+
+Bun's Redis client is implemented in Zig and uses the Redis Serialization Protocol (RESP3). It manages connections efficiently and provides automatic reconnection with exponential backoff.
+
+The client supports pipelining commands, meaning multiple commands can be sent without waiting for the replies to previous commands. This significantly improves performance when sending multiple commands in succession.
+
+### RESP3 Protocol Support
+
+Bun's Redis client uses the newer RESP3 protocol by default, which provides more data types and features compared to RESP2:
+
+- Better error handling with typed errors
+- Native Boolean responses
+- Map/Dictionary responses (key-value objects)
+- Set responses
+- Double (floating point) values
+- BigNumber support for large integer values
+
+When connecting to older Redis servers that don't support RESP3, the client automatically falls back to a compatible mode.
+
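+One place the richer protocol shows up in practice is `hgetall`, which resolves to a plain object rather than a flat array of field/value pairs. A sketch with the default client (the key and field names are illustrative):
+
+```ts
+await redis.hmset("user:7", ["name", "Ada", "role", "admin"]);
+
+const user = await redis.hgetall("user:7");
+console.log(user); // { name: "Ada", role: "admin" }
+```
+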
+### Performance Considerations
+
+For optimal performance with Bun's Redis client:
+
+1. **Connection Reuse**: Create a single client and reuse it for all operations
+2. **Pipelining**: Use `Promise.all()` for multiple operations when possible
+3. **Command Batching**: For operations on multiple keys, prefer batch operations (like `hmset` for hashes)
+
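+A common pattern that follows these guidelines is to construct the client once in a shared module and import it wherever it is needed. A sketch; the file name is illustrative:
+
+```ts
+// redis-client.ts (hypothetical shared module)
+import { RedisClient } from "bun";
+
+// One client for the whole process; connects lazily on first command
+export const client = new RedisClient(process.env.REDIS_URL);
+
+// elsewhere in the app:
+// import { client } from "./redis-client";
+// await client.set("reused", "yes");
+```
+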
+## Limitations and Future Plans
+
+Current limitations of the Redis client:
+
+- Limited TypeScript return type specificity (for example, `get` is typed as `string | null`; more precise per-command types would be helpful)
+- No dedicated API for pub/sub functionality (though you can use the raw command API)
+- Transactions (MULTI/EXEC) must be done through raw commands for now
+- Streams are supported but without dedicated methods
+- Server-sent events not yet supported
+
+These are areas that may be improved in future versions of the client.
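+
+In the meantime, a `MULTI`/`EXEC` transaction can be issued through the raw command interface. A sketch; under RESP semantics, each queued command is acknowledged with `QUEUED` and `EXEC` returns the array of replies:
+
+```ts
+await redis.sendCommand("MULTI", []);
+await redis.sendCommand("INCR", ["account:1:balance"]);
+await redis.sendCommand("DECR", ["account:2:balance"]);
+const replies = await redis.sendCommand("EXEC", []);
+```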
diff --git a/docs/nav.ts b/docs/nav.ts
index 7eff222579..5a9968911e 100644
--- a/docs/nav.ts
+++ b/docs/nav.ts
@@ -344,6 +344,9 @@ export default {
page("api/file-io", "File I/O", {
description: `Read and write files fast with Bun's heavily optimized file system API.`,
}), // "`Bun.write`"),
+ page("api/redis", "Redis client", {
+ description: `Bun provides a fast, native Redis client with automatic command pipelining for better performance.`,
+ }),
page("api/import-meta", "import.meta", {
description: `Module-scoped metadata and utilities`,
}), // "`bun:sqlite`"),
diff --git a/packages/bun-types/index.d.ts b/packages/bun-types/index.d.ts
index ee29d26f77..9e1e78842c 100644
--- a/packages/bun-types/index.d.ts
+++ b/packages/bun-types/index.d.ts
@@ -18,7 +18,7 @@
///
///
///
-
+/// <reference path="./redis.d.ts" />
///
// @ts-ignore Must disable this so it doesn't conflict with the DOM onmessage type, but still
diff --git a/packages/bun-types/redis.d.ts b/packages/bun-types/redis.d.ts
new file mode 100644
index 0000000000..3561c11d71
--- /dev/null
+++ b/packages/bun-types/redis.d.ts
@@ -0,0 +1,497 @@
+declare module "bun" {
+ export interface RedisOptions {
+ /**
+ * URL to connect to, defaults to "redis://localhost:6379"
+ * Supported protocols: redis://, rediss://, redis+unix://, redis+tls://
+ */
+ url?: string;
+
+ /**
+ * Connection timeout in milliseconds
+ * @default 10000
+ */
+ connectionTimeout?: number;
+
+ /**
+ * Idle timeout in milliseconds
+ * @default 0 (no timeout)
+ */
+ idleTimeout?: number;
+
+ /**
+ * Whether to automatically reconnect
+ * @default true
+ */
+ autoReconnect?: boolean;
+
+ /**
+ * Maximum number of reconnection attempts
+ * @default 10
+ */
+ maxRetries?: number;
+
+ /**
+ * Whether to queue commands when disconnected
+ * @default true
+ */
+ enableOfflineQueue?: boolean;
+
+ /**
+ * TLS options
+ * Can be a boolean or an object with TLS options
+ */
+ tls?:
+ | boolean
+ | {
+ key?: string | Buffer;
+ cert?: string | Buffer;
+ ca?: string | Buffer | Array<string | Buffer>;
+ rejectUnauthorized?: boolean;
+ };
+
+ /**
+ * Whether to enable auto-pipelining
+ * @default true
+ */
+ enableAutoPipelining?: boolean;
+ }
+
+ export class RedisClient {
+ /**
+ * Creates a new Redis client
+ * @param url URL to connect to, defaults to process.env.VALKEY_URL, process.env.REDIS_URL, or "valkey://localhost:6379"
+ * @param options Additional options
+ *
+ * @example
+ * ```ts
+ * const valkey = new RedisClient();
+ *
+ * await valkey.set("hello", "world");
+ *
+ * console.log(await valkey.get("hello"));
+ * ```
+ */
+ constructor(url?: string, options?: RedisOptions);
+
+ /**
+ * Whether the client is connected to the Redis server
+ */
+ readonly connected: boolean;
+
+ /**
+ * Amount of data buffered in bytes
+ */
+ readonly bufferedAmount: number;
+
+ /**
+ * Callback fired when the client connects to the Redis server
+ */
+ onconnect: ((this: RedisClient) => void) | null;
+
+ /**
+ * Callback fired when the client disconnects from the Redis server
+ * @param error The error that caused the disconnection
+ */
+ onclose: ((this: RedisClient, error: Error) => void) | null;
+
+ /**
+ * Connect to the Redis server
+ * @returns A promise that resolves when connected
+ */
+ connect(): Promise<void>;
+
+ /**
+ * Disconnect from the Redis server
+ */
+ disconnect(): void;
+
+ /**
+ * Send a raw command to the Redis server
+ * @param command The command to send
+ * @param args The arguments to the command
+ * @returns A promise that resolves with the command result
+ */
+ send(command: string, args: string[]): Promise<any>;
+
+ /**
+ * Get the value of a key
+ * @param key The key to get
+ * @returns Promise that resolves with the key's value, or null if the key doesn't exist
+ */
+ get(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Set the value of a key
+ * @param key The key to set
+ * @param value The value to set
+ * @returns Promise that resolves with "OK" on success
+ */
+ set(key: string | NodeJS.TypedArray | Blob, value: string | NodeJS.TypedArray | Blob): Promise<"OK">;
+
+ /**
+ * Delete a key
+ * @param key The key to delete
+ * @returns Promise that resolves with the number of keys removed
+ */
+ del(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Increment the integer value of a key by one
+ * @param key The key to increment
+ * @returns Promise that resolves with the new value
+ */
+ incr(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Decrement the integer value of a key by one
+ * @param key The key to decrement
+ * @returns Promise that resolves with the new value
+ */
+ decr(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Determine if a key exists
+ * @param key The key to check
+ * @returns Promise that resolves with true if the key exists, false otherwise
+ */
+ exists(key: string | NodeJS.TypedArray | Blob): Promise<boolean>;
+
+ /**
+ * Set a key's time to live in seconds
+ * @param key The key to set the expiration for
+ * @param seconds The number of seconds until expiration
+ * @returns Promise that resolves with 1 if the timeout was set, 0 if not
+ */
+ expire(key: string | NodeJS.TypedArray | Blob, seconds: number): Promise<number>;
+
+ /**
+ * Get the time to live for a key in seconds
+ * @param key The key to get the TTL for
+ * @returns Promise that resolves with the TTL, -1 if no expiry, or -2 if key doesn't exist
+ */
+ ttl(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Set multiple hash fields to multiple values
+ * @param key The hash key
+ * @param fieldValues An array of alternating field names and values
+ * @returns Promise that resolves with "OK" on success
+ */
+ hmset(key: string | NodeJS.TypedArray | Blob, fieldValues: string[]): Promise<"OK">;
+
+ /**
+ * Get the values of all the given hash fields
+ * @param key The hash key
+ * @param fields The fields to get
+ * @returns Promise that resolves with an array of values
+ */
+ hmget(key: string | NodeJS.TypedArray | Blob, fields: string[]): Promise<Array<string | null>>;
+
+ /**
+ * Check if a value is a member of a set
+ * @param key The set key
+ * @param member The member to check
+ * @returns Promise that resolves with true if the member exists, false otherwise
+ */
+ sismember(key: string | NodeJS.TypedArray | Blob, member: string): Promise<boolean>;
+
+ /**
+ * Add a member to a set
+ * @param key The set key
+ * @param member The member to add
+ * @returns Promise that resolves with 1 if the member was added, 0 if it already existed
+ */
+ sadd(key: string | NodeJS.TypedArray | Blob, member: string): Promise<number>;
+
+ /**
+ * Remove a member from a set
+ * @param key The set key
+ * @param member The member to remove
+ * @returns Promise that resolves with 1 if the member was removed, 0 if it didn't exist
+ */
+ srem(key: string | NodeJS.TypedArray | Blob, member: string): Promise<number>;
+
+ /**
+ * Get all the members in a set
+ * @param key The set key
+ * @returns Promise that resolves with an array of all members
+ */
+ smembers(key: string | NodeJS.TypedArray | Blob): Promise<string[]>;
+
+ /**
+ * Get a random member from a set
+ * @param key The set key
+ * @returns Promise that resolves with a random member, or null if the set is empty
+ */
+ srandmember(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Remove and return a random member from a set
+ * @param key The set key
+ * @returns Promise that resolves with the removed member, or null if the set is empty
+ */
+ spop(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Increment the integer value of a hash field by the given number
+ * @param key The hash key
+ * @param field The field to increment
+ * @param increment The amount to increment by
+ * @returns Promise that resolves with the new value
+ */
+ hincrby(key: string | NodeJS.TypedArray | Blob, field: string, increment: string | number): Promise<number>;
+
+ /**
+ * Increment the float value of a hash field by the given amount
+ * @param key The hash key
+ * @param field The field to increment
+ * @param increment The amount to increment by
+ * @returns Promise that resolves with the new value as a string
+ */
+ hincrbyfloat(key: string | NodeJS.TypedArray | Blob, field: string, increment: string | number): Promise<string>;
+
+ /**
+ * Get all the fields and values in a hash
+ * @param key The hash key
+ * @returns Promise that resolves with an object containing all fields and values
+ */
+ hgetall(key: string | NodeJS.TypedArray | Blob): Promise<Record<string, string> | null>;
+
+ /**
+ * Get all field names in a hash
+ * @param key The hash key
+ * @returns Promise that resolves with an array of field names
+ */
+ hkeys(key: string | NodeJS.TypedArray | Blob): Promise<string[]>;
+
+ /**
+ * Get the number of fields in a hash
+ * @param key The hash key
+ * @returns Promise that resolves with the number of fields
+ */
+ hlen(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Get all values in a hash
+ * @param key The hash key
+ * @returns Promise that resolves with an array of values
+ */
+ hvals(key: string | NodeJS.TypedArray | Blob): Promise<string[]>;
+
+ /**
+ * Find all keys matching the given pattern
+ * @param pattern The pattern to match
+ * @returns Promise that resolves with an array of matching keys
+ */
+ keys(pattern: string): Promise<string[]>;
+
+ /**
+ * Get the length of a list
+ * @param key The list key
+ * @returns Promise that resolves with the length of the list
+ */
+ llen(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Remove and get the first element in a list
+ * @param key The list key
+ * @returns Promise that resolves with the first element, or null if the list is empty
+ */
+ lpop(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Remove the expiration from a key
+ * @param key The key to persist
+ * @returns Promise that resolves with 1 if the timeout was removed, 0 if the key doesn't exist or has no timeout
+ */
+ persist(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Get the expiration time of a key as a UNIX timestamp in milliseconds
+ * @param key The key to check
+ * @returns Promise that resolves with the timestamp, or -1 if the key has no expiration, or -2 if the key doesn't exist
+ */
+ pexpiretime(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Get the time to live for a key in milliseconds
+ * @param key The key to check
+ * @returns Promise that resolves with the TTL in milliseconds, or -1 if the key has no expiration, or -2 if the key doesn't exist
+ */
+ pttl(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Remove and get the last element in a list
+ * @param key The list key
+ * @returns Promise that resolves with the last element, or null if the list is empty
+ */
+ rpop(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Get the number of members in a set
+ * @param key The set key
+ * @returns Promise that resolves with the cardinality (number of elements) of the set
+ */
+ scard(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Get the length of the value stored in a key
+ * @param key The key to check
+ * @returns Promise that resolves with the length of the string value, or 0 if the key doesn't exist
+ */
+ strlen(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Get the number of members in a sorted set
+ * @param key The sorted set key
+ * @returns Promise that resolves with the cardinality (number of elements) of the sorted set
+ */
+ zcard(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Remove and return members with the highest scores in a sorted set
+ * @param key The sorted set key
+ * @returns Promise that resolves with the removed member and its score, or null if the set is empty
+ */
+ zpopmax(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Remove and return members with the lowest scores in a sorted set
+ * @param key The sorted set key
+ * @returns Promise that resolves with the removed member and its score, or null if the set is empty
+ */
+ zpopmin(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Get one or multiple random members from a sorted set
+ * @param key The sorted set key
+ * @returns Promise that resolves with a random member, or null if the set is empty
+ */
+ zrandmember(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Append a value to a key
+ * @param key The key to append to
+ * @param value The value to append
+ * @returns Promise that resolves with the length of the string after the append operation
+ */
+ append(key: string | NodeJS.TypedArray | Blob, value: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Set the value of a key and return its old value
+ * @param key The key to set
+ * @param value The value to set
+ * @returns Promise that resolves with the old value, or null if the key didn't exist
+ */
+ getset(key: string | NodeJS.TypedArray | Blob, value: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Prepend one or multiple values to a list
+ * @param key The list key
+ * @param value The value to prepend
+ * @returns Promise that resolves with the length of the list after the push operation
+ */
+ lpush(key: string | NodeJS.TypedArray | Blob, value: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Prepend a value to a list, only if the list exists
+ * @param key The list key
+ * @param value The value to prepend
+ * @returns Promise that resolves with the length of the list after the push operation, or 0 if the list doesn't exist
+ */
+ lpushx(key: string | NodeJS.TypedArray | Blob, value: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Add one or more members to a HyperLogLog
+ * @param key The HyperLogLog key
+ * @param element The element to add
+ * @returns Promise that resolves with 1 if the HyperLogLog was altered, 0 otherwise
+ */
+ pfadd(key: string | NodeJS.TypedArray | Blob, element: string): Promise<number>;
+
+ /**
+ * Append one or multiple values to a list
+ * @param key The list key
+ * @param value The value to append
+ * @returns Promise that resolves with the length of the list after the push operation
+ */
+ rpush(key: string | NodeJS.TypedArray | Blob, value: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Append a value to a list, only if the list exists
+ * @param key The list key
+ * @param value The value to append
+ * @returns Promise that resolves with the length of the list after the push operation, or 0 if the list doesn't exist
+ */
+ rpushx(key: string | NodeJS.TypedArray | Blob, value: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Set the value of a key, only if the key does not exist
+ * @param key The key to set
+ * @param value The value to set
+ * @returns Promise that resolves with 1 if the key was set, 0 if the key was not set
+ */
+ setnx(key: string | NodeJS.TypedArray | Blob, value: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Get the score associated with the given member in a sorted set
+ * @param key The sorted set key
+ * @param member The member to get the score for
+ * @returns Promise that resolves with the score of the member as a string, or null if the member or key doesn't exist
+ */
+ zscore(key: string | NodeJS.TypedArray | Blob, member: string): Promise<string | null>;
+
+ /**
+ * Get the values of all specified keys
+ * @param keys The keys to get
+ * @returns Promise that resolves with an array of values, with null for keys that don't exist
+ */
+ mget(...keys: (string | NodeJS.TypedArray | Blob)[]): Promise<(string | null)[]>;
+
+ /**
+ * Count the number of set bits (population counting) in a string
+ * @param key The key to count bits in
+ * @returns Promise that resolves with the number of bits set to 1
+ */
+ bitcount(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Return a serialized version of the value stored at the specified key
+ * @param key The key to dump
+ * @returns Promise that resolves with the serialized value, or null if the key doesn't exist
+ */
+ dump(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Get the expiration time of a key as a UNIX timestamp in seconds
+ * @param key The key to check
+ * @returns Promise that resolves with the timestamp, or -1 if the key has no expiration, or -2 if the key doesn't exist
+ */
+ expiretime(key: string | NodeJS.TypedArray | Blob): Promise<number>;
+
+ /**
+ * Get the value of a key and delete the key
+ * @param key The key to get and delete
+ * @returns Promise that resolves with the value of the key, or null if the key doesn't exist
+ */
+ getdel(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+
+ /**
+ * Get the value of a key and optionally set its expiration
+ * @param key The key to get
+ * @returns Promise that resolves with the value of the key, or null if the key doesn't exist
+ */
+ getex(key: string | NodeJS.TypedArray | Blob): Promise<string | null>;
+ }
+
+ /**
+ * Default Redis client
+ *
+ * Connection information populated from one of, in order of preference:
+ * - `process.env.VALKEY_URL`
+ * - `process.env.REDIS_URL`
+ * - `"valkey://localhost:6379"`
+ *
+ */
+ export const redis: RedisClient;
+}
diff --git a/src/analytics/analytics_thread.zig b/src/analytics/analytics_thread.zig
index e8b529c0ca..ef07b5edc5 100644
--- a/src/analytics/analytics_thread.zig
+++ b/src/analytics/analytics_thread.zig
@@ -128,6 +128,7 @@ pub const Features = struct {
pub var process_dlopen: usize = 0;
pub var postgres_connections: usize = 0;
pub var s3: usize = 0;
+ pub var valkey: usize = 0;
pub var csrf_verify: usize = 0;
pub var csrf_generate: usize = 0;
pub var unsupported_uv_function: usize = 0;
diff --git a/src/baby_list.zig b/src/baby_list.zig
index 1426aa956e..bb491e6b90 100644
--- a/src/baby_list.zig
+++ b/src/baby_list.zig
@@ -408,3 +408,54 @@ pub fn BabyList(comptime Type: type) type {
}
};
}
+
+pub fn OffsetList(comptime Type: type) type {
+ return struct {
+ head: u32 = 0,
+ byte_list: List = .{},
+
+ const List = BabyList(Type);
+ const ThisList = @This();
+
+ pub fn init(head: u32, byte_list: List) ThisList {
+ return .{
+ .head = head,
+ .byte_list = byte_list,
+ };
+ }
+
+ pub fn write(self: *ThisList, allocator: std.mem.Allocator, bytes: []const u8) !void {
+ _ = try self.byte_list.write(allocator, bytes);
+ }
+
+ pub fn slice(this: *ThisList) []u8 {
+ return this.byte_list.slice()[0..this.head];
+ }
+
+ pub fn remaining(this: *ThisList) []u8 {
+ return this.byte_list.slice()[this.head..];
+ }
+
+ pub fn consume(self: *ThisList, bytes: u32) void {
+ self.head +|= bytes;
+ if (self.head >= self.byte_list.len) {
+ self.head = 0;
+ self.byte_list.len = 0;
+ }
+ }
+
+ pub fn len(self: *const ThisList) u32 {
+ return self.byte_list.len - self.head;
+ }
+
+ pub fn clear(self: *ThisList) void {
+ self.head = 0;
+ self.byte_list.len = 0;
+ }
+
+ pub fn deinit(self: *ThisList, allocator: std.mem.Allocator) void {
+ self.byte_list.deinitWithAllocator(allocator);
+ self.* = .{};
+ }
+ };
+}
diff --git a/src/bun.js/api/BunObject.zig b/src/bun.js/api/BunObject.zig
index 5105068633..b83f48f276 100644
--- a/src/bun.js/api/BunObject.zig
+++ b/src/bun.js/api/BunObject.zig
@@ -38,6 +38,7 @@ pub const BunObject = struct {
pub const udpSocket = toJSCallback(JSC.wrapStaticMethod(JSC.API.UDPSocket, "udpSocket", false));
pub const which = toJSCallback(Bun.which);
pub const write = toJSCallback(JSC.WebCore.Blob.writeFile);
+
// --- Callbacks ---
// --- Getters ---
@@ -71,6 +72,8 @@ pub const BunObject = struct {
pub const unsafe = toJSGetter(Bun.getUnsafe);
pub const S3Client = toJSGetter(Bun.getS3ClientConstructor);
pub const s3 = toJSGetter(Bun.getS3DefaultClient);
+ pub const ValkeyClient = toJSGetter(Bun.getValkeyClientConstructor);
+ pub const valkey = toJSGetter(Bun.getValkeyDefaultClient);
// --- Getters ---
fn getterName(comptime baseName: anytype) [:0]const u8 {
@@ -130,7 +133,8 @@ pub const BunObject = struct {
@export(&BunObject.embeddedFiles, .{ .name = getterName("embeddedFiles") });
@export(&BunObject.S3Client, .{ .name = getterName("S3Client") });
@export(&BunObject.s3, .{ .name = getterName("s3") });
-
+ @export(&BunObject.ValkeyClient, .{ .name = getterName("ValkeyClient") });
+ @export(&BunObject.valkey, .{ .name = getterName("valkey") });
// --- Getters --
// -- Callbacks --
@@ -164,6 +168,7 @@ pub const BunObject = struct {
@export(&BunObject.udpSocket, .{ .name = callbackName("udpSocket") });
@export(&BunObject.which, .{ .name = callbackName("which") });
@export(&BunObject.write, .{ .name = callbackName("write") });
+
// -- Callbacks --
}
};
@@ -1229,9 +1234,27 @@ pub fn getGlobConstructor(globalThis: *JSC.JSGlobalObject, _: *JSC.JSObject) JSC
pub fn getS3ClientConstructor(globalThis: *JSC.JSGlobalObject, _: *JSC.JSObject) JSC.JSValue {
return JSC.WebCore.S3Client.getConstructor(globalThis);
}
+
pub fn getS3DefaultClient(globalThis: *JSC.JSGlobalObject, _: *JSC.JSObject) JSC.JSValue {
return globalThis.bunVM().rareData().s3DefaultClient(globalThis);
}
+
+pub fn getValkeyDefaultClient(globalThis: *JSC.JSGlobalObject, _: *JSC.JSObject) JSC.JSValue {
+ const valkey = JSC.API.Valkey.create(globalThis, &[_]JSValue{.undefined}) catch |err| {
+ if (err != error.JSError) {
+ _ = globalThis.throwError(err, "Failed to create Redis client") catch {};
+ return .zero;
+ }
+ return .zero;
+ };
+
+ return valkey.toJS(globalThis);
+}
+
+pub fn getValkeyClientConstructor(globalThis: *JSC.JSGlobalObject, _: *JSC.JSObject) JSC.JSValue {
+ return JSC.API.Valkey.getConstructor(globalThis);
+}
+
pub fn getEmbeddedFiles(globalThis: *JSC.JSGlobalObject, _: *JSC.JSObject) JSC.JSValue {
const vm = globalThis.bunVM();
const graph = vm.standalone_module_graph orelse return JSC.JSValue.createEmptyArray(globalThis, 0);
diff --git a/src/bun.js/api/Timer.zig b/src/bun.js/api/Timer.zig
index ddacbcf984..9359ce3b3c 100644
--- a/src/bun.js/api/Timer.zig
+++ b/src/bun.js/api/Timer.zig
@@ -1178,6 +1178,8 @@ pub const EventLoopTimer = struct {
WTFTimer,
PostgresSQLConnectionTimeout,
PostgresSQLConnectionMaxLifetime,
+ ValkeyConnectionTimeout,
+ ValkeyConnectionReconnect,
SubprocessTimeout,
pub fn Type(comptime T: Tag) type {
@@ -1194,6 +1196,8 @@ pub const EventLoopTimer = struct {
.PostgresSQLConnectionTimeout => JSC.Postgres.PostgresSQLConnection,
.PostgresSQLConnectionMaxLifetime => JSC.Postgres.PostgresSQLConnection,
.SubprocessTimeout => JSC.Subprocess,
+ .ValkeyConnectionReconnect => JSC.API.Valkey,
+ .ValkeyConnectionTimeout => JSC.API.Valkey,
};
}
} else enum {
@@ -1207,6 +1211,8 @@ pub const EventLoopTimer = struct {
DNSResolver,
PostgresSQLConnectionTimeout,
PostgresSQLConnectionMaxLifetime,
+ ValkeyConnectionTimeout,
+ ValkeyConnectionReconnect,
SubprocessTimeout,
pub fn Type(comptime T: Tag) type {
@@ -1221,6 +1227,8 @@ pub const EventLoopTimer = struct {
.DNSResolver => DNSResolver,
.PostgresSQLConnectionTimeout => JSC.Postgres.PostgresSQLConnection,
.PostgresSQLConnectionMaxLifetime => JSC.Postgres.PostgresSQLConnection,
+ .ValkeyConnectionTimeout => JSC.API.Valkey,
+ .ValkeyConnectionReconnect => JSC.API.Valkey,
.SubprocessTimeout => JSC.Subprocess,
};
}
@@ -1279,6 +1287,8 @@ pub const EventLoopTimer = struct {
switch (this.tag) {
.PostgresSQLConnectionTimeout => return @as(*JSC.Postgres.PostgresSQLConnection, @alignCast(@fieldParentPtr("timer", this))).onConnectionTimeout(),
.PostgresSQLConnectionMaxLifetime => return @as(*JSC.Postgres.PostgresSQLConnection, @alignCast(@fieldParentPtr("max_lifetime_timer", this))).onMaxLifetimeTimeout(),
+ .ValkeyConnectionTimeout => return @as(*JSC.API.Valkey, @alignCast(@fieldParentPtr("timer", this))).onConnectionTimeout(),
+ .ValkeyConnectionReconnect => return @as(*JSC.API.Valkey, @alignCast(@fieldParentPtr("reconnect_timer", this))).onReconnectTimer(),
inline else => |t| {
if (@FieldType(t.Type(), "event_loop_timer") != EventLoopTimer) {
@compileError(@typeName(t.Type()) ++ " has wrong type for 'event_loop_timer'");
diff --git a/src/bun.js/api/valkey.classes.ts b/src/bun.js/api/valkey.classes.ts
new file mode 100644
index 0000000000..6a71b6b9ed
--- /dev/null
+++ b/src/bun.js/api/valkey.classes.ts
@@ -0,0 +1,228 @@
+import { define } from "../../codegen/class-definitions";
+
+export default [
+ define({
+ name: "RedisClient",
+ construct: true,
+ call: false,
+ finalize: true,
+ configurable: false,
+ JSType: "0b11101110",
+ memoryCost: true,
+ proto: {
+ connected: {
+ getter: "getConnected",
+ },
+ onconnect: {
+ getter: "getOnConnect",
+ setter: "setOnConnect",
+ this: true,
+ },
+ onclose: {
+ getter: "getOnClose",
+ setter: "setOnClose",
+ this: true,
+ },
+ bufferedAmount: {
+ getter: "getBufferedAmount",
+ },
+ // Valkey commands
+ get: {
+ fn: "get",
+ length: 1,
+ },
+ set: {
+ fn: "set",
+ length: 2,
+ },
+ del: {
+ fn: "del",
+ length: 1,
+ },
+ incr: {
+ fn: "incr",
+ length: 1,
+ },
+ decr: {
+ fn: "decr",
+ length: 1,
+ },
+ exists: {
+ fn: "exists",
+ length: 1,
+ },
+ expire: {
+ fn: "expire",
+ length: 2,
+ },
+ connect: {
+ fn: "jsConnect",
+ length: 0,
+ },
+ disconnect: {
+ fn: "jsDisconnect",
+ length: 0,
+ },
+ send: {
+ fn: "jsSend",
+ length: 2,
+ },
+ ttl: {
+ fn: "ttl",
+ length: 1,
+ },
+ hmset: {
+ fn: "hmset",
+ length: 3,
+ },
+ hmget: {
+ fn: "hmget",
+ length: 2,
+ },
+ sismember: {
+ fn: "sismember",
+ length: 2,
+ },
+ sadd: {
+ fn: "sadd",
+ length: 2,
+ },
+ srem: {
+ fn: "srem",
+ length: 2,
+ },
+ smembers: {
+ fn: "smembers",
+ length: 1,
+ },
+ srandmember: {
+ fn: "srandmember",
+ length: 1,
+ },
+ spop: {
+ fn: "spop",
+ length: 1,
+ },
+ hincrby: {
+ fn: "hincrby",
+ length: 3,
+ },
+ hincrbyfloat: {
+ fn: "hincrbyfloat",
+ length: 3,
+ },
+ bitcount: {
+ fn: "bitcount",
+ },
+ dump: {
+ fn: "dump",
+ },
+ expiretime: {
+ fn: "expiretime",
+ },
+ getdel: {
+ fn: "getdel",
+ },
+ getex: {
+ fn: "getex",
+ },
+ hgetall: {
+ fn: "hgetall",
+ },
+ hkeys: {
+ fn: "hkeys",
+ },
+ hlen: {
+ fn: "hlen",
+ },
+ hvals: {
+ fn: "hvals",
+ },
+ keys: {
+ fn: "keys",
+ },
+ llen: {
+ fn: "llen",
+ },
+ lpop: {
+ fn: "lpop",
+ },
+ persist: {
+ fn: "persist",
+ },
+ pexpiretime: {
+ fn: "pexpiretime",
+ },
+ pttl: {
+ fn: "pttl",
+ },
+ rpop: {
+ fn: "rpop",
+ },
+ scard: {
+ fn: "scard",
+ },
+ strlen: {
+ fn: "strlen",
+ },
+ zcard: {
+ fn: "zcard",
+ },
+ zpopmax: {
+ fn: "zpopmax",
+ },
+ zpopmin: {
+ fn: "zpopmin",
+ },
+ zrandmember: {
+ fn: "zrandmember",
+ },
+ append: {
+ fn: "append",
+ },
+ getset: {
+ fn: "getset",
+ },
+ lpush: {
+ fn: "lpush",
+ },
+ lpushx: {
+ fn: "lpushx",
+ },
+ pfadd: {
+ fn: "pfadd",
+ },
+ rpush: {
+ fn: "rpush",
+ },
+ rpushx: {
+ fn: "rpushx",
+ },
+ setnx: {
+ fn: "setnx",
+ },
+ zscore: {
+ fn: "zscore",
+ },
+ mget: {
+ fn: "mget",
+ },
+ ping: { fn: "ping" },
+ publish: { fn: "publish" },
+ script: { fn: "script" },
+ select: { fn: "select" },
+ spublish: { fn: "spublish" },
+ smove: { fn: "smove" },
+ substr: { fn: "substr" },
+ hstrlen: { fn: "hstrlen" },
+ zrank: { fn: "zrank" },
+ zrevrank: { fn: "zrevrank" },
+ subscribe: { fn: "subscribe" },
+ psubscribe: { fn: "psubscribe" },
+ unsubscribe: { fn: "unsubscribe" },
+ punsubscribe: { fn: "punsubscribe" },
+ pubsub: { fn: "pubsub" },
+ },
+ values: ["onconnect", "onclose", "connectionPromise", "hello"],
+ }),
+];
diff --git a/src/bun.js/bindings/BunObject+exports.h b/src/bun.js/bindings/BunObject+exports.h
index d05a8cde6a..1d08f34c19 100644
--- a/src/bun.js/bindings/BunObject+exports.h
+++ b/src/bun.js/bindings/BunObject+exports.h
@@ -3,12 +3,14 @@
// --- Getters ---
#define FOR_EACH_GETTER(macro) \
+ macro(CSRF) \
macro(CryptoHasher) \
macro(FFI) \
macro(FileSystemRouter) \
macro(Glob) \
macro(MD4) \
macro(MD5) \
+ macro(S3Client) \
macro(SHA1) \
macro(SHA224) \
macro(SHA256) \
@@ -17,31 +19,33 @@
macro(SHA512_256) \
macro(TOML) \
macro(Transpiler) \
+ macro(ValkeyClient) \
macro(argv) \
macro(assetPrefix) \
macro(cwd) \
+ macro(embeddedFiles) \
macro(enableANSIColors) \
macro(hash) \
macro(inspect) \
macro(main) \
macro(origin) \
+ macro(s3) \
+ macro(semver) \
macro(stderr) \
macro(stdin) \
macro(stdout) \
macro(unsafe) \
- macro(semver) \
- macro(embeddedFiles) \
- macro(S3Client) \
- macro(s3) \
- macro(CSRF) \
+ macro(valkey) \
// --- Callbacks ---
#define FOR_EACH_CALLBACK(macro) \
macro(allocUnsafe) \
macro(braces) \
macro(build) \
- macro(connect) \
macro(color) \
+ macro(connect) \
+ macro(createParsedShellScript) \
+ macro(createShellInterpreter) \
macro(deflateSync) \
macro(file) \
macro(fs) \
@@ -53,7 +57,6 @@
macro(inflateSync) \
macro(jest) \
macro(listen) \
- macro(udpSocket) \
macro(mmap) \
macro(nanoseconds) \
macro(openInEditor) \
@@ -62,16 +65,15 @@
macro(resolveSync) \
macro(serve) \
macro(sha) \
+ macro(shellEscape) \
macro(shrink) \
macro(sleepSync) \
macro(spawn) \
macro(spawnSync) \
+ macro(stringWidth) \
+ macro(udpSocket) \
macro(which) \
macro(write) \
- macro(stringWidth) \
- macro(shellEscape) \
- macro(createShellInterpreter) \
- macro(createParsedShellScript) \
#define DECLARE_ZIG_BUN_OBJECT_CALLBACK(name) BUN_DECLARE_HOST_FUNCTION(BunObject_callback_##name);
FOR_EACH_CALLBACK(DECLARE_ZIG_BUN_OBJECT_CALLBACK);
diff --git a/src/bun.js/bindings/BunObject.cpp b/src/bun.js/bindings/BunObject.cpp
index 964d9cad4b..0e05be6107 100644
--- a/src/bun.js/bindings/BunObject.cpp
+++ b/src/bun.js/bindings/BunObject.cpp
@@ -788,6 +788,8 @@ JSC_DEFINE_HOST_FUNCTION(functionFileURLToPath, (JSC::JSGlobalObject * globalObj
unsafe BunObject_getter_wrap_unsafe DontDelete|PropertyCallback
version constructBunVersion ReadOnly|DontDelete|PropertyCallback
which BunObject_callback_which DontDelete|Function 1
+ RedisClient BunObject_getter_wrap_ValkeyClient DontDelete|PropertyCallback
+ redis BunObject_getter_wrap_valkey DontDelete|PropertyCallback
write BunObject_callback_write DontDelete|Function 1
@end
*/
diff --git a/src/bun.js/bindings/ErrorCode.ts b/src/bun.js/bindings/ErrorCode.ts
index 20443969cf..cae2652bf1 100644
--- a/src/bun.js/bindings/ErrorCode.ts
+++ b/src/bun.js/bindings/ErrorCode.ts
@@ -246,5 +246,24 @@ const errors: ErrorCodeMapping = [
["ERR_ZLIB_INITIALIZATION_FAILED", Error],
["MODULE_NOT_FOUND", Error],
["ERR_INTERNAL_ASSERTION", Error],
+
+ ["ERR_REDIS_CONNECTION_CLOSED", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_RESPONSE", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_BULK_STRING", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_ARRAY", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_INTEGER", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_SIMPLE_STRING", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_ERROR_STRING", Error, "RedisError"],
+ ["ERR_REDIS_TLS_NOT_AVAILABLE", Error, "RedisError"],
+ ["ERR_REDIS_TLS_UPGRADE_FAILED", Error, "RedisError"],
+ ["ERR_REDIS_AUTHENTICATION_FAILED", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_PASSWORD", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_USERNAME", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_DATABASE", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_COMMAND", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_ARGUMENT", Error, "RedisError"],
+ ["ERR_REDIS_INVALID_RESPONSE_TYPE", Error, "RedisError"],
+ ["ERR_REDIS_CONNECTION_TIMEOUT", Error, "RedisError"],
+ ["ERR_REDIS_IDLE_TIMEOUT", Error, "RedisError"],
];
export default errors;
diff --git a/src/bun.js/bindings/JSRef.zig b/src/bun.js/bindings/JSRef.zig
new file mode 100644
index 0000000000..d6f452cebe
--- /dev/null
+++ b/src/bun.js/bindings/JSRef.zig
@@ -0,0 +1,82 @@
+pub const JSRef = union(enum) {
+ weak: JSC.JSValue,
+ strong: JSC.Strong,
+ finalized: void,
+
+ pub fn initWeak(value: JSC.JSValue) @This() {
+ return .{ .weak = value };
+ }
+
+ pub fn initStrong(value: JSC.JSValue, globalThis: *JSC.JSGlobalObject) @This() {
+ return .{ .strong = JSC.Strong.create(value, globalThis) };
+ }
+
+ pub fn empty() @This() {
+ return .{ .weak = .zero };
+ }
+
+ pub fn get(this: *@This()) JSC.JSValue {
+ return switch (this.*) {
+ .weak => this.weak,
+ .strong => this.strong.get() orelse .zero,
+ .finalized => .zero,
+ };
+ }
+
+ pub fn tryGet(this: *@This()) ?JSC.JSValue {
+ return switch (this.*) {
+ .weak => if (this.weak != .zero) this.weak else null,
+ .strong => this.strong.get(),
+ .finalized => null,
+ };
+ }
+ pub fn setWeak(this: *@This(), value: JSC.JSValue) void {
+ switch (this.*) {
+ .weak => {},
+ .strong => {
+ this.strong.deinit();
+ },
+ .finalized => {
+ return;
+ },
+ }
+ this.* = .{ .weak = value };
+ }
+
+ pub fn setStrong(this: *@This(), value: JSC.JSValue, globalThis: *JSC.JSGlobalObject) void {
+ if (this.* == .strong) {
+ this.strong.set(globalThis, value);
+ return;
+ }
+ this.* = .{ .strong = JSC.Strong.create(value, globalThis) };
+ }
+
+ pub fn upgrade(this: *@This(), globalThis: *JSC.JSGlobalObject) void {
+ switch (this.*) {
+ .weak => {
+ bun.assert(this.weak != .zero);
+ this.* = .{ .strong = JSC.Strong.create(this.weak, globalThis) };
+ },
+ .strong => {},
+ .finalized => {
+ bun.debugAssert(false);
+ },
+ }
+ }
+
+ pub fn deinit(this: *@This()) void {
+ switch (this.*) {
+ .weak => {
+ this.weak = .zero;
+ },
+ .strong => {
+ this.strong.deinit();
+ },
+ .finalized => {},
+ }
+ }
+};
+
+const JSC = bun.JSC;
+const JSValue = JSC.JSValue;
+const bun = @import("root").bun;
diff --git a/src/bun.js/bindings/ZigString.zig b/src/bun.js/bindings/ZigString.zig
index 9b171b6b70..74cc6b4308 100644
--- a/src/bun.js/bindings/ZigString.zig
+++ b/src/bun.js/bindings/ZigString.zig
@@ -355,6 +355,10 @@ pub const ZigString = extern struct {
};
}
+ pub fn byteLength(this: *const Slice) usize {
+ return this.len;
+ }
+
pub fn toZigString(this: Slice) ZigString {
if (this.isAllocated())
return ZigString.initUTF8(this.ptr[0..this.len]);
@@ -419,7 +423,7 @@ pub const ZigString = extern struct {
return Slice{ .allocator = NullableAllocator.init(allocator), .ptr = duped.ptr, .len = this.len };
}
- pub fn slice(this: Slice) []const u8 {
+ pub fn slice(this: *const Slice) []const u8 {
return this.ptr[0..this.len];
}
diff --git a/src/bun.js/bindings/bindings.zig b/src/bun.js/bindings/bindings.zig
index 9633829240..89fefe4495 100644
--- a/src/bun.js/bindings/bindings.zig
+++ b/src/bun.js/bindings/bindings.zig
@@ -61,7 +61,7 @@ pub const WTF = @import("./WTF.zig").WTF;
pub const ScriptExecutionStatus = @import("./ScriptExecutionStatus.zig").ScriptExecutionStatus;
pub const DeferredError = @import("./DeferredError.zig").DeferredError;
pub const Sizes = @import("./sizes.zig");
-
+pub const JSRef = @import("./JSRef.zig").JSRef;
pub fn PromiseCallback(comptime Type: type, comptime CallbackFunction: fn (*Type, *JSGlobalObject, []const JSValue) anyerror!JSValue) type {
return struct {
pub fn callback(
diff --git a/src/bun.js/bindings/generated_classes_list.zig b/src/bun.js/bindings/generated_classes_list.zig
index a4eea5ebae..34fcfc718d 100644
--- a/src/bun.js/bindings/generated_classes_list.zig
+++ b/src/bun.js/bindings/generated_classes_list.zig
@@ -82,4 +82,5 @@ pub const Classes = struct {
pub const S3Client = JSC.WebCore.S3Client;
pub const S3Stat = JSC.WebCore.S3Stat;
pub const HTMLBundle = JSC.API.HTMLBundle;
+ pub const RedisClient = JSC.API.Valkey;
};
diff --git a/src/bun.js/node/types.zig b/src/bun.js/node/types.zig
index 1b2862092d..208a07952b 100644
--- a/src/bun.js/node/types.zig
+++ b/src/bun.js/node/types.zig
@@ -382,14 +382,29 @@ pub const BlobOrStringOrBuffer = union(enum) {
}
}
- pub fn fromJS(global: *JSC.JSGlobalObject, allocator: std.mem.Allocator, value: JSC.JSValue) JSError!?BlobOrStringOrBuffer {
- if (value.as(JSC.WebCore.Blob)) |blob| {
+ pub fn byteLength(this: *const BlobOrStringOrBuffer) usize {
+ return this.slice().len;
+ }
+
+ pub fn fromJSMaybeFile(global: *JSC.JSGlobalObject, allocator: std.mem.Allocator, value: JSC.JSValue, allow_file: bool) JSError!?BlobOrStringOrBuffer {
+ // Check StringOrBuffer first because it's more common and cheaper.
+ const str = try StringOrBuffer.fromJS(global, allocator, value) orelse {
+ const blob = value.as(JSC.WebCore.Blob) orelse return null;
+ if (allow_file and blob.needsToReadFile()) {
+ return global.throwInvalidArguments("File blob cannot be used here", .{});
+ }
+
if (blob.store) |store| {
store.ref();
}
return .{ .blob = blob.* };
- }
- return .{ .string_or_buffer = try StringOrBuffer.fromJS(global, allocator, value) orelse return null };
+ };
+
+ return .{ .string_or_buffer = str };
+ }
+
+ pub fn fromJS(global: *JSC.JSGlobalObject, allocator: std.mem.Allocator, value: JSC.JSValue) JSError!?BlobOrStringOrBuffer {
+ return fromJSMaybeFile(global, allocator, value, true);
}
pub fn fromJSWithEncodingValue(global: *JSC.JSGlobalObject, allocator: std.mem.Allocator, value: JSC.JSValue, encoding_value: JSC.JSValue) bun.JSError!?BlobOrStringOrBuffer {
diff --git a/src/bun.js/rare_data.zig b/src/bun.js/rare_data.zig
index 098d6fd349..de0f7ada7c 100644
--- a/src/bun.js/rare_data.zig
+++ b/src/bun.js/rare_data.zig
@@ -1,4 +1,5 @@
const EditorContext = @import("../open.zig").EditorContext;
+const ValkeyContext = @import("../valkey/valkey.zig").ValkeyContext;
const Blob = JSC.WebCore.Blob;
const default_allocator = bun.default_allocator;
const Output = bun.Output;
@@ -53,6 +54,8 @@ aws_signature_cache: AWSSignatureCache = .{},
s3_default_client: JSC.Strong = .empty,
default_csrf_secret: []const u8 = "",
+valkey_context: ValkeyContext = .{},
+
const PipeReadBuffer = [256 * 1024]u8;
const DIGESTED_HMAC_256_LEN = 32;
pub const AWSSignatureCache = struct {
@@ -502,4 +505,6 @@ pub fn deinit(this: *RareData) void {
}
this.cleanup_hooks.clearAndFree(bun.default_allocator);
+
+ this.valkey_context.deinit();
}
diff --git a/src/bun.js/webcore/AutoFlusher.zig b/src/bun.js/webcore/AutoFlusher.zig
new file mode 100644
index 0000000000..9952587e5a
--- /dev/null
+++ b/src/bun.js/webcore/AutoFlusher.zig
@@ -0,0 +1,26 @@
+registered: bool = false,
+
+pub fn registerDeferredMicrotaskWithType(comptime Type: type, this: *Type, vm: *JSC.VirtualMachine) void {
+ if (this.auto_flusher.registered) return;
+ registerDeferredMicrotaskWithTypeUnchecked(Type, this, vm);
+}
+
+pub fn unregisterDeferredMicrotaskWithType(comptime Type: type, this: *Type, vm: *JSC.VirtualMachine) void {
+ if (!this.auto_flusher.registered) return;
+ unregisterDeferredMicrotaskWithTypeUnchecked(Type, this, vm);
+}
+
+pub fn unregisterDeferredMicrotaskWithTypeUnchecked(comptime Type: type, this: *Type, vm: *JSC.VirtualMachine) void {
+ bun.assert(this.auto_flusher.registered);
+ bun.assert(vm.eventLoop().deferred_tasks.unregisterTask(this));
+ this.auto_flusher.registered = false;
+}
+
+pub fn registerDeferredMicrotaskWithTypeUnchecked(comptime Type: type, this: *Type, vm: *JSC.VirtualMachine) void {
+ bun.assert(!this.auto_flusher.registered);
+ this.auto_flusher.registered = true;
+ bun.assert(!vm.eventLoop().deferred_tasks.postTask(this, @ptrCast(&Type.onAutoFlush)));
+}
+
+const bun = @import("root").bun;
+const JSC = bun.JSC;
diff --git a/src/bun.js/webcore/streams.zig b/src/bun.js/webcore/streams.zig
index 9cbf8fe9cb..08afe3b5ae 100644
--- a/src/bun.js/webcore/streams.zig
+++ b/src/bun.js/webcore/streams.zig
@@ -1564,32 +1564,6 @@ pub const ArrayBufferSink = struct {
pub const JSSink = NewJSSink(@This(), "ArrayBufferSink");
};
-pub const AutoFlusher = struct {
- registered: bool = false,
-
- pub fn registerDeferredMicrotaskWithType(comptime Type: type, this: *Type, vm: *JSC.VirtualMachine) void {
- if (this.auto_flusher.registered) return;
- registerDeferredMicrotaskWithTypeUnchecked(Type, this, vm);
- }
-
- pub fn unregisterDeferredMicrotaskWithType(comptime Type: type, this: *Type, vm: *JSC.VirtualMachine) void {
- if (!this.auto_flusher.registered) return;
- unregisterDeferredMicrotaskWithTypeUnchecked(Type, this, vm);
- }
-
- pub fn unregisterDeferredMicrotaskWithTypeUnchecked(comptime Type: type, this: *Type, vm: *JSC.VirtualMachine) void {
- bun.assert(this.auto_flusher.registered);
- bun.assert(vm.eventLoop().deferred_tasks.unregisterTask(this));
- this.auto_flusher.registered = false;
- }
-
- pub fn registerDeferredMicrotaskWithTypeUnchecked(comptime Type: type, this: *Type, vm: *JSC.VirtualMachine) void {
- bun.assert(!this.auto_flusher.registered);
- this.auto_flusher.registered = true;
- bun.assert(!vm.eventLoop().deferred_tasks.postTask(this, @ptrCast(&Type.onAutoFlush)));
- }
-};
-
pub const SinkDestructor = struct {
const Detached = opaque {};
const Subprocess = JSC.API.Bun.Subprocess;
@@ -3421,6 +3395,8 @@ pub fn ReadableStreamSource(
};
}
+pub const AutoFlusher = @import("./AutoFlusher.zig");
+
pub const FileSink = struct {
writer: IOWriter = .{},
event_loop_handle: JSC.EventLoopHandle,
diff --git a/src/bun.zig b/src/bun.zig
index 6b3fee55f2..779eac7e63 100644
--- a/src/bun.zig
+++ b/src/bun.zig
@@ -443,53 +443,10 @@ pub const StringHashMapUnowned = struct {
}
};
};
+pub const OffsetList = @import("./baby_list.zig").OffsetList;
pub const BabyList = @import("./baby_list.zig").BabyList;
pub const ByteList = BabyList(u8);
-pub const OffsetByteList = struct {
- head: u32 = 0,
- byte_list: ByteList = .{},
-
- pub fn init(head: u32, byte_list: ByteList) OffsetByteList {
- return OffsetByteList{
- .head = head,
- .byte_list = byte_list,
- };
- }
-
- pub fn write(self: *OffsetByteList, allocator: std.mem.Allocator, bytes: []const u8) !void {
- _ = try self.byte_list.write(allocator, bytes);
- }
-
- pub fn slice(this: *OffsetByteList) []u8 {
- return this.byte_list.slice()[0..this.head];
- }
-
- pub fn remaining(this: *OffsetByteList) []u8 {
- return this.byte_list.slice()[this.head..];
- }
-
- pub fn consume(self: *OffsetByteList, bytes: u32) void {
- self.head +|= bytes;
- if (self.head >= self.byte_list.len) {
- self.head = 0;
- self.byte_list.len = 0;
- }
- }
-
- pub fn len(self: *const OffsetByteList) u32 {
- return self.byte_list.len - self.head;
- }
-
- pub fn clear(self: *OffsetByteList) void {
- self.head = 0;
- self.byte_list.len = 0;
- }
-
- pub fn deinit(self: *OffsetByteList, allocator: std.mem.Allocator) void {
- self.byte_list.deinitWithAllocator(allocator);
- self.* = .{};
- }
-};
+pub const OffsetByteList = OffsetList(u8);
pub fn DebugOnly(comptime Type: type) type {
if (comptime Environment.allow_assert) {
@@ -4232,3 +4189,4 @@ pub fn freeSensitive(allocator: std.mem.Allocator, slice: anytype) void {
pub const server = @import("./bun.js/api/server.zig");
pub const macho = @import("./macho.zig");
+pub const valkey = @import("./valkey/index.zig");
diff --git a/src/codegen/generate-classes.ts b/src/codegen/generate-classes.ts
index 0355fd999c..ae8221b565 100644
--- a/src/codegen/generate-classes.ts
+++ b/src/codegen/generate-classes.ts
@@ -1892,7 +1892,7 @@ const JavaScriptCoreBindings = struct {
exports.set("finalize", classSymbolName(typeName, "finalize"));
output += `
pub fn ${classSymbolName(typeName, "finalize")}(thisValue: *${typeName}) callconv(JSC.conv) void {
- if (comptime Environment.enable_logs) zig("~${typeName} 0x{x:8}", .{@intFromPtr(thisValue)});
+ if (comptime Environment.enable_logs) log_zig_finalize("${typeName}", thisValue);
@call(.always_inline, ${typeName}.finalize, .{thisValue});
}
`;
@@ -1902,7 +1902,7 @@ const JavaScriptCoreBindings = struct {
exports.set("construct", classSymbolName(typeName, "construct"));
output += `
pub fn ${classSymbolName(typeName, "construct")}(globalObject: *JSC.JSGlobalObject, callFrame: *JSC.CallFrame) callconv(JSC.conv) ?*anyopaque {
- if (comptime Environment.enable_logs) zig("new ${typeName}({})", .{callFrame});
+ if (comptime Environment.enable_logs) log_zig_constructor("${typeName}", callFrame);
return @as(*${typeName}, ${typeName}.constructor(globalObject, callFrame) catch |err| switch (err) {
error.JSError => return null,
error.OutOfMemory => {
@@ -1918,7 +1918,7 @@ const JavaScriptCoreBindings = struct {
exports.set("call", classSymbolName(typeName, "call"));
output += `
pub fn ${classSymbolName(typeName, "call")}(globalObject: *JSC.JSGlobalObject, callFrame: *JSC.CallFrame) callconv(JSC.conv) JSC.JSValue {
- if (comptime Environment.enable_logs) zig("${typeName}({})", .{callFrame});
+ if (comptime Environment.enable_logs) log_zig_call("${typeName}", callFrame);
return @call(.always_inline, JSC.toJSHostFunction(${typeName}.call), .{globalObject, callFrame});
}
`;
@@ -1928,7 +1928,7 @@ const JavaScriptCoreBindings = struct {
exports.set("getInternalProperties", classSymbolName(typeName, "getInternalProperties"));
output += `
pub fn ${classSymbolName(typeName, "getInternalProperties")}(thisValue: *${typeName}, globalObject: *JSC.JSGlobalObject, thisValue: JSC.JSValue) callconv(JSC.conv) JSC.JSValue {
- if (comptime Environment.enable_logs) JSC.markBinding(@src());
+ if (comptime Environment.enable_logs) log_zig_get_internal_properties("${typeName}");
return @call(.always_inline, ${typeName}.getInternalProperties, .{thisValue, globalObject, thisValue});
}
`;
@@ -1942,7 +1942,7 @@ const JavaScriptCoreBindings = struct {
if (names.getter) {
output += `
pub fn ${names.getter}(this: *${typeName}, ${thisValue ? "thisValue: JSC.JSValue," : ""} globalObject: *JSC.JSGlobalObject) callconv(JSC.conv) JSC.JSValue {
- if (comptime Environment.enable_logs) zig("get ${typeName}.${name}", .{});
+ if (comptime Environment.enable_logs) log_zig_getter("${typeName}", "${name}");
return @call(.always_inline, ${typeName}.${getter}, .{this, ${thisValue ? "thisValue," : ""} globalObject});
}
`;
@@ -1951,7 +1951,7 @@ const JavaScriptCoreBindings = struct {
if (names.setter) {
output += `
pub fn ${names.setter}(this: *${typeName}, ${thisValue ? "thisValue: JSC.JSValue," : ""} globalObject: *JSC.JSGlobalObject, value: JSC.JSValue) callconv(JSC.conv) bool {
- if (comptime Environment.enable_logs) zig("set ${typeName}.${name} = {}", .{value});
+ if (comptime Environment.enable_logs) log_zig_setter("${typeName}", "${name}", value);
return @call(.always_inline, ${typeName}.${setter}, .{this, ${thisValue ? "thisValue," : ""} globalObject, value});
}
`;
@@ -1971,7 +1971,7 @@ const JavaScriptCoreBindings = struct {
output += `
pub fn ${names.fn}(thisValue: *${typeName}, globalObject: *JSC.JSGlobalObject, callFrame: *JSC.CallFrame${proto[name].passThis ? ", js_this_value: JSC.JSValue" : ""}) callconv(JSC.conv) JSC.JSValue {
- if (comptime Environment.enable_logs) zig("${typeName}.${name}({})", .{callFrame});
+ if (comptime Environment.enable_logs) log_zig_method("${typeName}", "${name}", callFrame);
return @call(.always_inline, JSC.toJSHostValue, .{globalObject, @call(.always_inline, ${typeName}.${fn}, .{thisValue, globalObject, callFrame${proto[name].passThis ? ", js_this_value" : ""}})});
}
`;
@@ -1987,7 +1987,7 @@ const JavaScriptCoreBindings = struct {
if (names.getter) {
output += `
pub fn ${names.getter}(globalObject: *JSC.JSGlobalObject, ${thisValue ? "thisValue: JSC.JSValue," : ""} propertyName: JSC.JSValue) callconv(JSC.conv) JSC.JSValue {
- if (comptime Environment.enable_logs) JSC.markBinding(@src());
+ if (comptime Environment.enable_logs) log_zig_class_getter("${typeName}", "${name}");
return @call(.always_inline, ${typeName}.${getter}, .{globalObject, ${thisValue ? "thisValue," : ""} propertyName});
}
`;
@@ -1996,7 +1996,7 @@ const JavaScriptCoreBindings = struct {
if (names.setter) {
output += `
pub fn ${names.setter}(globalObject: *JSC.JSGlobalObject, thisValue: JSC.JSValue, target: JSC.JSValue) callconv(JSC.conv) bool {
- if (comptime Environment.enable_logs) JSC.markBinding(@src());
+ if (comptime Environment.enable_logs) log_zig_class_setter("${typeName}", "${name}", target);
return @call(.always_inline, ${typeName}.${setter || accessor.setter}, .{thisValue, globalObject, target});
}
`;
@@ -2010,7 +2010,7 @@ const JavaScriptCoreBindings = struct {
pub fn ${names.DOMJIT}(globalObject: *JSC.JSGlobalObject, thisValue: JSC.JSValue, ${args
.map(ZigDOMJITArgTypeDefinition)
.join(", ")}) callconv(JSC.conv) JSC.JSValue {
- if (comptime Environment.enable_logs) JSC.markBinding(@src());
+ if (comptime Environment.enable_logs) log_zig_class_domjit("${typeName}", "${name}");
return @call(.always_inline, ${typeName}.${DOMJITName(fn)}, .{thisValue, globalObject, ${args.map((_, i) => `arg${i}`).join(", ")}});
}
`;
@@ -2018,7 +2018,7 @@ const JavaScriptCoreBindings = struct {
output += `
pub fn ${names.fn}(globalObject: *JSC.JSGlobalObject, callFrame: *JSC.CallFrame) callconv(JSC.conv) JSC.JSValue {
- if (comptime Environment.enable_logs) JSC.markBinding(@src());
+ if (comptime Environment.enable_logs) log_zig_class_method("${typeName}", "${name}", callFrame);
return @call(.always_inline, JSC.toJSHostFunction(${typeName}.${fn}), .{globalObject, callFrame});
}
`;
@@ -2030,7 +2030,7 @@ const JavaScriptCoreBindings = struct {
exports.set("structuredClone", symbolName(typeName, "onStructuredCloneSerialize"));
output += `
pub fn ${symbolName(typeName, "onStructuredCloneSerialize")}(thisValue: *${typeName}, globalObject: *JSC.JSGlobalObject, ctx: *anyopaque, writeBytes: WriteBytesFn) callconv(JSC.conv) void {
- if (comptime Environment.enable_logs) JSC.markBinding(@src());
+ if (comptime Environment.enable_logs) log_zig_structured_clone_serialize("${typeName}");
@call(.always_inline, ${typeName}.onStructuredCloneSerialize, .{thisValue, globalObject, ctx, writeBytes});
}
`;
@@ -2039,7 +2039,7 @@ const JavaScriptCoreBindings = struct {
exports.set("structuredClone_transferable", symbolName(typeName, "onStructuredCloneTransfer"));
output += `
pub fn ${exports.get("structuredClone_transferable")}(thisValue: *${typeName}, globalObject: *JSC.JSGlobalObject, ctx: *anyopaque, write: WriteBytesFn) callconv(JSC.conv) void {
- if (comptime Environment.enable_logs) JSC.markBinding(@src());
+ if (comptime Environment.enable_logs) log_zig_structured_clone_transfer("${typeName}");
@call(.always_inline, ${typeName}.onStructuredCloneTransfer, .{thisValue, globalObject, ctx, write});
}
`;
@@ -2049,7 +2049,7 @@ const JavaScriptCoreBindings = struct {
output += `
pub fn ${symbolName(typeName, "onStructuredCloneDeserialize")}(globalObject: *JSC.JSGlobalObject, ptr: [*]u8, end: [*]u8) callconv(JSC.conv) JSC.JSValue {
- if (comptime Environment.enable_logs) JSC.markBinding(@src());
+ if (comptime Environment.enable_logs) log_zig_structured_clone_deserialize("${typeName}");
return @call(.always_inline, JSC.toJSHostValue, .{ globalObject, @call(.always_inline, ${typeName}.onStructuredCloneDeserialize, .{globalObject, ptr, end}) });
}
`;
@@ -2083,7 +2083,7 @@ pub const ${className(typeName)} = struct {
/// Return the pointer to the wrapped object.
/// If the object does not match the type, return null.
pub fn fromJS(value: JSC.JSValue) ?*${typeName} {
- if (comptime Environment.enable_logs) zig("${typeName}.fromJS", .{});
+ if (comptime Environment.enable_logs) log_zig_from_js("${typeName}");
return ${symbolName(typeName, "fromJS")}(value);
}
@@ -2092,7 +2092,7 @@ pub const ${className(typeName)} = struct {
/// If the object is a subclass of the type or has mutated the structure, return null.
/// Note: this may return null for direct instances of the type if the user adds properties to the object.
pub fn fromJSDirect(value: JSC.JSValue) ?*${typeName} {
- if (comptime Environment.enable_logs) zig("${typeName}.fromJSDirect", .{});
+ if (comptime Environment.enable_logs) log_zig_from_js_direct("${typeName}");
return ${symbolName(typeName, "fromJSDirect")}(value);
}
@@ -2104,7 +2104,7 @@ pub const ${className(typeName)} = struct {
/// Get the ${typeName} constructor value.
/// This loads lazily from the global object.
pub fn getConstructor(globalObject: *JSC.JSGlobalObject) JSC.JSValue {
- if (comptime Environment.enable_logs) zig("${typeName}.getConstructor", .{});
+ if (comptime Environment.enable_logs) log_zig_get_constructor("${typeName}");
return ${symbolName(typeName, "getConstructor")}(globalObject);
}
`
@@ -2116,7 +2116,7 @@ pub const ${className(typeName)} = struct {
? `
/// Create a new instance of ${typeName}
pub fn toJS(this: *${typeName}, globalObject: *JSC.JSGlobalObject) JSC.JSValue {
- if (comptime Environment.enable_logs) zig("${typeName}.toJS", .{});
+ if (comptime Environment.enable_logs) log_zig_to_js("${typeName}");
if (comptime Environment.allow_assert) {
const value__ = ${symbolName(typeName, "create")}(globalObject, this);
@import("root").bun.assert(value__.as(${typeName}).? == this); // If this fails, likely a C ABI issue.
@@ -2469,6 +2469,118 @@ comptime {
${classes.map(a => `_ = ${className(a.name)};`).join("\n ")}
}
+
+// -- Shared logging helpers, so the generated bindings reuse one function per event kind instead of instantiating a separate zig(...) format call for every class member
+fn log_zig_method_call(typename: []const u8, method_name: []const u8, callframe: *JSC.CallFrame) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("{s}.{s}({d} args)", .{typename, method_name, callframe.arguments().len});
+ }
+}
+
+fn log_zig_getter(typename: []const u8, property_name: []const u8) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("get {s}.{s}", .{typename, property_name});
+ }
+}
+
+fn log_zig_setter(typename: []const u8, property_name: []const u8, value: JSC.JSValue) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("set {s}.{s} = {}", .{typename, property_name, value});
+ }
+}
+
+fn log_zig_finalize(typename: []const u8, ptr: *const anyopaque) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("~{s} 0x{x:8}", .{typename, @intFromPtr(ptr)});
+ }
+}
+
+fn log_zig_function_call(typename: []const u8, callframe: *JSC.CallFrame) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("{s}({d} args)", .{typename, callframe.arguments().len});
+ }
+}
+
+fn log_zig_constructor(typename: []const u8, callframe: *JSC.CallFrame) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("new {s}({d} args)", .{typename, callframe.arguments().len});
+ }
+}
+
+fn log_zig_call(typename: []const u8, callframe: *JSC.CallFrame) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("{s}({d} args)", .{typename, callframe.arguments().len});
+ }
+}
+
+fn log_zig_get_internal_properties(typename: []const u8) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("getInternalProperties {s}", .{typename});
+ }
+}
+
+fn log_zig_method(typename: []const u8, method_name: []const u8, callframe: *JSC.CallFrame) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("{s}.{s}({d} args)", .{typename, method_name, callframe.arguments().len});
+ }
+}
+
+fn log_zig_structured_clone_serialize(typename: []const u8) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("structuredCloneSerialize {s}", .{typename});
+ }
+}
+
+fn log_zig_structured_clone_transfer(typename: []const u8) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("structuredCloneTransfer {s}", .{typename});
+ }
+}
+
+fn log_zig_structured_clone_deserialize(typename: []const u8) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("structuredCloneDeserialize {s}", .{typename});
+ }
+}
+
+fn log_zig_from_js(typename: []const u8) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("{s}.fromJS", .{typename});
+ }
+}
+
+fn log_zig_from_js_direct(typename: []const u8) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("{s}.fromJSDirect", .{typename});
+ }
+}
+
+fn log_zig_get_constructor(typename: []const u8) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("{s}.constructor", .{typename});
+ }
+}
+
+fn log_zig_to_js(typename: []const u8) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("{s}.toJS", .{typename});
+ }
+}
+
+fn log_zig_class_method(typename: []const u8, method_name: []const u8, callframe: *JSC.CallFrame) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("{s}.{s}({d} args)", .{typename, method_name, callframe.arguments().len});
+ }
+}
+
+fn log_zig_class_getter(typename: []const u8, property_name: []const u8) callconv(bun.callconv_inline) void {
+ if (comptime Environment.enable_logs) {
+ zig("static get {s}.{s}", .{typename, property_name});
+ }
+}
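+
+// Referenced by the generated static-setter and DOMJIT bindings above; definitions assumed from those call sites.
+fn log_zig_class_setter(typename: []const u8, property_name: []const u8, value: JSC.JSValue) callconv(bun.callconv_inline) void {
+    if (comptime Environment.enable_logs) {
+        zig("static set {s}.{s} = {}", .{typename, property_name, value});
+    }
+}
+
+fn log_zig_class_domjit(typename: []const u8, method_name: []const u8) callconv(bun.callconv_inline) void {
+    if (comptime Environment.enable_logs) {
+        zig("DOMJIT {s}.{s}", .{typename, method_name});
+    }
+}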
+
`,
]);
diff --git a/src/deps/uws/socket.zig b/src/deps/uws/socket.zig
index 4c7feff00d..a319a14f99 100644
--- a/src/deps/uws/socket.zig
+++ b/src/deps/uws/socket.zig
@@ -129,8 +129,9 @@ pub const Socket = opaque {
}
pub fn write(this: *Socket, ssl: bool, data: []const u8, msg_more: bool) i32 {
- debug("us_socket_write({d}, {d})", .{ @intFromPtr(this), data.len });
- return us_socket_write(@intFromBool(ssl), this, data.ptr, @intCast(data.len), @intFromBool(msg_more));
+ const rc = us_socket_write(@intFromBool(ssl), this, data.ptr, @intCast(data.len), @intFromBool(msg_more));
+ debug("us_socket_write({d}, {d}) = {d}", .{ @intFromPtr(this), data.len, rc });
+ return rc;
}
pub fn write2(this: *Socket, ssl: bool, first: []const u8, second: []const u8) i32 {
diff --git a/src/jsc.zig b/src/jsc.zig
index 1eed6d914a..718f26fca8 100644
--- a/src/jsc.zig
+++ b/src/jsc.zig
@@ -54,6 +54,7 @@ pub const API = struct {
pub const NativeZlib = @import("./bun.js/node/node_zlib_binding.zig").SNativeZlib;
pub const NativeBrotli = @import("./bun.js/node/node_zlib_binding.zig").SNativeBrotli;
pub const HTMLBundle = @import("./bun.js/api/server/HTMLBundle.zig");
+ pub const Valkey = @import("./valkey/js_valkey.zig").JSValkeyClient;
};
pub const Postgres = @import("./sql/postgres.zig");
pub const DNS = @import("./bun.js/api/bun/dns_resolver.zig");
diff --git a/src/sql/postgres.zig b/src/sql/postgres.zig
index 20612af47a..d07c25c9b1 100644
--- a/src/sql/postgres.zig
+++ b/src/sql/postgres.zig
@@ -259,64 +259,8 @@ pub const PostgresSQLQueryResultMode = enum(u8) {
raw = 2,
};
-const JSRef = union(enum) {
- weak: JSC.JSValue,
- strong: JSC.Strong,
+const JSRef = JSC.JSRef;
- pub fn initWeak(value: JSC.JSValue) @This() {
- return .{ .weak = value };
- }
-
- pub fn initStrong(value: JSC.JSValue, globalThis: *JSC.JSGlobalObject) @This() {
- return .{ .strong = JSC.Strong.create(value, globalThis) };
- }
-
- pub fn empty() @This() {
- return .{ .weak = .zero };
- }
-
- pub fn get(this: *@This()) JSC.JSValue {
- return switch (this.*) {
- .weak => this.weak,
- .strong => this.strong.get() orelse .zero,
- };
- }
- pub fn setWeak(this: *@This(), value: JSC.JSValue) void {
- if (this == .strong) {
- this.strong.deinit();
- }
- this.* = .{ .weak = value };
- }
-
- pub fn setStrong(this: *@This(), value: JSC.JSValue, globalThis: *JSC.JSGlobalObject) void {
- if (this == .strong) {
- this.strong.set(globalThis, value);
- return;
- }
- this.* = .{ .strong = JSC.Strong.create(value, globalThis) };
- }
-
- pub fn upgrade(this: *@This(), globalThis: *JSC.JSGlobalObject) void {
- switch (this.*) {
- .weak => {
- bun.assert(this.weak != .zero);
- this.* = .{ .strong = JSC.Strong.create(this.weak, globalThis) };
- },
- .strong => {},
- }
- }
-
- pub fn deinit(this: *@This()) void {
- switch (this.*) {
- .weak => {
- this.weak = .zero;
- },
- .strong => {
- this.strong.deinit();
- },
- }
- }
-};
pub const PostgresSQLQuery = struct {
statement: ?*PostgresSQLStatement = null,
query: bun.String = bun.String.empty,
diff --git a/src/valkey/ValkeyCommand.zig b/src/valkey/ValkeyCommand.zig
new file mode 100644
index 0000000000..d1bae8e74f
--- /dev/null
+++ b/src/valkey/ValkeyCommand.zig
@@ -0,0 +1,160 @@
+command: []const u8,
+args: Args,
+meta: Meta = .{},
+
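+/// How a command's arguments are represented: pre-sliced ZigStrings, JS blob/string/buffer handles, or raw byte slices.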
+pub const Args = union(enum) {
+ slices: []const Slice,
+ args: []const JSC.Node.BlobOrStringOrBuffer,
+ raw: []const []const u8,
+
+ pub fn len(this: *const @This()) usize {
+ return switch (this.*) {
+ inline .slices, .args, .raw => |args| args.len,
+ };
+ }
+};
+
+pub fn write(this: *const Command, writer: anytype) !void {
+ // Serialize as RESP array format directly
+ try writer.print("*{d}\r\n", .{1 + this.args.len()});
+ try writer.print("${d}\r\n{s}\r\n", .{ this.command.len, this.command });
+
+ switch (this.args) {
+ inline .slices, .args => |args| {
+ for (args) |*arg| {
+ try writer.print("${d}\r\n{s}\r\n", .{ arg.byteLength(), arg.slice() });
+ }
+ },
+ .raw => |args| {
+ for (args) |arg| {
+ try writer.print("${d}\r\n{s}\r\n", .{ arg.len, arg });
+ }
+ },
+ }
+}
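+// Example (illustrative): .{ .command = "SET", .args = .{ .raw = &.{ "key", "value" } } } serializes to
+// "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", the standard RESP array encoding.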
+
+pub fn format(this: Command, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
+ try this.write(writer);
+}
+
+pub fn byteLength(this: *const Command) usize {
+ return std.fmt.count("{}", .{this.*});
+}
+
+pub fn serialize(this: *const Command, allocator: std.mem.Allocator) ![]u8 {
+ var buf = try std.ArrayList(u8).initCapacity(allocator, this.byteLength());
+ errdefer buf.deinit();
+ try this.write(buf.writer());
+ return buf.items;
+}
+
+/// Command stored in offline queue when disconnected
+pub const Entry = struct {
+ serialized_data: []u8, // Pre-serialized RESP protocol bytes
+ meta: Meta = .{},
+ promise: Promise,
+
+ pub const Queue = std.fifo.LinearFifo(Entry, .Dynamic);
+
+ pub fn deinit(self: *const @This(), allocator: std.mem.Allocator) void {
+ allocator.free(self.serialized_data);
+ }
+
+    // Create an offline-queue Entry by serializing the Valkey command up front
+ pub fn create(
+ allocator: std.mem.Allocator,
+ command: *const Command,
+ promise: Promise,
+ ) !Entry {
+ return Entry{
+ .serialized_data = try command.serialize(allocator),
+ .meta = command.meta.check(command),
+ .promise = promise,
+ };
+ }
+};
+
+pub fn deinit(_: *Command) void {
+ // no-op
+}
+
+pub const Meta = packed struct(u8) {
+ return_as_bool: bool = false,
+ supports_auto_pipelining: bool = true,
+ _padding: u6 = 0,
+
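+    // Commands that change connection, transaction, or subscription state must not be auto-pipelined.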
+ const not_allowed_autopipeline_commands = bun.ComptimeStringMap(void, .{
+ .{"AUTH"},
+ .{"INFO"},
+ .{"QUIT"},
+ .{"EXEC"},
+ .{"MULTI"},
+ .{"WATCH"},
+ .{"SCRIPT"},
+ .{"SELECT"},
+ .{"CLUSTER"},
+ .{"DISCARD"},
+ .{"UNWATCH"},
+ .{"PIPELINE"},
+ .{"SUBSCRIBE"},
+ .{"PSUBSCRIBE"},
+ .{"UNSUBSCRIBE"},
+ .{"UNPSUBSCRIBE"},
+ });
+
+ pub fn check(self: @This(), command: *const Command) @This() {
+ var new = self;
+ new.supports_auto_pipelining = !not_allowed_autopipeline_commands.has(command.command);
+ return new;
+ }
+};
+
+/// Promise for a Valkey command
+pub const Promise = struct {
+ meta: Meta,
+ promise: JSC.JSPromise.Strong,
+
+ pub fn create(globalObject: *JSC.JSGlobalObject, meta: Meta) Promise {
+ const promise = JSC.JSPromise.Strong.init(globalObject);
+ return Promise{
+ .meta = meta,
+ .promise = promise,
+ };
+ }
+
+ pub fn resolve(self: *Promise, globalObject: *JSC.JSGlobalObject, value: *protocol.RESPValue) void {
+ const js_value = value.toJS(globalObject) catch |err| {
+ self.reject(globalObject, globalObject.takeError(err));
+ return;
+ };
+ self.promise.resolve(globalObject, js_value);
+ }
+
+ pub fn reject(self: *Promise, globalObject: *JSC.JSGlobalObject, jsvalue: JSC.JSValue) void {
+ self.promise.reject(globalObject, jsvalue);
+ }
+
+ pub fn deinit(self: *Promise) void {
+ self.promise.deinit();
+ }
+};
+
+// Command+Promise pair for tracking which command corresponds to which promise
+pub const PromisePair = struct {
+ meta: Meta,
+ promise: Promise,
+
+ pub const Queue = std.fifo.LinearFifo(PromisePair, .Dynamic);
+
+ pub fn rejectCommand(self: *PromisePair, globalObject: *JSC.JSGlobalObject, jsvalue: JSC.JSValue) void {
+ self.promise.reject(globalObject, jsvalue);
+ }
+};
+
+const Command = @This();
+
+const bun = @import("root").bun;
+const JSC = bun.JSC;
+const protocol = @import("valkey_protocol.zig");
+const std = @import("std");
+const Slice = JSC.ZigString.Slice;
diff --git a/src/valkey/ValkeyContext.zig b/src/valkey/ValkeyContext.zig
new file mode 100644
index 0000000000..5feb901874
--- /dev/null
+++ b/src/valkey/ValkeyContext.zig
@@ -0,0 +1,26 @@
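+// Per-VM cache of uSockets contexts used by Valkey connections (see rareData().valkey_context in js_valkey.zig).
+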
+tcp: ?*uws.SocketContext = null,
+unix: ?*uws.SocketContext = null,
+tls: ?*uws.SocketContext = null,
+tls_unix: ?*uws.SocketContext = null,
+
+pub fn deinit(this: *@This()) void {
+ if (this.tcp) |ctx| {
+ this.tcp = null;
+ ctx.deinit(false);
+ }
+ if (this.unix) |ctx| {
+ this.unix = null;
+ ctx.deinit(false);
+ }
+ if (this.tls) |ctx| {
+ this.tls = null;
+ ctx.deinit(true);
+ }
+ if (this.tls_unix) |ctx| {
+ this.tls_unix = null;
+ ctx.deinit(true);
+ }
+}
+
+const bun = @import("root").bun;
+const uws = bun.uws;
diff --git a/src/valkey/index.zig b/src/valkey/index.zig
new file mode 100644
index 0000000000..57c464691f
--- /dev/null
+++ b/src/valkey/index.zig
@@ -0,0 +1,21 @@
+// Entry point for Valkey client
+//
+// Exports:
+// - Core Valkey client implementation in valkey.zig
+// - JavaScript wrapper in js_valkey.zig
+// - Valkey protocol implementation in valkey_protocol.zig
+
+// Import modules
+pub const valkey = @import("valkey.zig");
+pub const js_valkey = @import("js_valkey.zig");
+pub const protocol = @import("valkey_protocol.zig");
+
+// Export JS client
+pub const JSValkeyClient = js_valkey.JSValkeyClient;
+
+// Re-export key types for easy access
+pub const ValkeyClient = valkey.ValkeyClient;
+pub const Protocol = valkey.Protocol;
+pub const Status = valkey.Status;
+pub const Options = valkey.Options;
+pub const Command = @import("ValkeyCommand.zig");
diff --git a/src/valkey/js_valkey.zig b/src/valkey/js_valkey.zig
new file mode 100644
index 0000000000..f5e610a9d0
--- /dev/null
+++ b/src/valkey/js_valkey.zig
@@ -0,0 +1,865 @@
+/// Valkey client wrapper for JavaScript
+pub const JSValkeyClient = struct {
+ client: valkey.ValkeyClient,
+ globalObject: *JSC.JSGlobalObject,
+ this_value: JSC.JSRef = JSC.JSRef.empty(),
+ poll_ref: bun.Async.KeepAlive = .{},
+ timer: JSC.BunTimer.EventLoopTimer = .{
+ .tag = .ValkeyConnectionTimeout,
+ .next = .{
+ .sec = 0,
+ .nsec = 0,
+ },
+ },
+ reconnect_timer: JSC.BunTimer.EventLoopTimer = .{
+ .tag = .ValkeyConnectionReconnect,
+ .next = .{
+ .sec = 0,
+ .nsec = 0,
+ },
+ },
+
+ ref_count: u32 = 1,
+
+ pub usingnamespace JSC.Codegen.JSRedisClient;
+ pub usingnamespace bun.NewRefCounted(JSValkeyClient, deinit, null);
+
+ // Factory function to create a new Valkey client from JS
+ pub fn constructor(globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!*JSValkeyClient {
+ return try create(globalObject, callframe.arguments());
+ }
+
+ pub fn create(globalObject: *JSC.JSGlobalObject, arguments: []const JSValue) bun.JSError!*JSValkeyClient {
+ const vm = globalObject.bunVM();
+ const url_str = if (arguments.len < 1 or arguments[0].isUndefined())
+ if (vm.transpiler.env.get("REDIS_URL") orelse vm.transpiler.env.get("VALKEY_URL")) |url|
+ bun.String.init(url)
+ else
+ bun.String.init("valkey://localhost:6379")
+ else
+ try arguments[0].toBunString(globalObject);
+ defer url_str.deref();
+
+ const url_utf8 = url_str.toUTF8WithoutRef(bun.default_allocator);
+ defer url_utf8.deinit();
+ const url = bun.URL.parse(url_utf8.slice());
+
+ const uri: valkey.Protocol = if (url.protocol.len > 0)
+ valkey.Protocol.Map.get(url.protocol) orelse return globalObject.throw("Expected url protocol to be one of redis, valkey, rediss, valkeys, redis+tls, redis+unix, redis+tls+unix", .{})
+ else
+ .standalone;
+
+ var username: []const u8 = "";
+ var password: []const u8 = "";
+ var hostname: []const u8 = switch (uri) {
+ .standalone_tls, .standalone => url.displayHostname(),
+ .standalone_unix, .standalone_tls_unix => brk: {
+ const unix_socket_path = bun.strings.indexOf(url_utf8.slice(), "://") orelse {
+ return globalObject.throwInvalidArguments("Expected unix socket path after valkey+unix:// or valkey+tls+unix://", .{});
+ };
+ const path = url_utf8.slice()[unix_socket_path + 3 ..];
+ if (bun.strings.indexOfChar(path, '?')) |query_index| {
+ break :brk path[0..query_index];
+ }
+ if (path.len == 0) {
+ // "valkey+unix://?abc=123"
+ return globalObject.throwInvalidArguments("Expected unix socket path after valkey+unix:// or valkey+tls+unix://", .{});
+ }
+
+ break :brk path;
+ },
+ };
+
+ const port = switch (uri) {
+ .standalone_unix, .standalone_tls_unix => 0,
+ else => url.getPort() orelse 6379,
+ };
+
+ const options = if (arguments.len >= 2 and !arguments[1].isUndefinedOrNull() and arguments[1].isObject())
+ try Options.fromJS(globalObject, arguments[1])
+ else
+ valkey.Options{};
+
+ var connection_strings: []u8 = &.{};
+ errdefer {
+ bun.default_allocator.free(connection_strings);
+ }
+
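+        // Copy username/password/hostname into a single allocation; connection_strings owns the backing memory.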
+ if (url.username.len > 0 or url.password.len > 0 or hostname.len > 0) {
+ var b = bun.StringBuilder{};
+ b.count(url.username);
+ b.count(url.password);
+ b.count(hostname);
+ try b.allocate(bun.default_allocator);
+ username = b.append(url.username);
+ password = b.append(url.password);
+ hostname = b.append(hostname);
+ connection_strings = b.allocatedSlice();
+ }
+
+ const database = if (url.pathname.len > 0) std.fmt.parseInt(u32, url.pathname[1..], 10) catch 0 else 0;
+
+ bun.analytics.Features.valkey += 1;
+
+ return JSValkeyClient.new(.{
+ .client = valkey.ValkeyClient{
+ .vm = vm,
+ .address = switch (uri) {
+ .standalone_unix, .standalone_tls_unix => .{ .unix = hostname },
+ else => .{
+ .host = .{
+ .host = hostname,
+ .port = port,
+ },
+ },
+ },
+ .username = username,
+ .password = password,
+ .in_flight = .init(bun.default_allocator),
+ .queue = .init(bun.default_allocator),
+ .status = .disconnected,
+ .connection_strings = connection_strings,
+ .socket = .{
+ .SocketTCP = .{
+ .socket = .{
+ .detached = {},
+ },
+ },
+ },
+ .database = database,
+ .allocator = bun.default_allocator,
+ .flags = .{
+ .enable_auto_reconnect = options.enable_auto_reconnect,
+ .enable_offline_queue = options.enable_offline_queue,
+ .auto_pipelining = options.enable_auto_pipelining,
+ },
+ .max_retries = options.max_retries,
+ .connection_timeout_ms = options.connection_timeout_ms,
+ .idle_timeout_interval_ms = options.idle_timeout_ms,
+ },
+ .globalObject = globalObject,
+ .ref_count = 1,
+ });
+ }
+
+ pub fn getConnected(this: *JSValkeyClient, _: *JSC.JSGlobalObject) JSValue {
+ return JSValue.jsBoolean(this.client.status == .connected);
+ }
+
+ pub fn getBufferedAmount(this: *JSValkeyClient, _: *JSC.JSGlobalObject) JSValue {
+ const len =
+ this.client.write_buffer.len() +
+ this.client.read_buffer.len();
+ return JSValue.jsNumber(len);
+ }
+
+ pub fn jsConnect(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ this.ref();
+ defer this.deref();
+
+ const this_value = callframe.this();
+
+ // If already connected, resolve immediately
+ if (this.client.status == .connected) {
+ return JSC.JSPromise.resolvedPromiseValue(globalObject, JSValkeyClient.helloGetCached(this_value) orelse .undefined);
+ }
+
+ if (JSValkeyClient.connectionPromiseGetCached(this_value)) |promise| {
+ return promise;
+ }
+
+ const promise_ptr = JSC.JSPromise.create(globalObject);
+ const promise = promise_ptr.asValue(globalObject);
+ JSValkeyClient.connectionPromiseSetCached(this_value, globalObject, promise);
+
+        // If the client was manually closed, reset that flag
+ this.client.flags.is_manually_closed = false;
+ this.this_value.setStrong(this_value, globalObject);
+
+ if (this.client.flags.needs_to_open_socket) {
+ this.poll_ref.ref(this.client.vm);
+
+ this.connect() catch |err| {
+ this.poll_ref.unref(this.client.vm);
+ this.client.flags.needs_to_open_socket = true;
+ const err_value = globalObject.ERR_SOCKET_CLOSED_BEFORE_CONNECTION(" {s} connecting to Valkey", .{@errorName(err)}).toJS();
+ promise_ptr.reject(globalObject, err_value);
+ return promise;
+ };
+
+ this.resetConnectionTimeout();
+ return promise;
+ }
+
+ switch (this.client.status) {
+ .disconnected => {
+ this.client.flags.is_reconnecting = true;
+ this.client.retry_attempts = 0;
+ this.reconnect();
+ },
+ .failed => {
+ this.client.status = .disconnected;
+ this.client.flags.is_reconnecting = true;
+ this.client.retry_attempts = 0;
+ this.reconnect();
+ },
+ else => {},
+ }
+
+ return promise;
+ }
+
+ pub fn jsDisconnect(this: *JSValkeyClient, _: *JSC.JSGlobalObject, _: *JSC.CallFrame) bun.JSError!JSValue {
+ if (this.client.status == .disconnected) {
+ return .undefined;
+ }
+ this.client.disconnect();
+ return .undefined;
+ }
+
+ pub fn getOnConnect(_: *JSValkeyClient, thisValue: JSValue, _: *JSC.JSGlobalObject) JSValue {
+ if (JSValkeyClient.onconnectGetCached(thisValue)) |value| {
+ return value;
+ }
+ return .undefined;
+ }
+
+ pub fn setOnConnect(_: *JSValkeyClient, thisValue: JSValue, globalObject: *JSC.JSGlobalObject, value: JSValue) bool {
+ JSValkeyClient.onconnectSetCached(thisValue, globalObject, value);
+ return true;
+ }
+
+ pub fn getOnClose(_: *JSValkeyClient, thisValue: JSValue, _: *JSC.JSGlobalObject) JSValue {
+ if (JSValkeyClient.oncloseGetCached(thisValue)) |value| {
+ return value;
+ }
+ return .undefined;
+ }
+
+ pub fn setOnClose(_: *JSValkeyClient, thisValue: JSValue, globalObject: *JSC.JSGlobalObject, value: JSValue) bool {
+ JSValkeyClient.oncloseSetCached(thisValue, globalObject, value);
+ return true;
+ }
+
+ /// Safely add a timer with proper reference counting and event loop keepalive
+ fn addTimer(this: *JSValkeyClient, timer: *JSC.BunTimer.EventLoopTimer, next_timeout_ms: u32) void {
+ this.ref();
+ defer this.deref();
+
+ // If the timer is already active, we need to remove it first
+ if (timer.state == .ACTIVE) {
+ this.removeTimer(timer);
+ }
+
+ // Skip if timeout is zero
+ if (next_timeout_ms == 0) {
+ return;
+ }
+
+ // Store VM reference to use later
+ const vm = this.client.vm;
+
+ // Set up timer and add to event loop
+ timer.next = bun.timespec.msFromNow(@intCast(next_timeout_ms));
+ vm.timer.insert(timer);
+ this.ref();
+ }
+
+ /// Safely remove a timer with proper reference counting and event loop keepalive
+ fn removeTimer(this: *JSValkeyClient, timer: *JSC.BunTimer.EventLoopTimer) void {
+        if (timer.state == .ACTIVE) {
+ // Store VM reference to use later
+ const vm = this.client.vm;
+
+ // Remove the timer from the event loop
+ vm.timer.remove(timer);
+
+ // Balance the ref from addTimer
+ this.deref();
+ }
+ }
+
+ fn resetConnectionTimeout(this: *JSValkeyClient) void {
+ const interval = this.client.getTimeoutInterval();
+
+ // First remove existing timer if active
+ if (this.timer.state == .ACTIVE) {
+ this.removeTimer(&this.timer);
+ }
+
+ // Add new timer if interval is non-zero
+ if (interval > 0) {
+ this.addTimer(&this.timer, interval);
+ }
+ }
+
+ pub fn disableConnectionTimeout(this: *JSValkeyClient) void {
+ if (this.timer.state == .ACTIVE) {
+ this.removeTimer(&this.timer);
+ }
+ this.timer.state = .CANCELLED;
+ }
+
+ pub fn onConnectionTimeout(this: *JSValkeyClient) JSC.BunTimer.EventLoopTimer.Arm {
+ debug("onConnectionTimeout", .{});
+
+ // Mark timer as fired
+ this.timer.state = .FIRED;
+
+ // Increment ref to ensure 'this' stays alive throughout the function
+ this.ref();
+ defer this.deref();
+
+ if (this.client.getTimeoutInterval() == 0) {
+ this.resetConnectionTimeout();
+ return .disarm;
+ }
+
+ var buf: [128]u8 = undefined;
+ switch (this.client.status) {
+ .connected => {
+ const msg = std.fmt.bufPrintZ(&buf, "Idle timeout reached after {d}ms", .{this.client.idle_timeout_interval_ms}) catch unreachable;
+ this.clientFail(msg, protocol.RedisError.IdleTimeout);
+ },
+ .disconnected, .connecting => {
+ const msg = std.fmt.bufPrintZ(&buf, "Connection timeout reached after {d}ms", .{this.client.connection_timeout_ms}) catch unreachable;
+ this.clientFail(msg, protocol.RedisError.ConnectionTimeout);
+ },
+ else => {
+ // No timeout for other states
+ },
+ }
+
+ return .disarm;
+ }
+
+ pub fn onReconnectTimer(this: *JSValkeyClient) JSC.BunTimer.EventLoopTimer.Arm {
+ debug("Reconnect timer fired, attempting to reconnect", .{});
+
+        // Mark timer as fired before doing any derefs
+ this.reconnect_timer.state = .FIRED;
+
+ // Increment ref to ensure 'this' stays alive throughout the function
+ this.ref();
+ defer this.deref();
+
+ // Execute reconnection logic
+ this.reconnect();
+
+ return .disarm;
+ }
+
+ pub fn reconnect(this: *JSValkeyClient) void {
+ if (!this.client.flags.is_reconnecting) {
+ return;
+ }
+
+ const vm = this.client.vm;
+
+ if (vm.isShuttingDown()) {
+ @branchHint(.unlikely);
+ return;
+ }
+
+ // Ref to keep this alive during the reconnection
+ this.ref();
+ defer this.deref();
+
+ this.client.status = .connecting;
+
+ // Set retry to 0 to avoid incremental backoff from previous attempts
+ this.client.retry_attempts = 0;
+
+ // Ref the poll to keep event loop alive during connection
+ this.poll_ref.disable();
+ this.poll_ref = .{};
+ this.poll_ref.ref(vm);
+
+ this.connect() catch |err| {
+ this.failWithJSValue(this.globalObject.ERR_SOCKET_CLOSED_BEFORE_CONNECTION("{s} reconnecting", .{@errorName(err)}).toJS());
+ this.poll_ref.disable();
+ return;
+ };
+
+ // Reset the socket timeout
+ this.resetConnectionTimeout();
+ }
+
+ // Callback for when Valkey client connects
+ pub fn onValkeyConnect(this: *JSValkeyClient, value: *protocol.RESPValue) void {
+ // Safety check to ensure a valid connection state
+ if (this.client.status != .connected) {
+ debug("onValkeyConnect called but client status is not 'connected': {s}", .{@tagName(this.client.status)});
+ return;
+ }
+
+ const globalObject = this.globalObject;
+ const event_loop = this.client.vm.eventLoop();
+ event_loop.enter();
+ defer event_loop.exit();
+
+ if (this.this_value.tryGet()) |this_value| {
+ const hello_value = value.toJS(globalObject) catch .undefined;
+ JSValkeyClient.helloSetCached(this_value, globalObject, hello_value);
+ // Call onConnect callback if defined by the user
+ if (JSValkeyClient.onconnectGetCached(this_value)) |on_connect| {
+ const js_value = this_value;
+ js_value.ensureStillAlive();
+ globalObject.queueMicrotask(on_connect, &[_]JSValue{ js_value, hello_value });
+ }
+
+ if (JSValkeyClient.connectionPromiseGetCached(this_value)) |promise| {
+ JSValkeyClient.connectionPromiseSetCached(this_value, globalObject, .zero);
+ promise.asPromise().?.resolve(globalObject, hello_value);
+ }
+ }
+
+ this.client.onWritable();
+ this.updatePollRef();
+ }
+
+ // Callback for when Valkey client needs to reconnect
+ pub fn onValkeyReconnect(this: *JSValkeyClient) void {
+ // Schedule reconnection using our safe timer methods
+ if (this.reconnect_timer.state == .ACTIVE) {
+ this.removeTimer(&this.reconnect_timer);
+ }
+
+ const delay_ms = this.client.getReconnectDelay();
+ if (delay_ms > 0) {
+ this.addTimer(&this.reconnect_timer, delay_ms);
+ }
+ }
+
+ // Callback for when Valkey client closes
+ pub fn onValkeyClose(this: *JSValkeyClient) void {
+ const globalObject = this.globalObject;
+ this.poll_ref.disable();
+ defer this.deref();
+
+ const this_jsvalue = this.this_value.tryGet() orelse return;
+ this.this_value.setWeak(this_jsvalue);
+ this.ref();
+ defer this.deref();
+
+ // Create an error value
+ const error_value = protocol.valkeyErrorToJS(globalObject, "Connection closed", protocol.RedisError.ConnectionClosed);
+
+ const loop = this.client.vm.eventLoop();
+ loop.enter();
+ defer loop.exit();
+
+ if (this_jsvalue != .undefined) {
+ if (JSValkeyClient.connectionPromiseGetCached(this_jsvalue)) |promise| {
+ JSValkeyClient.connectionPromiseSetCached(this_jsvalue, globalObject, .zero);
+ promise.asPromise().?.reject(globalObject, error_value);
+ }
+ }
+
+ // Call onClose callback if it exists
+ if (JSValkeyClient.oncloseGetCached(this_jsvalue)) |on_close| {
+ _ = on_close.call(
+ globalObject,
+ this_jsvalue,
+ &[_]JSValue{error_value},
+ ) catch |e| globalObject.reportActiveExceptionAsUnhandled(e);
+ }
+ }
+
+ // Callback for when Valkey client times out
+ pub fn onValkeyTimeout(this: *JSValkeyClient) void {
+ this.clientFail("Connection timeout", protocol.RedisError.ConnectionClosed);
+ }
+
+ pub fn clientFail(this: *JSValkeyClient, message: []const u8, err: protocol.RedisError) void {
+ this.client.fail(message, err);
+ }
+
+ pub fn failWithJSValue(this: *JSValkeyClient, value: JSValue) void {
+ const this_value = this.this_value.tryGet() orelse return;
+ const globalObject = this.globalObject;
+ if (JSValkeyClient.oncloseGetCached(this_value)) |on_close| {
+ const loop = this.client.vm.eventLoop();
+ loop.enter();
+ defer loop.exit();
+ _ = on_close.call(
+ globalObject,
+ this_value,
+ &[_]JSValue{value},
+ ) catch |e| globalObject.reportActiveExceptionAsUnhandled(e);
+ }
+ }
+
+ pub fn finalize(this: *JSValkeyClient) void {
+        // Since this.stopTimers() can affect the reference count, we
+        // need to ref/deref here as well.
+ this.ref();
+ defer this.deref();
+
+ this.stopTimers();
+ this.this_value.deinit();
+ if (this.client.status == .connected or this.client.status == .connecting) {
+ this.client.flags.is_manually_closed = true;
+ }
+ this.client.flags.finalized = true;
+ this.client.close();
+ this.deref();
+ }
+
+ pub fn stopTimers(this: *JSValkeyClient) void {
+ // Use safe timer removal methods to ensure proper reference counting
+ if (this.timer.state == .ACTIVE) {
+ this.removeTimer(&this.timer);
+ }
+ if (this.reconnect_timer.state == .ACTIVE) {
+ this.removeTimer(&this.reconnect_timer);
+ }
+ }
+
+ fn connect(this: *JSValkeyClient) !void {
+ this.client.flags.needs_to_open_socket = false;
+ const vm = this.client.vm;
+
+ const ctx: *uws.SocketContext, const deinit_context: bool =
+ switch (this.client.tls) {
+ .none => .{
+ vm.rareData().valkey_context.tcp orelse brk_ctx: {
+ // TCP socket
+ var err: uws.create_bun_socket_error_t = .none;
+ const ctx_ = uws.us_create_bun_socket_context(0, vm.uwsLoop(), @sizeOf(*JSValkeyClient), uws.us_bun_socket_context_options_t{}, &err).?;
+ uws.NewSocketHandler(false).configure(ctx_, true, *JSValkeyClient, SocketHandler(false));
+ vm.rareData().valkey_context.tcp = ctx_;
+ break :brk_ctx ctx_;
+ },
+ false,
+ },
+ .enabled => .{
+ vm.rareData().valkey_context.tls orelse brk_ctx: {
+ // TLS socket, default config
+ var err: uws.create_bun_socket_error_t = .none;
+ const ctx_ = uws.us_create_bun_socket_context(1, vm.uwsLoop(), @sizeOf(*JSValkeyClient), uws.us_bun_socket_context_options_t{}, &err).?;
+ uws.NewSocketHandler(true).configure(ctx_, true, *JSValkeyClient, SocketHandler(true));
+ vm.rareData().valkey_context.tls = ctx_;
+ break :brk_ctx ctx_;
+ },
+ false,
+ },
+ .custom => |*custom| brk_ctx: {
+ // TLS socket, custom config
+ var err: uws.create_bun_socket_error_t = .none;
+ const options = custom.asUSockets();
+ const ctx_ = uws.us_create_bun_socket_context(1, vm.uwsLoop(), @sizeOf(*JSValkeyClient), options, &err).?;
+ uws.NewSocketHandler(true).configure(ctx_, true, *JSValkeyClient, SocketHandler(true));
+ break :brk_ctx .{ ctx_, true };
+ },
+ };
+ this.ref();
+
+ defer {
+ if (deinit_context) {
+ // This is actually unref(). uws.Context is reference counted.
+ ctx.deinit(true);
+ }
+ }
+ this.client.socket = try this.client.address.connect(&this.client, ctx, this.client.tls != .none);
+ }
+
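+    /// Queue a command, lazily opening the socket on first use if it has not been opened yet.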
+ pub fn send(this: *JSValkeyClient, globalThis: *JSC.JSGlobalObject, this_jsvalue: JSValue, command: *const Command) !*JSC.JSPromise {
+ if (this.client.flags.needs_to_open_socket) {
+ @branchHint(.unlikely);
+
+ if (this.this_value != .strong)
+ this.this_value.setStrong(this_jsvalue, globalThis);
+
+ this.connect() catch |err| {
+ this.client.flags.needs_to_open_socket = true;
+ const err_value = globalThis.ERR_SOCKET_CLOSED_BEFORE_CONNECTION(" {s} connecting to Valkey", .{@errorName(err)}).toJS();
+ const promise = JSC.JSPromise.create(globalThis);
+ promise.reject(globalThis, err_value);
+ return promise;
+ };
+ this.resetConnectionTimeout();
+ }
+
+ defer this.updatePollRef();
+
+ return try this.client.send(globalThis, command);
+ }
+
+ // Getter for memory cost - useful for diagnostics
+ pub fn memoryCost(this: *JSValkeyClient) usize {
+ var memory_cost: usize = @sizeOf(JSValkeyClient);
+
+ // Add size of all internal buffers
+ memory_cost += this.client.write_buffer.byte_list.cap;
+ memory_cost += this.client.read_buffer.byte_list.cap;
+
+ // Add queue sizes
+ memory_cost += this.client.in_flight.count * @sizeOf(valkey.Command.PromisePair);
+ for (this.client.queue.readableSlice(0)) |*command| {
+ memory_cost += command.serialized_data.len;
+ }
+ memory_cost += this.client.queue.count * @sizeOf(valkey.Command.Entry);
+ return memory_cost;
+ }
+
+ pub fn deinit(this: *JSValkeyClient) void {
+ bun.debugAssert(this.client.socket.isClosed());
+
+ this.client.deinit(null);
+ this.poll_ref.disable();
+ this.stopTimers();
+ this.this_value.deinit();
+ bun.debugAssert(this.ref_count == 0);
+ this.destroy();
+ }
+
+    /// Keep the event loop (and this object) alive only while commands are pending.
+ pub fn updatePollRef(this: *JSValkeyClient) void {
+ if (!this.client.hasAnyPendingCommands() and this.client.status == .connected) {
+ this.poll_ref.unref(this.client.vm);
+ // If we don't have any pending commands and we're connected, we don't need to keep the object alive.
+ if (this.this_value.tryGet()) |value| {
+ this.this_value.setWeak(value);
+ }
+ } else if (this.client.hasAnyPendingCommands()) {
+ this.poll_ref.ref(this.client.vm);
+ // If we have pending commands, we need to keep the object alive.
+ if (this.this_value == .weak) {
+ this.this_value.upgrade(this.globalObject);
+ }
+ }
+ }
+
+ pub const jsSend = fns.jsSend;
+ pub const @"type" = fns.type;
+ pub const append = fns.append;
+ pub const bitcount = fns.bitcount;
+ pub const decr = fns.decr;
+ pub const del = fns.del;
+ pub const dump = fns.dump;
+ pub const exists = fns.exists;
+ pub const expire = fns.expire;
+ pub const expiretime = fns.expiretime;
+ pub const get = fns.get;
+ pub const getdel = fns.getdel;
+ pub const getex = fns.getex;
+ pub const getset = fns.getset;
+ pub const hgetall = fns.hgetall;
+ pub const hincrby = fns.hincrby;
+ pub const hincrbyfloat = fns.hincrbyfloat;
+ pub const hkeys = fns.hkeys;
+ pub const hlen = fns.hlen;
+ pub const hmget = fns.hmget;
+ pub const hmset = fns.hmset;
+ pub const hstrlen = fns.hstrlen;
+ pub const hvals = fns.hvals;
+ pub const incr = fns.incr;
+ pub const keys = fns.keys;
+ pub const llen = fns.llen;
+ pub const lpop = fns.lpop;
+ pub const lpush = fns.lpush;
+ pub const lpushx = fns.lpushx;
+ pub const mget = fns.mget;
+ pub const persist = fns.persist;
+ pub const pexpiretime = fns.pexpiretime;
+ pub const pfadd = fns.pfadd;
+ pub const ping = fns.ping;
+ pub const psubscribe = fns.psubscribe;
+ pub const pttl = fns.pttl;
+ pub const publish = fns.publish;
+ pub const pubsub = fns.pubsub;
+ pub const punsubscribe = fns.punsubscribe;
+ pub const rpop = fns.rpop;
+ pub const rpush = fns.rpush;
+ pub const rpushx = fns.rpushx;
+ pub const sadd = fns.sadd;
+ pub const scard = fns.scard;
+ pub const script = fns.script;
+ pub const select = fns.select;
+ pub const set = fns.set;
+ pub const setnx = fns.setnx;
+ pub const sismember = fns.sismember;
+ pub const smembers = fns.smembers;
+ pub const smove = fns.smove;
+ pub const spop = fns.spop;
+ pub const spublish = fns.spublish;
+ pub const srandmember = fns.srandmember;
+ pub const srem = fns.srem;
+ pub const strlen = fns.strlen;
+ pub const subscribe = fns.subscribe;
+ pub const substr = fns.substr;
+ pub const ttl = fns.ttl;
+ pub const unsubscribe = fns.unsubscribe;
+ pub const zcard = fns.zcard;
+ pub const zpopmax = fns.zpopmax;
+ pub const zpopmin = fns.zpopmin;
+ pub const zrandmember = fns.zrandmember;
+ pub const zrank = fns.zrank;
+ pub const zrevrank = fns.zrevrank;
+ pub const zscore = fns.zscore;
+
+ const fns = @import("./js_valkey_functions.zig");
+};
+
+// Socket handler for the uWebSockets library
+fn SocketHandler(comptime ssl: bool) type {
+ return struct {
+ const SocketType = uws.NewSocketHandler(ssl);
+ fn _socket(s: SocketType) Socket {
+ if (comptime ssl) {
+ return Socket{ .SocketTLS = s };
+ }
+
+ return Socket{ .SocketTCP = s };
+ }
+ pub fn onOpen(this: *JSValkeyClient, socket: SocketType) void {
+ this.client.socket = _socket(socket);
+ this.client.onOpen(_socket(socket));
+ }
+
+ fn onHandshake_(this: *JSValkeyClient, _: anytype, success: i32, ssl_error: uws.us_bun_verify_error_t) void {
+ debug("onHandshake: {d} {d}", .{ success, ssl_error.error_no });
+            const handshake_success = success == 1;
+ this.ref();
+ defer this.deref();
+ if (handshake_success) {
+ const vm = this.client.vm;
+ if (this.client.tls.rejectUnauthorized(vm)) {
+ if (ssl_error.error_no != 0) {
+ // only reject the connection if reject_unauthorized == true
+
+ const ssl_ptr: *BoringSSL.c.SSL = @ptrCast(this.client.socket.getNativeHandle());
+ if (BoringSSL.c.SSL_get_servername(ssl_ptr, 0)) |servername| {
+ const hostname = servername[0..bun.len(servername)];
+ if (!BoringSSL.checkServerIdentity(ssl_ptr, hostname)) {
+ this.client.flags.is_authenticated = false;
+ const loop = vm.eventLoop();
+ loop.enter();
+ defer loop.exit();
+ this.client.status = .failed;
+ this.client.flags.is_manually_closed = true;
+ this.client.failWithJSValue(this.globalObject, ssl_error.toJS(this.globalObject));
+ this.client.close();
+ }
+ }
+ }
+ }
+ }
+ }
+
+ pub const onHandshake = if (ssl) onHandshake_ else null;
+
+ pub fn onClose(this: *JSValkeyClient, _: SocketType, _: i32, _: ?*anyopaque) void {
+ // Ensure the socket pointer is updated.
+ this.client.socket = .{ .SocketTCP = .detached };
+
+ this.client.onClose();
+ }
+
+ pub fn onEnd(this: *JSValkeyClient, socket: SocketType) void {
+ // Ensure the socket pointer is updated before closing
+ this.client.socket = _socket(socket);
+
+ // Do not allow half-open connections
+ socket.close(.normal);
+ }
+
+ pub fn onConnectError(this: *JSValkeyClient, _: SocketType, _: i32) void {
+ // Ensure the socket pointer is updated.
+ this.client.socket = .{ .SocketTCP = .detached };
+
+ this.client.onClose();
+ }
+
+ pub fn onTimeout(this: *JSValkeyClient, socket: SocketType) void {
+ this.client.socket = _socket(socket);
+            // No-op: connection and idle timeouts are driven by the client's own EventLoopTimer instances.
+ }
+
+ pub fn onData(this: *JSValkeyClient, socket: SocketType, data: []const u8) void {
+ // Ensure the socket pointer is updated.
+ this.client.socket = _socket(socket);
+
+ this.ref();
+ defer this.deref();
+ this.client.onData(data);
+ this.updatePollRef();
+ }
+
+ pub fn onWritable(this: *JSValkeyClient, socket: SocketType) void {
+ this.client.socket = _socket(socket);
+ this.ref();
+ defer this.deref();
+ this.client.onWritable();
+ this.updatePollRef();
+ }
+ };
+}
+
+// Parse JavaScript options into Valkey client options
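+// Recognized keys (all optional): idleTimeout, connectionTimeout, autoReconnect, maxRetries,
+// enableOfflineQueue, enableAutoPipelining, and tls (boolean or an SSL config object).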
+const Options = struct {
+ pub fn fromJS(globalObject: *JSC.JSGlobalObject, options_obj: JSC.JSValue) !valkey.Options {
+ var this = valkey.Options{
+ .enable_auto_pipelining = !bun.getRuntimeFeatureFlag("BUN_FEATURE_FLAG_DISABLE_REDIS_AUTO_PIPELINING"),
+ };
+
+ if (try options_obj.getIfPropertyExists(globalObject, "idleTimeout")) |idle_timeout| {
+ if (!idle_timeout.isEmptyOrUndefinedOrNull())
+ this.idle_timeout_ms = try globalObject.validateIntegerRange(idle_timeout, u32, 0, .{ .min = 0, .max = std.math.maxInt(u32) });
+ }
+
+ if (try options_obj.getIfPropertyExists(globalObject, "connectionTimeout")) |connection_timeout| {
+ if (!connection_timeout.isEmptyOrUndefinedOrNull())
+ this.connection_timeout_ms = try globalObject.validateIntegerRange(connection_timeout, u32, 0, .{ .min = 0, .max = std.math.maxInt(u32) });
+ }
+
+ if (try options_obj.getIfPropertyExists(globalObject, "autoReconnect")) |auto_reconnect| {
+ this.enable_auto_reconnect = auto_reconnect.toBoolean();
+ }
+
+ if (try options_obj.getIfPropertyExists(globalObject, "maxRetries")) |max_retries| {
+ this.max_retries = try globalObject.validateIntegerRange(max_retries, u32, 0, .{ .min = 0, .max = std.math.maxInt(u32) });
+ }
+
+ if (try options_obj.getIfPropertyExists(globalObject, "enableOfflineQueue")) |enable_offline_queue| {
+ this.enable_offline_queue = enable_offline_queue.toBoolean();
+ }
+
+ if (try options_obj.getIfPropertyExists(globalObject, "enableAutoPipelining")) |enable_auto_pipelining| {
+ this.enable_auto_pipelining = enable_auto_pipelining.toBoolean();
+ }
+
+ if (try options_obj.getIfPropertyExists(globalObject, "tls")) |tls| {
+ if (tls.isBoolean() or tls.isUndefinedOrNull()) {
+ this.tls = if (tls.toBoolean()) .enabled else .none;
+ } else if (tls.isObject()) {
+ if (try JSC.API.ServerConfig.SSLConfig.fromJS(globalObject.bunVM(), globalObject, tls)) |ssl_config| {
+ this.tls = .{ .custom = ssl_config };
+ } else {
+ return globalObject.throwInvalidArgumentType("tls", "tls", "object");
+ }
+ } else {
+ return globalObject.throwInvalidArgumentType("tls", "tls", "boolean or object");
+ }
+ }
+
+ return this;
+ }
+};
+
+const std = @import("std");
+const bun = @import("root").bun;
+const valkey = @import("valkey.zig");
+const protocol = @import("valkey_protocol.zig");
+const JSC = bun.JSC;
+const String = bun.String;
+const debug = bun.Output.scoped(.RedisJS, false);
+const uws = bun.uws;
+
+const JSValue = JSC.JSValue;
+const Socket = uws.AnySocket;
+const RedisError = protocol.RedisError;
+const Command = @import("ValkeyCommand.zig");
+const BoringSSL = bun.BoringSSL;
diff --git a/src/valkey/js_valkey_functions.zig b/src/valkey/js_valkey_functions.zig
new file mode 100644
index 0000000000..1278a29025
--- /dev/null
+++ b/src/valkey/js_valkey_functions.zig
@@ -0,0 +1,782 @@
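+/// Backs the JS-facing sendCommand(command, args) method (name per the error messages below):
+/// forwards an arbitrary command string with an array of string/buffer arguments.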
+pub fn jsSend(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const command = try callframe.argument(0).toBunString(globalObject);
+ defer command.deref();
+
+ const args_array = callframe.argument(1);
+ if (!args_array.isObject() or !args_array.isArray()) {
+ return globalObject.throw("Arguments must be an array", .{});
+ }
+ var iter = args_array.arrayIterator(globalObject);
+ var args = try std.ArrayList(JSArgument).initCapacity(bun.default_allocator, iter.len);
+ defer {
+ for (args.items) |*item| {
+ item.deinit();
+ }
+ args.deinit();
+ }
+
+ while (iter.next()) |arg_js| {
+ args.appendAssumeCapacity(try fromJS(globalObject, arg_js) orelse {
+ return globalObject.throwInvalidArgumentType("sendCommand", "argument", "string or buffer");
+ });
+ }
+
+ const cmd_str = command.toUTF8WithoutRef(bun.default_allocator);
+ defer cmd_str.deinit();
+ var cmd: Command = .{
+ .command = cmd_str.slice(),
+ .args = .{ .args = args.items },
+ .meta = .{},
+ };
+ cmd.meta = cmd.meta.check(&cmd);
+    // Send the command with the collected arguments
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &cmd,
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+pub fn get(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("get", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ // Send GET command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "GET",
+ .args = .{ .args = &.{key} },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send GET command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+pub fn set(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("set", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ const value = (try fromJS(globalObject, callframe.argument(1))) orelse {
+ return globalObject.throwInvalidArgumentType("set", "value", "string or buffer");
+ };
+ defer value.deinit();
+
+ // Send SET command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "SET",
+ .args = .{ .args = &.{ key, value } },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send SET command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+pub fn incr(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("incr", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ // Send INCR command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "INCR",
+ .args = .{ .args = &.{key} },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send INCR command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+pub fn decr(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("decr", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ // Send DECR command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "DECR",
+ .args = .{ .args = &.{key} },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send DECR command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+pub fn exists(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("exists", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ // Send EXISTS command with special Exists type for boolean conversion
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "EXISTS",
+ .args = .{ .args = &.{key} },
+ .meta = .{ .return_as_bool = true },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send EXISTS command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+pub fn expire(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("expire", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ const seconds = try globalObject.validateIntegerRange(callframe.argument(1), i32, 0, .{ .min = 0, .max = 2147483647 });
+
+ // Convert seconds to a string
+ var int_buf: [64]u8 = undefined;
+ const seconds_len = std.fmt.formatIntBuf(&int_buf, seconds, 10, .lower, .{});
+ const seconds_slice = int_buf[0..seconds_len];
+
+ // Send EXPIRE command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "EXPIRE",
+ .args = .{ .raw = &.{ key.slice(), seconds_slice } },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send EXPIRE command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+pub fn ttl(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("ttl", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ // Send TTL command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "TTL",
+ .args = .{ .args = &.{key} },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send TTL command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+// Implement srem (remove value from a set)
+pub fn srem(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("srem", "key", "string or buffer");
+ };
+ defer key.deinit();
+ const value = (try fromJS(globalObject, callframe.argument(1))) orelse {
+ return globalObject.throwInvalidArgumentType("srem", "value", "string or buffer");
+ };
+ defer value.deinit();
+
+ // Send SREM command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "SREM",
+ .args = .{ .args = &.{ key, value } },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send SREM command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+// Implement srandmember (get random member from set)
+pub fn srandmember(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("srandmember", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ // Send SRANDMEMBER command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "SRANDMEMBER",
+ .args = .{ .args = &.{key} },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send SRANDMEMBER command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+// Implement smembers (get all members of a set)
+pub fn smembers(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("smembers", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ // Send SMEMBERS command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "SMEMBERS",
+ .args = .{ .args = &.{key} },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send SMEMBERS command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+// Implement spop (pop a random member from a set)
+pub fn spop(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("spop", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ // Send SPOP command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "SPOP",
+ .args = .{ .args = &.{key} },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send SPOP command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+// Implement sadd (add member to a set)
+pub fn sadd(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("sadd", "key", "string or buffer");
+ };
+ defer key.deinit();
+ const value = (try fromJS(globalObject, callframe.argument(1))) orelse {
+ return globalObject.throwInvalidArgumentType("sadd", "value", "string or buffer");
+ };
+ defer value.deinit();
+
+ // Send SADD command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "SADD",
+ .args = .{ .args = &.{ key, value } },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send SADD command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+// Implement sismember (check if value is member of a set)
+pub fn sismember(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("sismember", "key", "string or buffer");
+ };
+ defer key.deinit();
+ const value = (try fromJS(globalObject, callframe.argument(1))) orelse {
+ return globalObject.throwInvalidArgumentType("sismember", "value", "string or buffer");
+ };
+ defer value.deinit();
+
+ // Send SISMEMBER command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "SISMEMBER",
+ .args = .{ .args = &.{ key, value } },
+ .meta = .{ .return_as_bool = true },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send SISMEMBER command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+// Implement hmget (get multiple values from hash)
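+// Usage from JS (illustrative key/field names): client.hmget("user:1", ["name", "email"])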
+pub fn hmget(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType("hmget", "key", "string or buffer");
+ };
+ defer key.deinit();
+
+ // Get field array argument
+ const fields_array = callframe.argument(1);
+ if (!fields_array.isObject() or !fields_array.isArray()) {
+ return globalObject.throw("Fields must be an array", .{});
+ }
+
+ var iter = fields_array.arrayIterator(globalObject);
+ var args = try std.ArrayList(JSC.ZigString.Slice).initCapacity(bun.default_allocator, iter.len + 1);
+ defer {
+ for (args.items) |item| {
+ item.deinit();
+ }
+ args.deinit();
+ }
+
+ args.appendAssumeCapacity(JSC.ZigString.Slice.fromUTF8NeverFree(key.slice()));
+
+ // Add field names as arguments
+ while (iter.next()) |field_js| {
+ const field_str = try field_js.toBunString(globalObject);
+ defer field_str.deref();
+
+ const field_slice = field_str.toUTF8WithoutRef(bun.default_allocator);
+ args.appendAssumeCapacity(field_slice);
+ }
+
+ // Send HMGET command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "HMGET",
+ .args = .{ .slices = args.items },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send HMGET command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+// Implement hincrby (increment hash field by integer value)
+pub fn hincrby(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = try callframe.argument(0).toBunString(globalObject);
+ defer key.deref();
+ const field = try callframe.argument(1).toBunString(globalObject);
+ defer field.deref();
+ const value = try callframe.argument(2).toBunString(globalObject);
+ defer value.deref();
+
+ const key_slice = key.toUTF8WithoutRef(bun.default_allocator);
+ defer key_slice.deinit();
+ const field_slice = field.toUTF8WithoutRef(bun.default_allocator);
+ defer field_slice.deinit();
+ const value_slice = value.toUTF8WithoutRef(bun.default_allocator);
+ defer value_slice.deinit();
+
+ // Send HINCRBY command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "HINCRBY",
+ .args = .{ .slices = &.{ key_slice, field_slice, value_slice } },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send HINCRBY command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+// Implement hincrbyfloat (increment hash field by float value)
+pub fn hincrbyfloat(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = try callframe.argument(0).toBunString(globalObject);
+ defer key.deref();
+ const field = try callframe.argument(1).toBunString(globalObject);
+ defer field.deref();
+ const value = try callframe.argument(2).toBunString(globalObject);
+ defer value.deref();
+
+ const key_slice = key.toUTF8WithoutRef(bun.default_allocator);
+ defer key_slice.deinit();
+ const field_slice = field.toUTF8WithoutRef(bun.default_allocator);
+ defer field_slice.deinit();
+ const value_slice = value.toUTF8WithoutRef(bun.default_allocator);
+ defer value_slice.deinit();
+
+ // Send HINCRBYFLOAT command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "HINCRBYFLOAT",
+ .args = .{ .slices = &.{ key_slice, field_slice, value_slice } },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send HINCRBYFLOAT command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
+// Implement hmset (set multiple values in hash)
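+// Usage from JS (illustrative values): client.hmset("user:1", ["name", "Alice", "age", "30"])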
+pub fn hmset(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = try callframe.argument(0).toBunString(globalObject);
+ defer key.deref();
+
+    // Accept an array of alternating field names and values
+ const array_arg = callframe.argument(1);
+ if (!array_arg.isObject() or !array_arg.isArray()) {
+ return globalObject.throw("Arguments must be an array of alternating field names and values", .{});
+ }
+
+ var iter = array_arg.arrayIterator(globalObject);
+ if (iter.len % 2 != 0) {
+ return globalObject.throw("Arguments must be an array of alternating field names and values", .{});
+ }
+
+ var args = try std.ArrayList(JSC.ZigString.Slice).initCapacity(bun.default_allocator, iter.len + 1);
+ defer {
+ for (args.items) |item| {
+ item.deinit();
+ }
+ args.deinit();
+ }
+
+ // Add key as first argument
+ const key_slice = key.toUTF8WithoutRef(bun.default_allocator);
+ defer key_slice.deinit();
+ args.appendAssumeCapacity(key_slice);
+
+ // Add field-value pairs
+ while (iter.next()) |field_js| {
+ // Add field name
+ const field_str = try field_js.toBunString(globalObject);
+ defer field_str.deref();
+ const field_slice = field_str.toUTF8WithoutRef(bun.default_allocator);
+ args.appendAssumeCapacity(field_slice);
+
+ // Add value
+ if (iter.next()) |value_js| {
+ const value_str = try value_js.toBunString(globalObject);
+ defer value_str.deref();
+ const value_slice = value_str.toUTF8WithoutRef(bun.default_allocator);
+ args.appendAssumeCapacity(value_slice);
+ } else {
+ return globalObject.throw("Arguments must be an array of alternating field names and values", .{});
+ }
+ }
+
+ // Send HMSET command
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = "HMSET",
+ .args = .{ .slices = args.items },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send HMSET command", err);
+ };
+ return promise.asValue(globalObject);
+}
+
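+// The commands below are generated by the comptime factories in `compile` further down.
+// Each generated wrapper validates its arguments, sends the fixed command string, and
+// returns the resulting promise, mirroring the hand-written commands above.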
+pub const bitcount = compile.@"(key: RedisKey)"("bitcount", "BITCOUNT", "key").call;
+pub const dump = compile.@"(key: RedisKey)"("dump", "DUMP", "key").call;
+pub const expiretime = compile.@"(key: RedisKey)"("expiretime", "EXPIRETIME", "key").call;
+pub const getdel = compile.@"(key: RedisKey)"("getdel", "GETDEL", "key").call;
+pub const getex = compile.@"(key: RedisKey)"("getex", "GETEX", "key").call;
+pub const hgetall = compile.@"(key: RedisKey)"("hgetall", "HGETALL", "key").call;
+pub const hkeys = compile.@"(key: RedisKey)"("hkeys", "HKEYS", "key").call;
+pub const hlen = compile.@"(key: RedisKey)"("hlen", "HLEN", "key").call;
+pub const hvals = compile.@"(key: RedisKey)"("hvals", "HVALS", "key").call;
+pub const keys = compile.@"(key: RedisKey)"("keys", "KEYS", "key").call;
+pub const llen = compile.@"(key: RedisKey)"("llen", "LLEN", "key").call;
+pub const lpop = compile.@"(key: RedisKey)"("lpop", "LPOP", "key").call;
+pub const persist = compile.@"(key: RedisKey)"("persist", "PERSIST", "key").call;
+pub const pexpiretime = compile.@"(key: RedisKey)"("pexpiretime", "PEXPIRETIME", "key").call;
+pub const pttl = compile.@"(key: RedisKey)"("pttl", "PTTL", "key").call;
+pub const rpop = compile.@"(key: RedisKey)"("rpop", "RPOP", "key").call;
+pub const scard = compile.@"(key: RedisKey)"("scard", "SCARD", "key").call;
+pub const strlen = compile.@"(key: RedisKey)"("strlen", "STRLEN", "key").call;
+pub const @"type" = compile.@"(key: RedisKey)"("type", "TYPE", "key").call;
+pub const zcard = compile.@"(key: RedisKey)"("zcard", "ZCARD", "key").call;
+pub const zpopmax = compile.@"(key: RedisKey)"("zpopmax", "ZPOPMAX", "key").call;
+pub const zpopmin = compile.@"(key: RedisKey)"("zpopmin", "ZPOPMIN", "key").call;
+pub const zrandmember = compile.@"(key: RedisKey)"("zrandmember", "ZRANDMEMBER", "key").call;
+pub const ping = compile.@"(key: RedisKey)"("ping", "PING", "message").call;
+
+pub const append = compile.@"(key: RedisKey, value: RedisValue)"("append", "APPEND", "key", "value").call;
+pub const getset = compile.@"(key: RedisKey, value: RedisValue)"("getset", "GETSET", "key", "value").call;
+pub const lpush = compile.@"(key: RedisKey, value: RedisValue, ...args: RedisValue)"("lpush", "LPUSH").call;
+pub const lpushx = compile.@"(key: RedisKey, value: RedisValue, ...args: RedisValue)"("lpushx", "LPUSHX").call;
+pub const pfadd = compile.@"(key: RedisKey, value: RedisValue)"("pfadd", "PFADD", "key", "value").call;
+pub const rpush = compile.@"(key: RedisKey, value: RedisValue, ...args: RedisValue)"("rpush", "RPUSH").call;
+pub const rpushx = compile.@"(key: RedisKey, value: RedisValue, ...args: RedisValue)"("rpushx", "RPUSHX").call;
+pub const setnx = compile.@"(key: RedisKey, value: RedisValue)"("setnx", "SETNX", "key", "value").call;
+pub const zscore = compile.@"(key: RedisKey, value: RedisValue)"("zscore", "ZSCORE", "key", "value").call;
+
+pub const del = compile.@"(key: RedisKey, ...args: RedisKey[])"("del", "DEL", "key").call;
+pub const mget = compile.@"(key: RedisKey, ...args: RedisKey[])"("mget", "MGET", "key").call;
+
+pub const publish = compile.@"(...strings: string[])"("publish", "PUBLISH").call;
+pub const script = compile.@"(...strings: string[])"("script", "SCRIPT").call;
+pub const select = compile.@"(...strings: string[])"("select", "SELECT").call;
+pub const spublish = compile.@"(...strings: string[])"("spublish", "SPUBLISH").call;
+pub const smove = compile.@"(...strings: string[])"("smove", "SMOVE").call;
+pub const substr = compile.@"(...strings: string[])"("substr", "SUBSTR").call;
+pub const hstrlen = compile.@"(...strings: string[])"("hstrlen", "HSTRLEN").call;
+pub const zrank = compile.@"(...strings: string[])"("zrank", "ZRANK").call;
+pub const zrevrank = compile.@"(...strings: string[])"("zrevrank", "ZREVRANK").call;
+pub const subscribe = compile.@"(...strings: string[])"("subscribe", "SUBSCRIBE").call;
+pub const psubscribe = compile.@"(...strings: string[])"("psubscribe", "PSUBSCRIBE").call;
+pub const unsubscribe = compile.@"(...strings: string[])"("unsubscribe", "UNSUBSCRIBE").call;
+pub const punsubscribe = compile.@"(...strings: string[])"("punsubscribe", "PUNSUBSCRIBE").call;
+pub const pubsub = compile.@"(...strings: string[])"("pubsub", "PUBSUB").call;
+
+// publish(channel: RedisValue, message: RedisValue)
+// script(subcommand: "LOAD", script: RedisValue)
+// select(index: number | string)
+// spublish(shardchannel: RedisValue, message: RedisValue)
+// smove(source: RedisKey, destination: RedisKey, member: RedisValue)
+// substr(key: RedisKey, start: number, end: number) // Deprecated alias for GETRANGE
+// hstrlen(key: RedisKey, field: RedisValue)
+// zrank(key: RedisKey, member: RedisValue)
+// zrevrank(key: RedisKey, member: RedisValue)
+// zscore(key: RedisKey, member: RedisValue)
+
+// cluster(subcommand: "KEYSLOT", key: RedisKey)
+const JSFunction = fn (*JSValkeyClient, *JSC.JSGlobalObject, *JSC.CallFrame) bun.JSError!JSValue;
+
+const compile = struct {
+ pub fn @"(key: RedisKey)"(
+ comptime name: []const u8,
+ comptime command: []const u8,
+ comptime arg0_name: []const u8,
+ ) type {
+ return struct {
+ pub fn call(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType(name, arg0_name, "string or buffer");
+ };
+ defer key.deinit();
+
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = command,
+ .args = .{ .args = &.{key} },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send " ++ command, err);
+ };
+ return promise.asValue(globalObject);
+ }
+ };
+ }
+
+ pub fn @"(key: RedisKey, ...args: RedisKey[])"(
+ comptime name: []const u8,
+ comptime command: []const u8,
+ comptime arg0_name: []const u8,
+ ) type {
+ return struct {
+ pub fn call(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ if (callframe.argument(0).isUndefinedOrNull()) {
+ return globalObject.throwMissingArgumentsValue(&.{arg0_name});
+ }
+
+ const arguments = callframe.arguments();
+ var args = try std.ArrayList(JSArgument).initCapacity(bun.default_allocator, arguments.len);
+ defer {
+ for (args.items) |*item| {
+ item.deinit();
+ }
+ args.deinit();
+ }
+
+ for (arguments) |arg| {
+ if (arg.isUndefinedOrNull()) {
+ continue;
+ }
+
+ const another = (try fromJS(globalObject, arg)) orelse {
+ return globalObject.throwInvalidArgumentType(name, "additional arguments", "string or buffer");
+ };
+ try args.append(another);
+ }
+
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = command,
+ .args = .{ .args = args.items },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send " ++ command, err);
+ };
+ return promise.asValue(globalObject);
+ }
+ };
+ }
+ pub fn @"(key: RedisKey, value: RedisValue)"(
+ comptime name: []const u8,
+ comptime command: []const u8,
+ comptime arg0_name: []const u8,
+ comptime arg1_name: []const u8,
+ ) type {
+ return struct {
+ pub fn call(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ const key = (try fromJS(globalObject, callframe.argument(0))) orelse {
+ return globalObject.throwInvalidArgumentType(name, arg0_name, "string or buffer");
+ };
+ defer key.deinit();
+ const value = (try fromJS(globalObject, callframe.argument(1))) orelse {
+ return globalObject.throwInvalidArgumentType(name, arg1_name, "string or buffer");
+ };
+ defer value.deinit();
+
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = command,
+ .args = .{ .args = &.{ key, value } },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send " ++ command, err);
+ };
+ return promise.asValue(globalObject);
+ }
+ };
+ }
+
+ pub fn @"(...strings: string[])"(
+ comptime name: []const u8,
+ comptime command: []const u8,
+ ) type {
+ return struct {
+ pub fn call(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ var args = try std.ArrayList(JSArgument).initCapacity(bun.default_allocator, callframe.arguments().len);
+ defer {
+ for (args.items) |*item| {
+ item.deinit();
+ }
+ args.deinit();
+ }
+
+ for (callframe.arguments()) |arg| {
+ const another = (try fromJS(globalObject, arg)) orelse {
+ return globalObject.throwInvalidArgumentType(name, "additional arguments", "string or buffer");
+ };
+ try args.append(another);
+ }
+
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = command,
+ .args = .{ .args = args.items },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send " ++ command, err);
+ };
+ return promise.asValue(globalObject);
+ }
+ };
+ }
+
+ pub fn @"(key: RedisKey, value: RedisValue, ...args: RedisValue)"(
+ comptime name: []const u8,
+ comptime command: []const u8,
+ ) type {
+ return struct {
+ pub fn call(this: *JSValkeyClient, globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSValue {
+ var args = try std.ArrayList(JSArgument).initCapacity(bun.default_allocator, callframe.arguments().len);
+ defer {
+ for (args.items) |*item| {
+ item.deinit();
+ }
+ args.deinit();
+ }
+
+ for (callframe.arguments()) |arg| {
+ if (arg.isUndefinedOrNull()) {
+ continue;
+ }
+
+ const another = (try fromJS(globalObject, arg)) orelse {
+ return globalObject.throwInvalidArgumentType(name, "additional arguments", "string or buffer");
+ };
+ try args.append(another);
+ }
+
+ const promise = this.send(
+ globalObject,
+ callframe.this(),
+ &.{
+ .command = command,
+ .args = .{ .args = args.items },
+ },
+ ) catch |err| {
+ return protocol.valkeyErrorToJS(globalObject, "Failed to send " ++ command, err);
+ };
+ return promise.asValue(globalObject);
+ }
+ };
+ }
+};
+
+const JSValkeyClient = @import("./js_valkey.zig").JSValkeyClient;
+const bun = @import("root").bun;
+const JSC = bun.JSC;
+const valkey = bun.valkey;
+const protocol = valkey.protocol;
+const JSValue = JSC.JSValue;
+const Command = valkey.Command;
+const std = @import("std");
+const Slice = JSC.ZigString.Slice;
+
+const JSArgument = JSC.Node.BlobOrStringOrBuffer;
+
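+/// Convert a JS argument (string, TypedArray/Buffer, Blob, or number) into a command argument.
+/// Returns null for undefined/null so callers can throw a typed "invalid argument" error.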
+fn fromJS(globalObject: *JSC.JSGlobalObject, value: JSValue) !?JSArgument {
+ if (value == .undefined or value == .null) {
+ return null;
+ }
+
+ if (value.isNumber()) {
+ // Allow numbers to be passed as strings.
+ const str = value.toString(globalObject);
+ if (globalObject.hasException()) {
+ @branchHint(.unlikely);
+ return error.JSError;
+ }
+
+ return try JSArgument.fromJSMaybeFile(globalObject, bun.default_allocator, str.toJS(), true);
+ }
+
+ return try JSArgument.fromJSMaybeFile(globalObject, bun.default_allocator, value, false);
+}
diff --git a/src/valkey/valkey.zig b/src/valkey/valkey.zig
new file mode 100644
index 0000000000..81e8327bda
--- /dev/null
+++ b/src/valkey/valkey.zig
@@ -0,0 +1,973 @@
+// Entry point for Valkey client
+//
+// This file contains the core Valkey client implementation with protocol handling
+
+pub const ValkeyContext = @import("ValkeyContext.zig");
+
+/// Connection flags to track Valkey client state
+pub const ConnectionFlags = packed struct {
+ is_authenticated: bool = false,
+ is_manually_closed: bool = false,
+ enable_offline_queue: bool = true,
+ needs_to_open_socket: bool = true,
+ enable_auto_reconnect: bool = true,
+ is_reconnecting: bool = false,
+ auto_pipelining: bool = true,
+ finalized: bool = false,
+};
+
+/// Valkey connection status
+pub const Status = enum {
+ disconnected,
+ connecting,
+ connected,
+ failed,
+};
+
+pub const Command = @import("./ValkeyCommand.zig");
+
+/// Valkey protocol types (standalone, TLS, Unix socket)
+pub const Protocol = enum {
+ standalone,
+ standalone_unix,
+ standalone_tls,
+ standalone_tls_unix,
+
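+    // The URL scheme selects the protocol, e.g. "redis://host" -> .standalone,
+    // "rediss://host" or "redis+tls://host" -> .standalone_tls, "redis+unix://" -> .standalone_unix.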
+ pub const Map = bun.ComptimeStringMap(Protocol, .{
+ .{ "valkey", .standalone },
+ .{ "valkeys", .standalone_tls },
+ .{ "valkey+tls", .standalone_tls },
+ .{ "valkey+unix", .standalone_unix },
+ .{ "valkey+tls+unix", .standalone_tls_unix },
+ .{ "redis", .standalone },
+ .{ "rediss", .standalone_tls },
+ .{ "redis+tls", .standalone_tls },
+ .{ "redis+unix", .standalone_unix },
+ .{ "redis+tls+unix", .standalone_tls_unix },
+ });
+
+ pub fn isTLS(self: Protocol) bool {
+ return switch (self) {
+ .standalone_tls, .standalone_tls_unix => true,
+ else => false,
+ };
+ }
+
+ pub fn isUnix(self: Protocol) bool {
+ return switch (self) {
+ .standalone_unix, .standalone_tls_unix => true,
+ else => false,
+ };
+ }
+};
+
+pub const TLS = union(enum) {
+ none,
+ enabled,
+ custom: JSC.API.ServerConfig.SSLConfig,
+
+ pub fn deinit(this: *TLS) void {
+ switch (this.*) {
+ .custom => |*ssl_config| ssl_config.deinit(),
+ else => {},
+ }
+ }
+
+ pub fn rejectUnauthorized(this: *const TLS, vm: *JSC.VirtualMachine) bool {
+ return switch (this.*) {
+ .custom => |*ssl_config| ssl_config.reject_unauthorized != 0,
+ .enabled => vm.getTLSRejectUnauthorized(),
+ else => false,
+ };
+ }
+};
+
+/// Connection options for Valkey client
+pub const Options = struct {
+ idle_timeout_ms: u32 = 30000,
+ connection_timeout_ms: u32 = 10000,
+ enable_auto_reconnect: bool = true,
+ max_retries: u32 = 20,
+ enable_offline_queue: bool = true,
+ enable_auto_pipelining: bool = true,
+ enable_debug_logging: bool = false,
+
+ tls: TLS = .none,
+};
+
+pub const Address = union(enum) {
+ unix: []const u8,
+ host: struct {
+ host: []const u8,
+ port: u16,
+ },
+
+ pub fn connect(this: *const Address, client: *ValkeyClient, ctx: *bun.uws.SocketContext, is_tls: bool) !uws.AnySocket {
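+        // `inline else` expands both branches at compile time, so the socket type and the
+        // matching union field name are comptime-known for the TLS and plain-TCP paths.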
+ switch (is_tls) {
+ inline else => |tls| {
+ const SocketType = if (tls) uws.SocketTLS else uws.SocketTCP;
+ const union_field = if (tls) "SocketTLS" else "SocketTCP";
+ switch (this.*) {
+ .unix => |path| {
+ return @unionInit(uws.AnySocket, union_field, try SocketType.connectUnixAnon(
+ path,
+ ctx,
+ client,
+ false,
+ ));
+ },
+ .host => |h| {
+ return @unionInit(uws.AnySocket, union_field, try SocketType.connectAnon(
+ h.host,
+ h.port,
+ ctx,
+ client,
+ false,
+ ));
+ },
+ }
+ },
+ }
+ }
+};
+
+/// Core Valkey client implementation
+pub const ValkeyClient = struct {
+ socket: uws.AnySocket,
+ status: Status = Status.connecting,
+
+ // Buffer management
+ write_buffer: bun.OffsetByteList = .{},
+ read_buffer: bun.OffsetByteList = .{},
+
+ /// In-flight commands, after the data has been written to the network socket
+ in_flight: Command.PromisePair.Queue,
+
+    /// Commands waiting to be sent to the server; drained by the auto-pipelining flusher or serially after each reply.
+ queue: Command.Entry.Queue,
+
+ // Connection parameters
+ password: []const u8 = "",
+ username: []const u8 = "",
+ database: u32 = 0,
+ address: Address,
+
+ connection_strings: []u8 = &.{},
+
+ // TLS support
+ tls: TLS = .none,
+
+ // Timeout and reconnection management
+ idle_timeout_interval_ms: u32 = 0,
+ connection_timeout_ms: u32 = 0,
+ retry_attempts: u32 = 0,
+ max_retries: u32 = 20, // Maximum retry attempts
+
+ flags: ConnectionFlags = .{},
+ allocator: std.mem.Allocator,
+
+ // Auto-pipelining
+ auto_flusher: AutoFlusher = .{},
+
+ vm: *JSC.VirtualMachine,
+
+ /// Clean up resources used by the Valkey client
+ pub fn deinit(this: *@This(), globalObjectOrFinalizing: ?*JSC.JSGlobalObject) void {
+ var pending = this.in_flight;
+ this.in_flight = .init(this.allocator);
+ defer pending.deinit();
+ var commands = this.queue;
+ this.queue = .init(this.allocator);
+ defer commands.deinit();
+
+ if (globalObjectOrFinalizing) |globalThis| {
+ const object = protocol.valkeyErrorToJS(globalThis, "Connection closed", protocol.RedisError.ConnectionClosed);
+ for (pending.readableSlice(0)) |pair| {
+ var pair_ = pair;
+ pair_.rejectCommand(globalThis, object);
+ }
+
+ for (commands.readableSlice(0)) |cmd| {
+ var offline_cmd = cmd;
+ offline_cmd.promise.reject(globalThis, object);
+ offline_cmd.deinit(this.allocator);
+ }
+ } else {
+            // Finalizing; we can't call into JS.
+ for (pending.readableSlice(0)) |pair| {
+ var pair_ = pair;
+ pair_.promise.deinit();
+ }
+
+ for (commands.readableSlice(0)) |cmd| {
+ var offline_cmd = cmd;
+ offline_cmd.promise.deinit();
+ offline_cmd.deinit(this.allocator);
+ }
+ }
+
+ this.allocator.free(this.connection_strings);
+ this.write_buffer.deinit(this.allocator);
+ this.read_buffer.deinit(this.allocator);
+ this.tls.deinit();
+ this.unregisterAutoFlusher();
+ }
+
+ // ** Auto-pipelining **
+ fn registerAutoFlusher(this: *ValkeyClient, vm: *JSC.VirtualMachine) void {
+ if (!this.auto_flusher.registered) {
+ AutoFlusher.registerDeferredMicrotaskWithTypeUnchecked(@This(), this, vm);
+ this.auto_flusher.registered = true;
+ }
+ }
+
+ fn unregisterAutoFlusher(this: *ValkeyClient) void {
+ if (this.auto_flusher.registered) {
+ AutoFlusher.unregisterDeferredMicrotaskWithType(@This(), this, this.vm);
+ this.auto_flusher.registered = false;
+ }
+ }
+
+ // Drain auto-pipelined commands
+ pub fn onAutoFlush(this: *@This()) bool {
+        // Don't process unless connected
+ if (this.status != .connected) {
+ this.auto_flusher.registered = false;
+ return false;
+ }
+
+ this.ref();
+ defer this.deref();
+
+ // Start draining the command queue
+ var have_more = false;
+ var total_bytelength: usize = 0;
+
+ const pipelineable_commands: []Command.Entry = brk: {
+ var to_process = @constCast(this.queue.readableSlice(0));
+ var total: usize = 0;
+ for (to_process) |*command| {
+ if (!command.meta.supports_auto_pipelining) {
+ break;
+ }
+
+ this.in_flight.writeItem(.{
+ .meta = command.meta,
+ .promise = command.promise,
+ }) catch bun.outOfMemory();
+
+ total += 1;
+ total_bytelength += command.serialized_data.len;
+ }
+ break :brk to_process[0..total];
+ };
+
+ this.write_buffer.byte_list.ensureUnusedCapacity(this.allocator, total_bytelength) catch bun.outOfMemory();
+ for (pipelineable_commands) |*command| {
+ this.write_buffer.write(this.allocator, command.serialized_data) catch bun.outOfMemory();
+ // Free the serialized data since we've copied it to the write buffer
+ this.allocator.free(command.serialized_data);
+ }
+
+ this.queue.discard(pipelineable_commands.len);
+
+ _ = this.flushData();
+
+ have_more = this.queue.readableLength() > 0;
+ this.auto_flusher.registered = have_more;
+
+ // Return true if we should schedule another flush
+ return have_more;
+ }
+ // ** End of auto-pipelining **
+
+ /// Get the appropriate timeout interval based on connection state
+ pub fn getTimeoutInterval(this: *const ValkeyClient) u32 {
+ return switch (this.status) {
+ .connected => this.idle_timeout_interval_ms,
+ .failed => 0,
+ else => this.connection_timeout_ms,
+ };
+ }
+
+ pub fn hasAnyPendingCommands(this: *const ValkeyClient) bool {
+ return this.in_flight.readableLength() > 0 or
+ this.queue.readableLength() > 0 or
+ this.write_buffer.len() > 0 or
+ this.read_buffer.len() > 0;
+ }
+
+ /// Calculate reconnect delay with exponential backoff
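+    /// With a 50ms base delay doubling per attempt, attempts 1..7 yield 50, 100, 200, 400, 800,
+    /// 1600, 2000ms, after which the delay stays capped at 2000ms.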
+ pub fn getReconnectDelay(this: *const ValkeyClient) u32 {
+ const base_delay: u32 = 50; // Base delay in ms
+ const max_delay: u32 = 2000; // Max delay in ms
+
+        // Guard the backoff calculation against integer overflow
+ if (this.retry_attempts == 0) return base_delay;
+
+ // Cap at 10 attempts for backoff calculation to avoid overflow
+ const attempt = @min(this.retry_attempts, 10);
+
+ // Use a safer exponential backoff calculation
+ var delay: u32 = base_delay;
+ var i: u32 = 1;
+ while (i < attempt) : (i += 1) {
+ // Double the delay up to max_delay
+ delay = @min(delay * 2, max_delay);
+ }
+
+ return delay;
+ }
+
+ /// Reject all pending commands with an error
+ fn rejectAllPendingCommands(pending_ptr: *Command.PromisePair.Queue, entries_ptr: *Command.Entry.Queue, globalThis: *JSC.JSGlobalObject, allocator: std.mem.Allocator, jsvalue: JSC.JSValue) void {
+ var pending = pending_ptr.*;
+ var entries = entries_ptr.*;
+ defer pending.deinit();
+ defer entries.deinit();
+ pending_ptr.* = .init(allocator);
+ entries_ptr.* = .init(allocator);
+
+ // Reject commands in the command queue
+ for (pending.readableSlice(0)) |item| {
+ var command_pair = item;
+ command_pair.rejectCommand(globalThis, jsvalue);
+ }
+
+ // Reject commands in the offline queue
+ for (entries.readableSlice(0)) |item| {
+ var cmd = item;
+ cmd.promise.reject(globalThis, jsvalue);
+ cmd.deinit(allocator);
+ }
+ }
+
+ /// Flush pending data to the socket
+ pub fn flushData(this: *ValkeyClient) bool {
+ const chunk = this.write_buffer.remaining();
+ if (chunk.len == 0) return false;
+ const wrote = this.socket.write(chunk, false);
+ if (wrote > 0) {
+ this.write_buffer.consume(@intCast(wrote));
+ }
+ return this.write_buffer.len() > 0;
+ }
+
+ const DeferredFailure = struct {
+ message: []const u8,
+ err: protocol.RedisError,
+ globalThis: *JSC.JSGlobalObject,
+ in_flight: Command.PromisePair.Queue,
+ queue: Command.Entry.Queue,
+
+ pub fn run(this: *DeferredFailure) void {
+ defer {
+ bun.default_allocator.free(this.message);
+ bun.destroy(this);
+ }
+ debug("running deferred failure", .{});
+ const err = protocol.valkeyErrorToJS(this.globalThis, this.message, this.err);
+ rejectAllPendingCommands(&this.in_flight, &this.queue, this.globalThis, bun.default_allocator, err);
+ }
+
+ pub fn enqueue(this: *DeferredFailure) void {
+ debug("enqueueing deferred failure", .{});
+ const managed_task = JSC.ManagedTask.New(DeferredFailure, run).init(this);
+ JSC.VirtualMachine.get().eventLoop().enqueueTask(managed_task);
+ }
+ };
+
+ /// Mark the connection as failed with error message
+ pub fn fail(this: *ValkeyClient, message: []const u8, err: protocol.RedisError) void {
+ debug("failed: {s}: {s}", .{ message, @errorName(err) });
+ if (this.status == .failed) return;
+
+ if (this.flags.finalized) {
+ // We can't run promises inside finalizers.
+ if (this.queue.count + this.in_flight.count > 0) {
+ const vm = this.vm;
+                const deferred_failure = bun.new(DeferredFailure, .{
+                    // The incoming `message` slice is not owned by us, so copy it.
+                    .message = bun.default_allocator.dupe(u8, message) catch bun.outOfMemory(),
+
+ .err = err,
+ .globalThis = vm.global,
+ .in_flight = this.in_flight,
+ .queue = this.queue,
+ });
+ this.in_flight = .init(this.allocator);
+ this.queue = .init(this.allocator);
+                deferred_failure.enqueue();
+ }
+
+ // Allow the finalizer to call .close()
+ return;
+ }
+
+ const globalThis = this.globalObject();
+ this.failWithJSValue(globalThis, protocol.valkeyErrorToJS(globalThis, message, err));
+ }
+
+ pub fn failWithJSValue(this: *ValkeyClient, globalThis: *JSC.JSGlobalObject, jsvalue: JSC.JSValue) void {
+ this.status = .failed;
+ rejectAllPendingCommands(&this.in_flight, &this.queue, globalThis, this.allocator, jsvalue);
+
+ if (!this.flags.is_authenticated) {
+ this.flags.is_manually_closed = true;
+ this.close();
+ }
+ }
+
+ pub fn close(this: *ValkeyClient) void {
+ const socket = this.socket;
+ this.socket = .{ .SocketTCP = .detached };
+ socket.close();
+ }
+
+ /// Handle connection closed event
+ pub fn onClose(this: *ValkeyClient) void {
+ this.unregisterAutoFlusher();
+ this.write_buffer.deinit(this.allocator);
+
+ // If manually closing, don't attempt to reconnect
+ if (this.flags.is_manually_closed) {
+ debug("skip reconnecting since the connection is manually closed", .{});
+ this.fail("Connection closed", protocol.RedisError.ConnectionClosed);
+ this.onValkeyClose();
+ return;
+ }
+
+ // If auto reconnect is disabled, just fail
+ if (!this.flags.enable_auto_reconnect) {
+ debug("skip reconnecting since auto reconnect is disabled", .{});
+ this.fail("Connection closed", protocol.RedisError.ConnectionClosed);
+ this.onValkeyClose();
+ return;
+ }
+
+ // Calculate reconnection delay with exponential backoff
+ this.retry_attempts += 1;
+ const delay_ms = this.getReconnectDelay();
+
+ if (delay_ms == 0 or this.retry_attempts > this.max_retries) {
+ debug("Max retries reached or retry strategy returned 0, giving up reconnection", .{});
+ this.fail("Max reconnection attempts reached", protocol.RedisError.ConnectionClosed);
+ this.onValkeyClose();
+ return;
+ }
+
+ debug("reconnect in {d}ms (attempt {d}/{d})", .{ delay_ms, this.retry_attempts, this.max_retries });
+
+ this.status = .disconnected;
+ this.flags.is_reconnecting = true;
+
+ // Signal reconnect timer should be started
+ this.onValkeyReconnect();
+ }
+
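+    /// Decide what to write next: batch pipelineable commands via the auto-flusher, or drain a
+    /// single non-pipelineable command once nothing is in flight, then flush buffered bytes.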
+ pub fn sendNextCommand(this: *ValkeyClient) void {
+ if (this.write_buffer.remaining().len == 0 and this.flags.is_authenticated) {
+ if (this.queue.readableLength() > 0) {
+ // Check the command at the head of the queue
+ const flags = &this.queue.peekItem(0).meta;
+
+ if (!flags.supports_auto_pipelining) {
+ // Head is non-pipelineable. Try to drain it serially if nothing is in-flight.
+ if (this.in_flight.readableLength() == 0) {
+ _ = this.drain(); // Send the single non-pipelineable command
+
+ // After draining, check if the *new* head is pipelineable and schedule flush if needed.
+ // This covers sequences like NON_PIPE -> PIPE -> PIPE ...
+ if (this.queue.readableLength() > 0 and this.queue.peekItem(0).meta.supports_auto_pipelining) {
+ this.registerAutoFlusher(this.vm);
+ }
+ } else {
+ // Non-pipelineable command is blocked by in-flight commands. Do nothing, wait for in-flight to finish.
+ }
+ } else {
+ // Head is pipelineable. Register the flusher to batch it with others.
+ this.registerAutoFlusher(this.vm);
+ }
+ } else if (this.in_flight.readableLength() == 0) {
+ // Without auto pipelining, wait for in-flight to empty before draining
+ _ = this.drain();
+ }
+ }
+
+ _ = this.flushData();
+ }
+
+ /// Process data received from socket
+ pub fn onData(this: *ValkeyClient, data: []const u8) void {
+ // Caller refs / derefs.
+
+ // Path 1: Buffer already has data, append and process from buffer
+ if (this.read_buffer.remaining().len > 0) {
+ this.read_buffer.write(this.allocator, data) catch @panic("failed to write to read buffer");
+
+ // Process as many complete messages from the buffer as possible
+ while (true) {
+ const remaining_buffer = this.read_buffer.remaining();
+ if (remaining_buffer.len == 0) {
+ break; // Buffer processed completely
+ }
+
+ var reader = protocol.ValkeyReader.init(remaining_buffer);
+ const before_read_pos = reader.pos;
+
+ var value = reader.readValue(this.allocator) catch |err| {
+ if (err == error.InvalidResponse) {
+ // Need more data in the buffer, wait for next onData call
+ if (comptime bun.Environment.allow_assert) {
+ debug("read_buffer: needs more data ({d} bytes available)", .{remaining_buffer.len});
+ }
+ return;
+ } else {
+ this.fail("Failed to read data (buffer path)", err);
+ return;
+ }
+ };
+ defer value.deinit(this.allocator);
+
+ const bytes_consumed = reader.pos - before_read_pos;
+ if (bytes_consumed == 0 and remaining_buffer.len > 0) {
+ this.fail("Parser consumed 0 bytes unexpectedly (buffer path)", error.InvalidResponse);
+ return;
+ }
+
+ this.read_buffer.consume(@truncate(bytes_consumed));
+
+ var value_to_handle = value; // Use temp var for defer
+ this.handleResponse(&value_to_handle) catch |err| {
+ this.fail("Failed to handle response (buffer path)", err);
+ return;
+ };
+
+ if (this.status == .disconnected or this.status == .failed) {
+ return;
+ }
+ this.sendNextCommand();
+ }
+ return; // Finished processing buffered data for now
+ }
+
+ // Path 2: Buffer is empty, try processing directly from stack 'data'
+ var current_data_slice = data; // Create a mutable view of the incoming data
+ while (current_data_slice.len > 0) {
+ var reader = protocol.ValkeyReader.init(current_data_slice);
+ const before_read_pos = reader.pos;
+
+ var value = reader.readValue(this.allocator) catch |err| {
+ if (err == error.InvalidResponse) {
+ // Partial message encountered on the stack-allocated path.
+ // Copy the *remaining* part of the stack data to the heap buffer
+ // and wait for more data.
+ if (comptime bun.Environment.allow_assert) {
+ debug("read_buffer: partial message on stack ({d} bytes), switching to buffer", .{current_data_slice.len - before_read_pos});
+ }
+ this.read_buffer.write(this.allocator, current_data_slice[before_read_pos..]) catch @panic("failed to write remaining stack data to buffer");
+ return; // Exit onData, next call will use the buffer path
+ } else {
+ // Any other error is fatal
+ this.fail("Failed to read data (stack path)", err);
+ return;
+ }
+ };
+ // Successfully read a full message from the stack data
+ defer value.deinit(this.allocator);
+
+ const bytes_consumed = reader.pos - before_read_pos;
+ if (bytes_consumed == 0) {
+ // This case should ideally not happen if readValue succeeded and slice wasn't empty
+ this.fail("Parser consumed 0 bytes unexpectedly (stack path)", error.InvalidResponse);
+ return;
+ }
+
+ // Advance the view into the stack data slice for the next iteration
+ current_data_slice = current_data_slice[bytes_consumed..];
+
+ // Handle the successfully parsed response
+ var value_to_handle = value; // Use temp var for defer
+ this.handleResponse(&value_to_handle) catch |err| {
+ this.fail("Failed to handle response (stack path)", err);
+ return;
+ };
+
+ // Check connection status after handling
+ if (this.status == .disconnected or this.status == .failed) {
+ return;
+ }
+
+ // After handling a response, try to send the next command
+ this.sendNextCommand();
+
+ // Loop continues with the remainder of current_data_slice
+ }
+
+ // If the loop finishes, the entire 'data' was processed without needing the buffer.
+ }
+
+ fn handleHelloResponse(this: *ValkeyClient, value: *protocol.RESPValue) void {
+ debug("Processing HELLO response", .{});
+
+ switch (value.*) {
+ .Error => |err| {
+ this.fail(err, protocol.RedisError.AuthenticationFailed);
+ return;
+ },
+ .SimpleString => |str| {
+ if (std.mem.eql(u8, str, "OK")) {
+ this.status = .connected;
+ this.flags.is_authenticated = true;
+ this.onValkeyConnect(value);
+ return;
+ }
+ this.fail("Authentication failed (unexpected response)", protocol.RedisError.AuthenticationFailed);
+
+ return;
+ },
+ .Map => |map| {
+ // This is the HELLO response map
+ debug("Got HELLO response map with {d} entries", .{map.len});
+
+ // Process the Map response - find the protocol version
+ for (map) |*entry| {
+ switch (entry.key) {
+ .SimpleString => |key| {
+ if (std.mem.eql(u8, key, "proto")) {
+ if (entry.value == .Integer) {
+ const proto_version = entry.value.Integer;
+ debug("Server protocol version: {d}", .{proto_version});
+ if (proto_version != 3) {
+ this.fail("Server does not support RESP3", protocol.RedisError.UnsupportedProtocol);
+ return;
+ }
+ }
+ }
+ },
+ else => {},
+ }
+ }
+
+ // Authentication successful via HELLO
+ this.status = .connected;
+ this.flags.is_authenticated = true;
+ this.onValkeyConnect(value);
+ return;
+ },
+ else => {
+ this.fail("Authentication failed with unexpected response", protocol.RedisError.AuthenticationFailed);
+ return;
+ },
+ }
+ }
+
+ /// Handle Valkey protocol response
+ fn handleResponse(this: *ValkeyClient, value: *protocol.RESPValue) !void {
+ debug("onData() {any}", .{value.*});
+ // Special handling for the initial HELLO response
+ if (!this.flags.is_authenticated) {
+ this.handleHelloResponse(value);
+
+ // We've handled the HELLO response without consuming anything from the command queue
+ return;
+ }
+
+ // For regular commands, get the next command+promise pair from the queue
+ var pair = this.in_flight.readItem() orelse {
+ debug("Received response but no promise in queue", .{});
+ return;
+ };
+
+ const meta = pair.meta;
+
+ // Handle the response based on command type
+ if (meta.return_as_bool) {
+            // Commands like EXISTS and SISMEMBER reply with 1 or 0 - convert the integer to a boolean
+ if (value.* == .Integer) {
+ const int_value = value.Integer;
+ value.* = .{ .Boolean = int_value > 0 };
+ }
+ }
+
+ // Resolve the promise with the potentially transformed value
+ var promise_ptr = &pair.promise;
+ const globalThis = this.globalObject();
+ const loop = this.vm.eventLoop();
+
+ loop.enter();
+ defer loop.exit();
+
+ if (value.* == .Error) {
+ promise_ptr.reject(globalThis, value.toJS(globalThis) catch |err| globalThis.takeError(err));
+ } else {
+ promise_ptr.resolve(globalThis, value);
+ }
+ }
+
+ /// Send authentication command to Valkey server
+ fn authenticate(this: *ValkeyClient) void {
+ // First send HELLO command for RESP3 protocol
+ debug("Sending HELLO 3 command", .{});
+
+ var hello_args_buf: [4][]const u8 = .{ "3", "AUTH", "", "" };
+ var hello_args: []const []const u8 = undefined;
+
+ if (this.username.len > 0 or this.password.len > 0) {
+ hello_args_buf[0] = "3";
+ hello_args_buf[1] = "AUTH";
+
+ if (this.username.len > 0) {
+ hello_args_buf[2] = this.username;
+ hello_args_buf[3] = this.password;
+ } else {
+ hello_args_buf[2] = "default";
+ hello_args_buf[3] = this.password;
+ }
+
+ hello_args = hello_args_buf[0..4];
+ } else {
+ hello_args = hello_args_buf[0..1];
+ }
+
+ // Format and send the HELLO command without adding to command queue
+ // We'll handle this response specially in handleResponse
+ var hello_cmd = Command{
+ .command = "HELLO",
+ .args = .{ .raw = hello_args },
+ };
+
+ hello_cmd.write(this.writer()) catch |err| {
+ this.fail("Failed to write HELLO command", err);
+ return;
+ };
+
+ // If using a specific database, send SELECT command
+ if (this.database > 0) {
+ var int_buf: [64]u8 = undefined;
+ const db_str = std.fmt.bufPrintZ(&int_buf, "{d}", .{this.database}) catch unreachable;
+ var select_cmd = Command{
+ .command = "SELECT",
+ .args = .{ .raw = &[_][]const u8{db_str} },
+ };
+ select_cmd.write(this.writer()) catch |err| {
+ this.fail("Failed to write SELECT command", err);
+ return;
+ };
+ }
+ }
+
+ /// Handle socket open event
+ pub fn onOpen(this: *ValkeyClient, socket: uws.AnySocket) void {
+ this.socket = socket;
+ this.write_buffer.deinit(this.allocator);
+ this.read_buffer.deinit(this.allocator);
+ this.start();
+ }
+
+ /// Start the connection process
+ fn start(this: *ValkeyClient) void {
+ this.authenticate();
+ _ = this.flushData();
+ }
+
+ /// Process queued commands in the offline queue
+ pub fn drain(this: *ValkeyClient) bool {
+ // If there's something in the in-flight queue and the next command
+ // doesn't support pipelining, we should wait for in-flight commands to complete
+ if (this.in_flight.readableLength() > 0) {
+ const queue_slice = this.queue.readableSlice(0);
+ if (queue_slice.len > 0 and !queue_slice[0].meta.supports_auto_pipelining) {
+ return false;
+ }
+ }
+
+ const offline_cmd = this.queue.readItem() orelse return false;
+
+ // Add the promise to the command queue first
+ this.in_flight.writeItem(.{
+ .meta = offline_cmd.meta,
+ .promise = offline_cmd.promise,
+ }) catch bun.outOfMemory();
+ const data = offline_cmd.serialized_data;
+
+ if (this.flags.is_authenticated and this.write_buffer.remaining().len == 0) {
+ // Optimization: avoid cloning the data an extra time.
+ defer this.allocator.free(data);
+
+ const wrote = this.socket.write(data, false);
+ const unwritten = data[@intCast(@max(wrote, 0))..];
+
+ if (unwritten.len > 0) {
+ // Handle incomplete write.
+ this.write_buffer.write(this.allocator, unwritten) catch bun.outOfMemory();
+ }
+
+ return true;
+ }
+
+ // Write the pre-serialized data directly to the output buffer
+ _ = this.write(data) catch bun.outOfMemory();
+ bun.default_allocator.free(data);
+
+ return true;
+ }
+
+ pub fn onWritable(this: *ValkeyClient) void {
+ this.ref();
+ defer this.deref();
+
+ this.sendNextCommand();
+ }
+
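+    /// Either queue the command (offline queue, pipelining, or ordering constraints) or, when the
+    /// connection is authenticated and nothing else is pending, write it straight to the socket
+    /// and track it in `in_flight`.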
+ fn enqueue(this: *ValkeyClient, command: *const Command, promise: *Command.Promise) !void {
+ const can_pipeline = command.meta.supports_auto_pipelining and this.flags.auto_pipelining;
+
+ // For commands that don't support pipelining, we need to wait for the queue to drain completely
+ // before sending the command. This ensures proper order of execution for state-changing commands.
+ const must_wait_for_queue = !command.meta.supports_auto_pipelining and this.queue.readableLength() > 0;
+
+ if (
+ // If there are any pending commands, queue this one
+ this.queue.readableLength() > 0 or
+            // Without pipelining, in-flight commands must finish before we send another
+ (!can_pipeline and this.in_flight.readableLength() > 0) or
+ // We need authentication before processing commands
+ !this.flags.is_authenticated or
+ // Commands that don't support pipelining must wait for the entire queue to drain
+ must_wait_for_queue or
+            // With pipelining, always queue so the auto-flusher can batch commands
+ can_pipeline)
+ {
+            // We serialize the bytes here, so we don't need to worry about the lifetime of the Command itself.
+ try this.queue.writeItem(try Command.Entry.create(this.allocator, command, promise.*));
+
+ // If we're connected and using auto pipelining, schedule a flush
+ if (this.status == .connected and can_pipeline) {
+ this.registerAutoFlusher(this.vm);
+ }
+
+ return;
+ }
+
+ switch (this.status) {
+ .connecting, .connected => command.write(this.writer()) catch {
+ promise.reject(this.globalObject(), this.globalObject().createOutOfMemoryError());
+ return;
+ },
+ else => unreachable,
+ }
+
+ const cmd_pair = Command.PromisePair{
+ .meta = command.meta,
+ .promise = promise.*,
+ };
+
+ // Add to queue with command type
+ try this.in_flight.writeItem(cmd_pair);
+
+ _ = this.flushData();
+ }
+
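+    /// Entry point from the JS bindings: create a promise for the command and route it based on
+    /// connection status (enqueue while connecting/connected, offline-queue or reject otherwise).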
+ pub fn send(this: *ValkeyClient, globalThis: *JSC.JSGlobalObject, command: *const Command) !*JSC.JSPromise {
+ var promise = Command.Promise.create(globalThis, command.meta);
+
+ const js_promise = promise.promise.get();
+ // Handle disconnected state with offline queue
+ switch (this.status) {
+ .connecting, .connected => {
+ try this.enqueue(command, &promise);
+
+ // Schedule auto-flushing to process this command if pipelining is enabled
+ if (this.flags.auto_pipelining and
+ command.meta.supports_auto_pipelining and
+ this.status == .connected and
+ this.queue.readableLength() > 0)
+ {
+ this.registerAutoFlusher(this.vm);
+ }
+ },
+ .disconnected => {
+ // Only queue if offline queue is enabled
+ if (this.flags.enable_offline_queue) {
+ try this.enqueue(command, &promise);
+ } else {
+ promise.reject(globalThis, globalThis.ERR_REDIS_CONNECTION_CLOSED("Connection is closed and offline queue is disabled", .{}).toJS());
+ }
+ },
+ .failed => {
+ promise.reject(globalThis, globalThis.ERR_REDIS_CONNECTION_CLOSED("Connection has failed", .{}).toJS());
+ },
+ }
+
+ return js_promise;
+ }
+
+ /// Close the Valkey connection
+ pub fn disconnect(this: *ValkeyClient) void {
+ this.flags.is_manually_closed = true;
+ this.unregisterAutoFlusher();
+ if (this.status == .connected or this.status == .connecting) {
+ this.status = .disconnected;
+ this.close();
+ }
+ }
+
+ /// Get a writer for the connected socket
+ pub fn writer(this: *ValkeyClient) std.io.Writer(*ValkeyClient, protocol.RedisError, write) {
+ return .{ .context = this };
+ }
+
+ /// Write data to the socket buffer
+ fn write(this: *ValkeyClient, data: []const u8) !usize {
+ try this.write_buffer.write(this.allocator, data);
+ return data.len;
+ }
+
+ /// Increment reference count
+ pub fn ref(this: *ValkeyClient) void {
+ this.parent().ref();
+ }
+
+ pub fn deref(this: *ValkeyClient) void {
+ this.parent().deref();
+ }
+
+ inline fn parent(this: *ValkeyClient) *JSValkeyClient {
+ return @fieldParentPtr("client", this);
+ }
+
+ inline fn globalObject(this: *ValkeyClient) *JSC.JSGlobalObject {
+ return this.parent().globalObject;
+ }
+
+ pub fn onValkeyConnect(this: *ValkeyClient, value: *protocol.RESPValue) void {
+ this.parent().onValkeyConnect(value);
+ }
+
+ pub fn onValkeyReconnect(this: *ValkeyClient) void {
+ this.parent().onValkeyReconnect();
+ }
+
+ pub fn onValkeyClose(this: *ValkeyClient) void {
+ this.parent().onValkeyClose();
+ }
+
+ pub fn onValkeyTimeout(this: *ValkeyClient) void {
+ this.parent().onValkeyTimeout();
+ }
+};
+
+// Auto-pipelining
+const AutoFlusher = JSC.WebCore.AutoFlusher;
+
+const JSValkeyClient = JSC.API.Valkey;
+
+const JSC = bun.JSC;
+const std = @import("std");
+const bun = @import("root").bun;
+const protocol = @import("valkey_protocol.zig");
+const js_valkey = @import("js_valkey.zig");
+const debug = bun.Output.scoped(.Redis, false);
+const uws = bun.uws;
+const Slice = JSC.ZigString.Slice;
diff --git a/src/valkey/valkey_protocol.zig b/src/valkey/valkey_protocol.zig
new file mode 100644
index 0000000000..7e7e3cfb0e
--- /dev/null
+++ b/src/valkey/valkey_protocol.zig
@@ -0,0 +1,641 @@
+const std = @import("std");
+const bun = @import("root").bun;
+const JSC = bun.JSC;
+const String = bun.String;
+const debug = bun.Output.scoped(.Redis, false);
+
+pub const RedisError = error{
+ AuthenticationFailed,
+ ConnectionClosed,
+ InvalidArgument,
+ InvalidArray,
+ InvalidAttribute,
+ InvalidBigNumber,
+ InvalidBlobError,
+ InvalidBoolean,
+ InvalidBulkString,
+ InvalidCommand,
+ InvalidDouble,
+ InvalidErrorString,
+ InvalidInteger,
+ InvalidMap,
+ InvalidNull,
+ InvalidPush,
+ InvalidResponse,
+ InvalidResponseType,
+ InvalidSet,
+ InvalidSimpleString,
+ InvalidVerbatimString,
+ JSError,
+ OutOfMemory,
+ UnsupportedProtocol,
+ ConnectionTimeout,
+ IdleTimeout,
+};
+
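+/// Map a RedisError to the corresponding JS error constructor, formatting `message` when provided.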
+pub fn valkeyErrorToJS(globalObject: *JSC.JSGlobalObject, message: ?[]const u8, err: RedisError) JSC.JSValue {
+ const error_code: JSC.Error = switch (err) {
+ error.ConnectionClosed => JSC.Error.ERR_REDIS_CONNECTION_CLOSED,
+ error.InvalidResponse => JSC.Error.ERR_REDIS_INVALID_RESPONSE,
+ error.InvalidBulkString => JSC.Error.ERR_REDIS_INVALID_BULK_STRING,
+ error.InvalidArray => JSC.Error.ERR_REDIS_INVALID_ARRAY,
+ error.InvalidInteger => JSC.Error.ERR_REDIS_INVALID_INTEGER,
+ error.InvalidSimpleString => JSC.Error.ERR_REDIS_INVALID_SIMPLE_STRING,
+ error.InvalidErrorString => JSC.Error.ERR_REDIS_INVALID_ERROR_STRING,
+ error.InvalidDouble, error.InvalidBoolean, error.InvalidNull, error.InvalidMap, error.InvalidSet, error.InvalidBigNumber, error.InvalidVerbatimString, error.InvalidBlobError, error.InvalidAttribute, error.InvalidPush => JSC.Error.ERR_REDIS_INVALID_RESPONSE,
+ error.AuthenticationFailed => JSC.Error.ERR_REDIS_AUTHENTICATION_FAILED,
+ error.InvalidCommand => JSC.Error.ERR_REDIS_INVALID_COMMAND,
+ error.InvalidArgument => JSC.Error.ERR_REDIS_INVALID_ARGUMENT,
+ error.UnsupportedProtocol => JSC.Error.ERR_REDIS_INVALID_RESPONSE,
+ error.InvalidResponseType => JSC.Error.ERR_REDIS_INVALID_RESPONSE_TYPE,
+ error.ConnectionTimeout => JSC.Error.ERR_REDIS_CONNECTION_TIMEOUT,
+ error.IdleTimeout => JSC.Error.ERR_REDIS_IDLE_TIMEOUT,
+ error.JSError => {
+ return globalObject.takeException(error.JSError);
+ },
+ error.OutOfMemory => {
+ globalObject.throwOutOfMemory() catch {};
+ return globalObject.takeException(error.JSError);
+ },
+ };
+
+ if (message) |msg| {
+ return error_code.fmt(globalObject, "{s}", .{msg});
+ }
+ return error_code.fmt(globalObject, "Valkey error: {s}", .{@errorName(err)});
+}
+
+// RESP protocol types
+pub const RESPType = enum(u8) {
+ // RESP2 types
+ SimpleString = '+',
+ Error = '-',
+ Integer = ':',
+ BulkString = '$',
+ Array = '*',
+
+ // RESP3 types
+ Null = '_',
+ Double = ',',
+ Boolean = '#',
+ BlobError = '!',
+ VerbatimString = '=',
+ Map = '%',
+ Set = '~',
+ Attribute = '|',
+ Push = '>',
+ BigNumber = '(',
+
+ pub fn fromByte(byte: u8) ?RESPType {
+ return switch (byte) {
+ @intFromEnum(RESPType.SimpleString) => .SimpleString,
+ @intFromEnum(RESPType.Error) => .Error,
+ @intFromEnum(RESPType.Integer) => .Integer,
+ @intFromEnum(RESPType.BulkString) => .BulkString,
+ @intFromEnum(RESPType.Array) => .Array,
+ @intFromEnum(RESPType.Null) => .Null,
+ @intFromEnum(RESPType.Double) => .Double,
+ @intFromEnum(RESPType.Boolean) => .Boolean,
+ @intFromEnum(RESPType.BlobError) => .BlobError,
+ @intFromEnum(RESPType.VerbatimString) => .VerbatimString,
+ @intFromEnum(RESPType.Map) => .Map,
+ @intFromEnum(RESPType.Set) => .Set,
+ @intFromEnum(RESPType.Attribute) => .Attribute,
+ @intFromEnum(RESPType.Push) => .Push,
+ @intFromEnum(RESPType.BigNumber) => .BigNumber,
+ else => null,
+ };
+ }
+};
+
+pub const RESPValue = union(RESPType) {
+ // RESP2 types
+ SimpleString: []const u8,
+ Error: []const u8,
+ Integer: i64,
+ BulkString: ?[]const u8,
+ Array: []RESPValue,
+
+ // RESP3 types
+ Null: void,
+ Double: f64,
+ Boolean: bool,
+ BlobError: []const u8,
+ VerbatimString: VerbatimString,
+ Map: []MapEntry,
+ Set: []RESPValue,
+ Attribute: Attribute,
+ Push: Push,
+ BigNumber: []const u8,
+
+ pub fn deinit(self: *RESPValue, allocator: std.mem.Allocator) void {
+ switch (self.*) {
+ .SimpleString => |str| allocator.free(str),
+ .Error => |str| allocator.free(str),
+ .Integer => {},
+ .BulkString => |maybe_str| if (maybe_str) |str| allocator.free(str),
+ .Array => |array| {
+ for (array) |*value| {
+ value.deinit(allocator);
+ }
+ allocator.free(array);
+ },
+ .Null => {},
+ .Double => {},
+ .Boolean => {},
+ .BlobError => |str| allocator.free(str),
+ .VerbatimString => |*verbatim| {
+ allocator.free(verbatim.format);
+ allocator.free(verbatim.content);
+ },
+ .Map => |entries| {
+ for (entries) |*entry| {
+ entry.deinit(allocator);
+ }
+ allocator.free(entries);
+ },
+ .Set => |set| {
+ for (set) |*value| {
+ value.deinit(allocator);
+ }
+ allocator.free(set);
+ },
+ .Attribute => |*attribute| {
+ attribute.deinit(allocator);
+ },
+ .Push => |*push| {
+ push.deinit(allocator);
+ },
+ .BigNumber => |str| allocator.free(str),
+ }
+ }
+
+ pub fn format(self: @This(), comptime _: []const u8, options: anytype, writer: anytype) !void {
+ switch (self) {
+ .SimpleString => |str| try writer.writeAll(str),
+ .Error => |str| try writer.writeAll(str),
+ .Integer => |int| try writer.print("{d}", .{int}),
+ .BulkString => |maybe_str| {
+ if (maybe_str) |str| {
+ try writer.writeAll(str);
+ } else {
+ try writer.writeAll("(nil)");
+ }
+ },
+ .Array => |array| {
+ try writer.writeAll("[");
+ for (array, 0..) |value, i| {
+ if (i > 0) try writer.writeAll(", ");
+ try value.format("", options, writer);
+ }
+ try writer.writeAll("]");
+ },
+ .Null => try writer.writeAll("(nil)"),
+ .Double => |d| try writer.print("{d}", .{d}),
+ .Boolean => |b| try writer.print("{}", .{b}),
+ .BlobError => |str| try writer.print("Error: {s}", .{str}),
+ .VerbatimString => |verbatim| try writer.print("{s}:{s}", .{ verbatim.format, verbatim.content }),
+ .Map => |entries| {
+ try writer.writeAll("{");
+ for (entries, 0..) |entry, i| {
+ if (i > 0) try writer.writeAll(", ");
+ try entry.key.format("", options, writer);
+ try writer.writeAll(": ");
+ try entry.value.format("", options, writer);
+ }
+ try writer.writeAll("}");
+ },
+ .Set => |set| {
+ try writer.writeAll("Set{");
+ for (set, 0..) |value, i| {
+ if (i > 0) try writer.writeAll(", ");
+ try value.format("", options, writer);
+ }
+ try writer.writeAll("}");
+ },
+ .Attribute => |attribute| {
+ try writer.writeAll("(Attr: ");
+ try writer.writeAll("{");
+ for (attribute.attributes, 0..) |entry, i| {
+ if (i > 0) try writer.writeAll(", ");
+ try entry.key.format("", options, writer);
+ try writer.writeAll(": ");
+ try entry.value.format("", options, writer);
+ }
+ try writer.writeAll("} => ");
+ try attribute.value.format("", options, writer);
+ try writer.writeAll(")");
+ },
+ .Push => |push| {
+ try writer.print("Push({s}: [", .{push.kind});
+ for (push.data, 0..) |value, i| {
+ if (i > 0) try writer.writeAll(", ");
+ try value.format("", options, writer);
+ }
+ try writer.writeAll("])");
+ },
+ .BigNumber => |str| try writer.print("BigNumber({s})", .{str}),
+ }
+ }
+
+ // Convert RESPValue to JSValue
+ pub fn toJS(self: *RESPValue, globalObject: *JSC.JSGlobalObject) bun.JSError!JSC.JSValue {
+ switch (self.*) {
+ .SimpleString => |str| return bun.String.createUTF8ForJS(globalObject, str),
+ .Error => |str| return valkeyErrorToJS(globalObject, str, RedisError.InvalidResponse),
+ .Integer => |int| return JSC.JSValue.jsNumber(int),
+ .BulkString => |maybe_str| {
+ if (maybe_str) |str| {
+ return bun.String.createUTF8ForJS(globalObject, str);
+ } else {
+ return JSC.JSValue.jsNull();
+ }
+ },
+ .Array => |array| {
+ var js_array = JSC.JSValue.createEmptyArray(globalObject, array.len);
+ for (array, 0..) |*item, i| {
+ const js_item = try item.toJS(globalObject);
+ js_array.putIndex(globalObject, @intCast(i), js_item);
+ }
+ return js_array;
+ },
+ .Null => return JSC.JSValue.jsNull(),
+ .Double => |d| return JSC.JSValue.jsNumber(d),
+ .Boolean => |b| return JSC.JSValue.jsBoolean(b),
+ .BlobError => |str| return valkeyErrorToJS(globalObject, str, RedisError.InvalidBlobError),
+ .VerbatimString => |verbatim| return bun.String.createUTF8ForJS(globalObject, verbatim.content),
+ .Map => |entries| {
+ var js_obj = JSC.JSValue.createEmptyObjectWithNullPrototype(globalObject);
+ for (entries) |*entry| {
+ const js_key = try entry.key.toJS(globalObject);
+ var key_str = try js_key.toBunString(globalObject);
+ defer key_str.deref();
+ const js_value = try entry.value.toJS(globalObject);
+
+ js_obj.putMayBeIndex(globalObject, &key_str, js_value);
+ }
+ return js_obj;
+ },
+ .Set => |set| {
+ var js_array = JSC.JSValue.createEmptyArray(globalObject, set.len);
+ for (set, 0..) |*item, i| {
+ const js_item = try item.toJS(globalObject);
+ js_array.putIndex(globalObject, @intCast(i), js_item);
+ }
+ return js_array;
+ },
+ .Attribute => |attribute| {
+ // For now, we just return the value and ignore attributes
+ // In the future, we could attach the attributes as a hidden property
+ return try attribute.value.toJS(globalObject);
+ },
+ .Push => |push| {
+ var js_obj = JSC.JSValue.createEmptyObjectWithNullPrototype(globalObject);
+
+ // Add the push type
+ const kind_str = bun.String.createUTF8ForJS(globalObject, push.kind);
+ js_obj.put(globalObject, "type", kind_str);
+
+ // Add the data as an array
+ var data_array = JSC.JSValue.createEmptyArray(globalObject, push.data.len);
+ for (push.data, 0..) |*item, i| {
+ const js_item = try item.toJS(globalObject);
+ data_array.putIndex(globalObject, @intCast(i), js_item);
+ }
+ js_obj.put(globalObject, "data", data_array);
+
+ return js_obj;
+ },
+ .BigNumber => |str| {
+ // Try to parse as number if possible
+ if (std.fmt.parseInt(i64, str, 10)) |int| {
+ return JSC.JSValue.jsNumber(int);
+ } else |_| {
+ // If it doesn't fit in an i64, return as string
+ return bun.String.createUTF8ForJS(globalObject, str);
+ }
+ },
+ }
+ }
+};
+
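+ /// Reads RESP2/RESP3 frames from a byte buffer; pos tracks how many bytes have been consumed so far.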
+pub const ValkeyReader = struct {
+ buffer: []const u8,
+ pos: usize = 0,
+
+ pub fn init(buffer: []const u8) ValkeyReader {
+ return .{
+ .buffer = buffer,
+ };
+ }
+
+ pub fn readByte(self: *ValkeyReader) RedisError!u8 {
+ if (self.pos >= self.buffer.len) return error.InvalidResponse;
+ const byte = self.buffer[self.pos];
+ self.pos += 1;
+ return byte;
+ }
+
+ pub fn readUntilCRLF(self: *ValkeyReader) RedisError![]const u8 {
+ const buffer = self.buffer[self.pos..];
+ for (buffer, 0..) |byte, i| {
+ if (byte == '\r' and buffer.len > i + 1 and buffer[i + 1] == '\n') {
+ const result = buffer[0..i];
+ self.pos += i + 2;
+ return result;
+ }
+ }
+
+ return error.InvalidResponse;
+ }
+
+ pub fn readInteger(self: *ValkeyReader) RedisError!i64 {
+ const str = try self.readUntilCRLF();
+ return std.fmt.parseInt(i64, str, 10) catch return error.InvalidInteger;
+ }
+
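+ /// Parses a RESP3 double line such as ",3.14\r\n" (the ',' byte is consumed by the caller); "inf", "-inf", and "nan" are allowed.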
+ pub fn readDouble(self: *ValkeyReader) RedisError!f64 {
+ const str = try self.readUntilCRLF();
+
+ // Handle special values
+ if (std.mem.eql(u8, str, "inf")) return std.math.inf(f64);
+ if (std.mem.eql(u8, str, "-inf")) return -std.math.inf(f64);
+ if (std.mem.eql(u8, str, "nan")) return std.math.nan(f64);
+
+ // Parse normal double
+ return std.fmt.parseFloat(f64, str) catch return error.InvalidDouble;
+ }
+
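+ /// Parses a RESP3 boolean; on the wire this is "#t\r\n" or "#f\r\n", so only "t" or "f" remains here.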
+ pub fn readBoolean(self: *ValkeyReader) RedisError!bool {
+ const str = try self.readUntilCRLF();
+ if (str.len != 1) return error.InvalidBoolean;
+
+ return switch (str[0]) {
+ 't' => true,
+ 'f' => false,
+ else => error.InvalidBoolean,
+ };
+ }
+
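+ /// Parses a RESP3 verbatim string, e.g. "=15\r\ntxt:Some string\r\n" yields format "txt" and content "Some string".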
+ pub fn readVerbatimString(self: *ValkeyReader, allocator: std.mem.Allocator) RedisError!VerbatimString {
+ const len = try self.readInteger();
+ if (len < 0) return error.InvalidVerbatimString;
+ if (self.pos + @as(usize, @intCast(len)) > self.buffer.len) return error.InvalidVerbatimString;
+
+ const content_with_format = self.buffer[self.pos .. self.pos + @as(usize, @intCast(len))];
+ self.pos += @as(usize, @intCast(len));
+
+ // Expect CRLF after content
+ const crlf = try self.readUntilCRLF();
+ if (crlf.len != 0) return error.InvalidVerbatimString;
+
+ // Format should be "xxx:" followed by content
+ if (content_with_format.len < 4 or content_with_format[3] != ':') {
+ return error.InvalidVerbatimString;
+ }
+
+ const format = try allocator.dupe(u8, content_with_format[0..3]);
+ const content = try allocator.dupe(u8, content_with_format[4..]);
+
+ return VerbatimString{
+ .format = format,
+ .content = content,
+ };
+ }
+
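+ /// Decodes one complete RESP value starting at its type byte, e.g. "+OK\r\n", ":1000\r\n", or "$5\r\nhello\r\n".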
+ pub fn readValue(self: *ValkeyReader, allocator: std.mem.Allocator) RedisError!RESPValue {
+ const type_byte = try self.readByte();
+
+ return switch (RESPType.fromByte(type_byte) orelse return error.InvalidResponseType) {
+ // RESP2 types
+ .SimpleString => {
+ const str = try self.readUntilCRLF();
+ const owned = try allocator.dupe(u8, str);
+ return RESPValue{ .SimpleString = owned };
+ },
+ .Error => {
+ const str = try self.readUntilCRLF();
+ const owned = try allocator.dupe(u8, str);
+ return RESPValue{ .Error = owned };
+ },
+ .Integer => {
+ const int = try self.readInteger();
+ return RESPValue{ .Integer = int };
+ },
+ .BulkString => {
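+ // e.g. "$5\r\nhello\r\n"; a negative length ("$-1\r\n") encodes a null bulk string.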
+ const len = try self.readInteger();
+ if (len < 0) return RESPValue{ .BulkString = null };
+ if (self.pos + @as(usize, @intCast(len)) > self.buffer.len) return error.InvalidResponse;
+ const str = self.buffer[self.pos .. self.pos + @as(usize, @intCast(len))];
+ self.pos += @as(usize, @intCast(len));
+ const crlf = try self.readUntilCRLF();
+ if (crlf.len != 0) return error.InvalidBulkString;
+ const owned = try allocator.dupe(u8, str);
+ return RESPValue{ .BulkString = owned };
+ },
+ .Array => {
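+ // e.g. "*2\r\n" followed by two nested values; a negative length (null array) is decoded as an empty array.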
+ const len = try self.readInteger();
+ if (len < 0) return RESPValue{ .Array = &[_]RESPValue{} };
+ const array = try allocator.alloc(RESPValue, @as(usize, @intCast(len)));
+ errdefer allocator.free(array);
+ var i: usize = 0;
+ errdefer {
+ for (array[0..i]) |*item| {
+ item.deinit(allocator);
+ }
+ }
+ while (i < len) : (i += 1) {
+ array[i] = try self.readValue(allocator);
+ }
+ return RESPValue{ .Array = array };
+ },
+
+ // RESP3 types
+ .Null => {
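+ // RESP3 null is simply "_\r\n".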
+ _ = try self.readUntilCRLF(); // Read and discard CRLF
+ return RESPValue{ .Null = {} };
+ },
+ .Double => {
+ const d = try self.readDouble();
+ return RESPValue{ .Double = d };
+ },
+ .Boolean => {
+ const b = try self.readBoolean();
+ return RESPValue{ .Boolean = b };
+ },
+ .BlobError => {
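+ // e.g. "!21\r\nSYNTAX invalid syntax\r\n"; framed like a bulk string but surfaced as an error.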
+ const len = try self.readInteger();
+ if (len < 0) return error.InvalidBlobError;
+ if (self.pos + @as(usize, @intCast(len)) > self.buffer.len) return error.InvalidBlobError;
+ const str = self.buffer[self.pos .. self.pos + @as(usize, @intCast(len))];
+ self.pos += @as(usize, @intCast(len));
+ const crlf = try self.readUntilCRLF();
+ if (crlf.len != 0) return error.InvalidBlobError;
+ const owned = try allocator.dupe(u8, str);
+ return RESPValue{ .BlobError = owned };
+ },
+ .VerbatimString => {
+ return RESPValue{ .VerbatimString = try self.readVerbatimString(allocator) };
+ },
+ .Map => {
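+ // e.g. "%2\r\n" followed by two key/value pairs, i.e. four nested values.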
+ const len = try self.readInteger();
+ if (len < 0) return error.InvalidMap;
+
+ const entries = try allocator.alloc(MapEntry, @as(usize, @intCast(len)));
+ errdefer allocator.free(entries);
+ var i: usize = 0;
+ errdefer {
+ for (entries[0..i]) |*entry| {
+ entry.deinit(allocator);
+ }
+ }
+
+ while (i < len) : (i += 1) {
+ entries[i] = .{ .key = try self.readValue(allocator), .value = try self.readValue(allocator) };
+ }
+ return RESPValue{ .Map = entries };
+ },
+ .Set => {
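+ // e.g. "~3\r\n" followed by three nested values; decoded like an array.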
+ const len = try self.readInteger();
+ if (len < 0) return error.InvalidSet;
+
+ var set = try allocator.alloc(RESPValue, @as(usize, @intCast(len)));
+ errdefer allocator.free(set);
+ var i: usize = 0;
+ errdefer {
+ for (set[0..i]) |*item| {
+ item.deinit(allocator);
+ }
+ }
+ while (i < len) : (i += 1) {
+ set[i] = try self.readValue(allocator);
+ }
+ return RESPValue{ .Set = set };
+ },
+ .Attribute => {
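+ // e.g. "|1\r\n" followed by one key/value pair, then the actual reply the attributes annotate.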
+ const len = try self.readInteger();
+ if (len < 0) return error.InvalidAttribute;
+
+ var attrs = try allocator.alloc(MapEntry, @as(usize, @intCast(len)));
+ errdefer allocator.free(attrs);
+ var i: usize = 0;
+ errdefer {
+ for (attrs[0..i]) |*entry| {
+ entry.deinit(allocator);
+ }
+ }
+ while (i < len) : (i += 1) {
+ var key = try self.readValue(allocator);
+ errdefer key.deinit(allocator);
+ const value = try self.readValue(allocator);
+ attrs[i] = .{ .key = key, .value = value };
+ }
+
+ // Read the actual value that follows the attributes
+ const value_ptr = try allocator.create(RESPValue);
+ errdefer {
+ allocator.destroy(value_ptr);
+ }
+ value_ptr.* = try self.readValue(allocator);
+
+ return RESPValue{ .Attribute = .{
+ .attributes = attrs,
+ .value = value_ptr,
+ } };
+ },
+ .Push => {
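+ // e.g. ">3\r\n" where the first element names the push kind, such as "message" for pub/sub.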
+ const len = try self.readInteger();
+ if (len <= 0) return error.InvalidPush;
+
+ // First element is the push type
+ var push_type = try self.readValue(allocator);
+ // Free the temporary value once its kind string has been duplicated below; otherwise it leaks.
+ defer push_type.deinit(allocator);
+ var push_type_str: []const u8 = "";
+
+ switch (push_type) {
+ .SimpleString => |str| push_type_str = str,
+ .BulkString => |maybe_str| {
+ if (maybe_str) |str| {
+ push_type_str = str;
+ } else {
+ return error.InvalidPush;
+ }
+ },
+ else => return error.InvalidPush,
+ }
+
+ // Copy the push type string since the original will be freed
+ const push_type_dup = try allocator.dupe(u8, push_type_str);
+ errdefer allocator.free(push_type_dup);
+
+ // Read the rest of the data
+ var data = try allocator.alloc(RESPValue, @as(usize, @intCast(len - 1)));
+ errdefer allocator.free(data);
+ var i: usize = 0;
+ errdefer {
+ for (data[0..i]) |*item| {
+ item.deinit(allocator);
+ }
+ }
+ while (i < len - 1) : (i += 1) {
+ data[i] = try self.readValue(allocator);
+ }
+
+ return RESPValue{ .Push = .{
+ .kind = push_type_dup,
+ .data = data,
+ } };
+ },
+ .BigNumber => {
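+ // e.g. "(3492890328409238509324850943850943825024385\r\n"; kept as its decimal string.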
+ const str = try self.readUntilCRLF();
+ const owned = try allocator.dupe(u8, str);
+ return RESPValue{ .BigNumber = owned };
+ },
+ };
+ }
+};
+
+pub const MapEntry = struct {
+ key: RESPValue,
+ value: RESPValue,
+
+ pub fn deinit(self: *MapEntry, allocator: std.mem.Allocator) void {
+ self.key.deinit(allocator);
+ self.value.deinit(allocator);
+ }
+};
+
+pub const VerbatimString = struct {
+ format: []const u8, // e.g. "txt" or "mkd"
+ content: []const u8,
+
+ pub fn deinit(self: *VerbatimString, allocator: std.mem.Allocator) void {
+ allocator.free(self.format);
+ allocator.free(self.content);
+ }
+};
+
+pub const Push = struct {
+ kind: []const u8,
+ data: []RESPValue,
+
+ pub fn deinit(self: *Push, allocator: std.mem.Allocator) void {
+ allocator.free(self.kind);
+ for (self.data) |*item| {
+ item.deinit(allocator);
+ }
+ allocator.free(self.data);
+ }
+};
+pub const Attribute = struct {
+ attributes: []MapEntry,
+ value: *RESPValue,
+
+ pub fn deinit(self: *Attribute, allocator: std.mem.Allocator) void {
+ for (self.attributes) |*entry| {
+ entry.deinit(allocator);
+ }
+ allocator.free(self.attributes);
+ self.value.deinit(allocator);
+ allocator.destroy(self.value);
+ }
+};
diff --git a/test/internal/ban-words.test.ts b/test/internal/ban-words.test.ts
index 43350d79fd..35bc790c57 100644
--- a/test/internal/ban-words.test.ts
+++ b/test/internal/ban-words.test.ts
@@ -28,7 +28,7 @@ const words: Record
"== alloc.ptr": { reason: "The std.mem.Allocator context pointer can be undefined, which makes this comparison undefined behavior" },
"!= alloc.ptr": { reason: "The std.mem.Allocator context pointer can be undefined, which makes this comparison undefined behavior" },
[String.raw`: [a-zA-Z0-9_\.\*\?\[\]\(\)]+ = undefined,`]: { reason: "Do not default a struct field to undefined", limit: 245, regex: true },
- "usingnamespace": { reason: "Zig deprecates this, and will not support it in incremental compilation.", limit: 492 },
+ "usingnamespace": { reason: "Zig deprecates this, and will not support it in incremental compilation.", limit: 494 },
};
const words_keys = [...Object.keys(words)];
diff --git a/test/js/valkey/docker-tls/Dockerfile b/test/js/valkey/docker-tls/Dockerfile
new file mode 100644
index 0000000000..6b043ca1d6
--- /dev/null
+++ b/test/js/valkey/docker-tls/Dockerfile
@@ -0,0 +1,59 @@
+# Dockerfile for Valkey/Redis with TLS support
+FROM redis:7-alpine
+
+# Install bash for initialization scripts
+RUN apk add --no-cache bash
+
+# Create the certificate and init-script directories (the base image does not ship /docker-entrypoint-initdb.d)
+RUN mkdir -p /etc/redis/certs /docker-entrypoint-initdb.d
+
+# Copy certificates
+COPY server.key /etc/redis/certs/
+COPY server.crt /etc/redis/certs/
+
+# Create initialization script
+RUN echo '#!/bin/bash\n\
+set -e\n\
+\n\
+# Wait for Redis to start\n\
+until redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt ping; do\n\
+ echo "Waiting for Redis TLS to start..."\n\
+ sleep 1\n\
+done\n\
+\n\
+echo "Redis TLS is ready!"\n\
+\n\
+# Set up some test data for persistence tests\n\
+redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt set bun_valkey_tls_test_init "initialization_successful"\n\
+\n\
+# Create test hash\n\
+redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt hset bun_valkey_tls_test_hash name "test_user" age "25" active "true"\n\
+\n\
+# Create test set\n\
+redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt sadd bun_valkey_tls_test_set "red" "green" "blue"\n\
+\n\
+# Create test list\n\
+redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt lpush bun_valkey_tls_test_list "first" "second" "third"\n\
+' > /docker-entrypoint-initdb.d/init-redis.sh
+
+# Make the script executable
+RUN chmod +x /docker-entrypoint-initdb.d/init-redis.sh
+
+# Configure Redis
+RUN echo "bind 0.0.0.0" > /etc/redis/redis.conf && \
+ echo "protected-mode no" >> /etc/redis/redis.conf && \
+ echo "appendonly yes" >> /etc/redis/redis.conf && \
+ echo "tls-port 6380" >> /etc/redis/redis.conf && \
+ echo "port 0" >> /etc/redis/redis.conf && \
+ echo "tls-cert-file /etc/redis/certs/server.crt" >> /etc/redis/redis.conf && \
+ echo "tls-key-file /etc/redis/certs/server.key" >> /etc/redis/redis.conf && \
+ echo "tls-ca-cert-file /etc/redis/certs/server.crt" >> /etc/redis/redis.conf && \
+ echo "unixsocket /tmp/redis.sock" >> /etc/redis/redis.conf && \
+ echo "unixsocketperm 777" >> /etc/redis/redis.conf
+
+# Expose TLS port and create volume for Unix socket
+EXPOSE 6380
+VOLUME /tmp
+
+# Start Redis with the TLS configuration
+CMD ["redis-server", "/etc/redis/redis.conf"]
\ No newline at end of file
diff --git a/test/js/valkey/docker-tls/server.crt b/test/js/valkey/docker-tls/server.crt
new file mode 100644
index 0000000000..4e79fbc049
--- /dev/null
+++ b/test/js/valkey/docker-tls/server.crt
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDHzCCAgegAwIBAgIUOvkvGE7rI3OXABlz71VQMatWElgwDQYJKoZIhvcNAQEL
+BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI1MDQwNzEwMjgzN1oXDTI2MDQw
+NzEwMjgzN1owFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEA88KqRAdx13qQcROKeSotdpfUPzPekpbNfetNZjBsmf6N
+t4mtAhnAaJpPKkWvs1pDA5/qD3ZxAcLEa31y9AY76TvgZKq0yiD2MTOYFFTcstx5
+Voi2MLrSYN8Xobq7K7r5zQD7TrHEu0S3sSdA8GDtyrx2W8owuNtqUt1oBDRYRZoT
+Nu3/bwjzuBGUrrdYwzBQvr5XOA3v2yAexgffOeSz8cZtvR+BL0sxu6SDN1VpQe//
+KHQy1jZEHX0mOvRoB+95MfxHEgC7O8fYcrxsHkpjvacjh7TrOllbkcEAmr/exOCw
+MLnZl57Xi7bQMVAPM1TR41mSmvHessPuPXCrzVKn0QIDAQABo2kwZzAdBgNVHQ4E
+FgQUJszPLUfpqnggGY7NuVuGl388G44wHwYDVR0jBBgwFoAUJszPLUfpqnggGY7N
+uVuGl388G44wDwYDVR0TAQH/BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3Qw
+DQYJKoZIhvcNAQELBQADggEBAG4R3o6EZnODINfNIrM+Cag9ATmyEqm4MNMTyH9e
+58ltgU+k5RQKBywdxlC/71BW2I4lsbMz8qS+fcFTOC5a87rEO2qCWFw9Ew4mKJkA
+4gz1RBS4xShNyQewYV2U+ZhrDqp5tnwn+ZXGgMN5Jl0EwNeL6Q5U0zERfDbaE4xZ
+BHrGvnHh9Rm7nkSG9uAIITJ71uKjO5ogPgzzPe++47Xug0o4e3gn3De7WATaSuYa
+Oe9sIYB1YuZoQQoa1u+74+sguKV8/RdkP0rxaSKuGl8KUooNH6MLPnT8n+y+7mQS
+gIAFeezbuqGrFPL2P6ZXmEX39Tlz9f9OmqpmzruUZd1lvBs=
+-----END CERTIFICATE-----
diff --git a/test/js/valkey/docker-tls/server.key b/test/js/valkey/docker-tls/server.key
new file mode 100644
index 0000000000..8185ea8b0c
--- /dev/null
+++ b/test/js/valkey/docker-tls/server.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDzwqpEB3HXepBx
+E4p5Ki12l9Q/M96Sls19601mMGyZ/o23ia0CGcBomk8qRa+zWkMDn+oPdnEBwsRr
+fXL0BjvpO+BkqrTKIPYxM5gUVNyy3HlWiLYwutJg3xehursruvnNAPtOscS7RLex
+J0DwYO3KvHZbyjC422pS3WgENFhFmhM27f9vCPO4EZSut1jDMFC+vlc4De/bIB7G
+B9855LPxxm29H4EvSzG7pIM3VWlB7/8odDLWNkQdfSY69GgH73kx/EcSALs7x9hy
+vGweSmO9pyOHtOs6WVuRwQCav97E4LAwudmXnteLttAxUA8zVNHjWZKa8d6yw+49
+cKvNUqfRAgMBAAECggEAVuBq6asTll4+66YwxKVVJb7QLSRx76HipD3ATKr2kd3p
+KWBesnB2JHHWxDSo/c2uM7UDaTZn6V4+viasWS99m8801vwGSkH8LKX8Tka+j9rH
+PiGkeXKkN1VbqU8RlXDixf9TEgWGnc3MgE2Ctgl9xrNrpaRGwCOnXdg+Ub1MNqWU
+nmxMGP+G2b0ZgbirwECpdGIOvdeygQ8/Jo6brbGaPcewo1/5545wWHPpb+zXrBra
+E//3i6geb9NJUAMKl0cTURoZWY3pF4yElV2ZZcE+cH5/fesYN7ZTZAccvo1lBPjy
+OiC/fEuhSAH5iobH1KPrQbOby2YdUUOxg3dXR6qV5wKBgQD88jFV/a4togqaEv9x
+yoCOtSE6CQmRZmCGhZSk60yR6JdFWQ1Uvc8brJdc2MRC/WRWr+nI6jGgobEbMZFO
+KjHKALwOokFiSzj8I9u+IxGLM7TuGJFtKAT3AEwbQrWp3CxCrKsrIYwVkSwtViG0
+cOVXQCkmuw3ewneHdnnWxiJq/wKBgQD2tBTQRWc/fOeR9TucGp6yQpFQaYL8qA4m
+FpihxbX2JcTS8ge28ggc+c1seMqYfSBtnIRCWWdDB7BjquQT/lnQqZq5TEW1/FbD
+9MML0VCM5TCAH1v6rcgMRRpsRYhB+vhbLcenSZGQoojozxEQts8/1HCSHTw0PzjK
+dRVClb39LwKBgHGdY7WpPZw3paVxFRYKjFYNW8BSoN6TapXh2FN/cSQ0ogW/KzK+
+ExHuIwrMPtOMN46Mc2kQcHwjRIbfa9H9N+HxFIdKMC4zdYQjoyczX0T0U7eCh4fN
+KvW7R3QTMb/7KlJEdpnn9qEVVQ+EGZ2P/COFqTZBXMiK9t98wttKodkHAoGAfGV1
+kUdNto+u3MRBWId7ufsi9t8dM3UyHTaLpBbjl8iXpJ5yEWede67iTG3kClwdu+eO
+MT6PeRcpdDg5ZXN9ql+7KvAwvoEM5yZGK3FSIpl2iURGxvJVywoVNr8g49Q+4wsE
+f2/zPHEYg/vVaQ4lFtRyJtsi/l1ar4u2Oqry7/UCgYEAh9+0SIBhdVny48EqQXmi
+5WFiKhb5BcEUuLFlZji/z+y6a7LopCisDoegvboiDByxPizbKGzIChxLqO3SQKXb
+kKiGsAITmMZ2Kl6jRhMUDgq4/DsjBo/h3guk+xQZ2DHtT9v1FLvBqCi8poP4XUMy
+9BbnrT03dl7N2+9fIdjnC9Y=
+-----END PRIVATE KEY-----
diff --git a/test/js/valkey/docker-unified/Dockerfile b/test/js/valkey/docker-unified/Dockerfile
new file mode 100644
index 0000000000..7b03eba6fa
--- /dev/null
+++ b/test/js/valkey/docker-unified/Dockerfile
@@ -0,0 +1,31 @@
+# Unified Dockerfile for Valkey/Redis with TCP, TLS, Unix socket, and authentication support
+FROM redis:7-alpine
+
+# Set user to root
+USER root
+
+# Install bash for initialization scripts
+RUN apk add --no-cache bash
+
+# Create directories
+RUN mkdir -p /etc/redis/certs
+RUN mkdir -p /docker-entrypoint-initdb.d
+
+# Copy certificates
+COPY server.key /etc/redis/certs/
+COPY server.crt /etc/redis/certs/
+
+# Copy configuration files
+COPY redis.conf /etc/redis/
+COPY users.acl /etc/redis/
+
+# Copy initialization script
+COPY scripts/init-redis.sh /docker-entrypoint-initdb.d/
+RUN chmod +x /docker-entrypoint-initdb.d/init-redis.sh
+
+# Expose ports
+EXPOSE 6379 6380
+WORKDIR /docker-entrypoint-initdb.d
+
+# Start Redis with the custom configuration
+CMD ["redis-server", "/etc/redis/redis.conf"]
\ No newline at end of file
diff --git a/test/js/valkey/docker-unified/redis.conf b/test/js/valkey/docker-unified/redis.conf
new file mode 100644
index 0000000000..0608688a31
--- /dev/null
+++ b/test/js/valkey/docker-unified/redis.conf
@@ -0,0 +1,10 @@
+bind 0.0.0.0
+protected-mode no
+appendonly yes
+port 6379
+tls-port 6380
+tls-cert-file /etc/redis/certs/server.crt
+tls-key-file /etc/redis/certs/server.key
+tls-ca-cert-file /etc/redis/certs/server.crt
+databases 16
+aclfile /etc/redis/users.acl
\ No newline at end of file
diff --git a/test/js/valkey/docker-unified/scripts/init-redis.sh b/test/js/valkey/docker-unified/scripts/init-redis.sh
new file mode 100755
index 0000000000..12b6669712
--- /dev/null
+++ b/test/js/valkey/docker-unified/scripts/init-redis.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+set -e
+
+echo "Starting Redis initialization script"
+
+# Function to wait for Redis with timeout
+wait_for_redis() {
+ local type=$1
+ local max_attempts=20
+ local attempt=1
+ local command=$2
+
+ echo "Waiting for Redis $type to start..."
+ until eval "$command" || [ $attempt -gt $max_attempts ]; do
+ echo "Waiting for Redis $type to start... (Attempt $attempt/$max_attempts)"
+ sleep 1
+ ((attempt++))
+ done
+
+ if [ $attempt -gt $max_attempts ]; then
+ echo "ERROR: Redis $type failed to start after $max_attempts attempts"
+ return 1
+ else
+ echo "Redis $type is ready!"
+ return 0
+ fi
+}
+
+# Wait for Redis TCP to start
+wait_for_redis "TCP" "redis-cli -p 6379 ping > /dev/null 2>&1"
+
+# Wait for Redis TLS to start
+wait_for_redis "TLS" "redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt -p 6380 ping > /dev/null 2>&1"
+
+# Wait for Redis Unix socket to start
+wait_for_redis "UNIX" "redis-cli -s /tmp/redis.sock ping > /dev/null 2>&1"
+
+echo "Setting up test data..."
+
+# Set up some test data for TCP connection in DB 0
+redis-cli -p 6379 select 0
+redis-cli -p 6379 set bun_valkey_test_init "initialization_successful"
+redis-cli -p 6379 hset bun_valkey_test_hash name "test_user" age "25" active "true"
+redis-cli -p 6379 sadd bun_valkey_test_set "red" "green" "blue"
+redis-cli -p 6379 lpush bun_valkey_test_list "first" "second" "third"
+
+# Set up some test data for TLS connection in DB 1
+redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt -p 6380 select 1
+redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt -p 6380 set bun_valkey_tls_test_init "initialization_successful"
+redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt -p 6380 hset bun_valkey_tls_test_hash name "test_user" age "25" active "true"
+redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt -p 6380 sadd bun_valkey_tls_test_set "red" "green" "blue"
+redis-cli --tls --cert /etc/redis/certs/server.crt --key /etc/redis/certs/server.key --cacert /etc/redis/certs/server.crt -p 6380 lpush bun_valkey_tls_test_list "first" "second" "third"
+
+# Set up some test data for Unix socket connection in DB 2
+redis-cli -s /tmp/redis.sock select 2
+redis-cli -s /tmp/redis.sock set bun_valkey_unix_test_init "initialization_successful"
+redis-cli -s /tmp/redis.sock hset bun_valkey_unix_test_hash name "test_user" age "25" active "true"
+redis-cli -s /tmp/redis.sock sadd bun_valkey_unix_test_set "red" "green" "blue"
+redis-cli -s /tmp/redis.sock lpush bun_valkey_unix_test_list "first" "second" "third"
+
+# Set up test data for authenticated connection with testuser
+redis-cli -p 6379 -a test123 --user testuser select 3
+redis-cli -p 6379 -a test123 --user testuser set bun_valkey_auth_test_init "auth_initialization_successful"
+redis-cli -p 6379 -a test123 --user testuser hset bun_valkey_auth_test_hash name "auth_user" age "30" active "true"
+
+# Set up test data for read-only user
+redis-cli -p 6379 select 4
+redis-cli -p 6379 set bun_valkey_readonly_test "readonly_test"
+
+# Set up test data for write-only user
+redis-cli -p 6379 select 5
+redis-cli -p 6379 set bun_valkey_writeonly_test "writeonly_test"
+
+echo "Redis initialization complete!"
\ No newline at end of file
diff --git a/test/js/valkey/docker-unified/server.crt b/test/js/valkey/docker-unified/server.crt
new file mode 100644
index 0000000000..4e79fbc049
--- /dev/null
+++ b/test/js/valkey/docker-unified/server.crt
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDHzCCAgegAwIBAgIUOvkvGE7rI3OXABlz71VQMatWElgwDQYJKoZIhvcNAQEL
+BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI1MDQwNzEwMjgzN1oXDTI2MDQw
+NzEwMjgzN1owFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEA88KqRAdx13qQcROKeSotdpfUPzPekpbNfetNZjBsmf6N
+t4mtAhnAaJpPKkWvs1pDA5/qD3ZxAcLEa31y9AY76TvgZKq0yiD2MTOYFFTcstx5
+Voi2MLrSYN8Xobq7K7r5zQD7TrHEu0S3sSdA8GDtyrx2W8owuNtqUt1oBDRYRZoT
+Nu3/bwjzuBGUrrdYwzBQvr5XOA3v2yAexgffOeSz8cZtvR+BL0sxu6SDN1VpQe//
+KHQy1jZEHX0mOvRoB+95MfxHEgC7O8fYcrxsHkpjvacjh7TrOllbkcEAmr/exOCw
+MLnZl57Xi7bQMVAPM1TR41mSmvHessPuPXCrzVKn0QIDAQABo2kwZzAdBgNVHQ4E
+FgQUJszPLUfpqnggGY7NuVuGl388G44wHwYDVR0jBBgwFoAUJszPLUfpqnggGY7N
+uVuGl388G44wDwYDVR0TAQH/BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3Qw
+DQYJKoZIhvcNAQELBQADggEBAG4R3o6EZnODINfNIrM+Cag9ATmyEqm4MNMTyH9e
+58ltgU+k5RQKBywdxlC/71BW2I4lsbMz8qS+fcFTOC5a87rEO2qCWFw9Ew4mKJkA
+4gz1RBS4xShNyQewYV2U+ZhrDqp5tnwn+ZXGgMN5Jl0EwNeL6Q5U0zERfDbaE4xZ
+BHrGvnHh9Rm7nkSG9uAIITJ71uKjO5ogPgzzPe++47Xug0o4e3gn3De7WATaSuYa
+Oe9sIYB1YuZoQQoa1u+74+sguKV8/RdkP0rxaSKuGl8KUooNH6MLPnT8n+y+7mQS
+gIAFeezbuqGrFPL2P6ZXmEX39Tlz9f9OmqpmzruUZd1lvBs=
+-----END CERTIFICATE-----
diff --git a/test/js/valkey/docker-unified/server.key b/test/js/valkey/docker-unified/server.key
new file mode 100644
index 0000000000..8185ea8b0c
--- /dev/null
+++ b/test/js/valkey/docker-unified/server.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDzwqpEB3HXepBx
+E4p5Ki12l9Q/M96Sls19601mMGyZ/o23ia0CGcBomk8qRa+zWkMDn+oPdnEBwsRr
+fXL0BjvpO+BkqrTKIPYxM5gUVNyy3HlWiLYwutJg3xehursruvnNAPtOscS7RLex
+J0DwYO3KvHZbyjC422pS3WgENFhFmhM27f9vCPO4EZSut1jDMFC+vlc4De/bIB7G
+B9855LPxxm29H4EvSzG7pIM3VWlB7/8odDLWNkQdfSY69GgH73kx/EcSALs7x9hy
+vGweSmO9pyOHtOs6WVuRwQCav97E4LAwudmXnteLttAxUA8zVNHjWZKa8d6yw+49
+cKvNUqfRAgMBAAECggEAVuBq6asTll4+66YwxKVVJb7QLSRx76HipD3ATKr2kd3p
+KWBesnB2JHHWxDSo/c2uM7UDaTZn6V4+viasWS99m8801vwGSkH8LKX8Tka+j9rH
+PiGkeXKkN1VbqU8RlXDixf9TEgWGnc3MgE2Ctgl9xrNrpaRGwCOnXdg+Ub1MNqWU
+nmxMGP+G2b0ZgbirwECpdGIOvdeygQ8/Jo6brbGaPcewo1/5545wWHPpb+zXrBra
+E//3i6geb9NJUAMKl0cTURoZWY3pF4yElV2ZZcE+cH5/fesYN7ZTZAccvo1lBPjy
+OiC/fEuhSAH5iobH1KPrQbOby2YdUUOxg3dXR6qV5wKBgQD88jFV/a4togqaEv9x
+yoCOtSE6CQmRZmCGhZSk60yR6JdFWQ1Uvc8brJdc2MRC/WRWr+nI6jGgobEbMZFO
+KjHKALwOokFiSzj8I9u+IxGLM7TuGJFtKAT3AEwbQrWp3CxCrKsrIYwVkSwtViG0
+cOVXQCkmuw3ewneHdnnWxiJq/wKBgQD2tBTQRWc/fOeR9TucGp6yQpFQaYL8qA4m
+FpihxbX2JcTS8ge28ggc+c1seMqYfSBtnIRCWWdDB7BjquQT/lnQqZq5TEW1/FbD
+9MML0VCM5TCAH1v6rcgMRRpsRYhB+vhbLcenSZGQoojozxEQts8/1HCSHTw0PzjK
+dRVClb39LwKBgHGdY7WpPZw3paVxFRYKjFYNW8BSoN6TapXh2FN/cSQ0ogW/KzK+
+ExHuIwrMPtOMN46Mc2kQcHwjRIbfa9H9N+HxFIdKMC4zdYQjoyczX0T0U7eCh4fN
+KvW7R3QTMb/7KlJEdpnn9qEVVQ+EGZ2P/COFqTZBXMiK9t98wttKodkHAoGAfGV1
+kUdNto+u3MRBWId7ufsi9t8dM3UyHTaLpBbjl8iXpJ5yEWede67iTG3kClwdu+eO
+MT6PeRcpdDg5ZXN9ql+7KvAwvoEM5yZGK3FSIpl2iURGxvJVywoVNr8g49Q+4wsE
+f2/zPHEYg/vVaQ4lFtRyJtsi/l1ar4u2Oqry7/UCgYEAh9+0SIBhdVny48EqQXmi
+5WFiKhb5BcEUuLFlZji/z+y6a7LopCisDoegvboiDByxPizbKGzIChxLqO3SQKXb
+kKiGsAITmMZ2Kl6jRhMUDgq4/DsjBo/h3guk+xQZ2DHtT9v1FLvBqCi8poP4XUMy
+9BbnrT03dl7N2+9fIdjnC9Y=
+-----END PRIVATE KEY-----
diff --git a/test/js/valkey/docker-unified/users.acl b/test/js/valkey/docker-unified/users.acl
new file mode 100644
index 0000000000..ec725dba50
--- /dev/null
+++ b/test/js/valkey/docker-unified/users.acl
@@ -0,0 +1,4 @@
+user default on nopass ~* &* +@all
+user testuser on >test123 ~* &* +@all
+user readonly on >readonly ~* &* +@read
+user writeonly on >writeonly ~* &* +@write
\ No newline at end of file
diff --git a/test/js/valkey/docker/Dockerfile b/test/js/valkey/docker/Dockerfile
new file mode 100644
index 0000000000..cd09450e84
--- /dev/null
+++ b/test/js/valkey/docker/Dockerfile
@@ -0,0 +1,44 @@
+# Dockerfile for Valkey/Redis testing
+FROM redis:7-alpine
+
+# Install bash for initialization scripts and create the config and init-script directories
+RUN apk add --no-cache bash && mkdir -p /etc/redis /docker-entrypoint-initdb.d
+
+# Create initialization script
+RUN echo '#!/bin/bash\n\
+set -e\n\
+\n\
+# Wait for Redis to start\n\
+until redis-cli ping; do\n\
+ echo "Waiting for Redis to start..."\n\
+ sleep 1\n\
+done\n\
+\n\
+echo "Redis is ready!"\n\
+\n\
+# Set up some test data for persistence tests\n\
+redis-cli set bun_valkey_test_init "initialization_successful"\n\
+\n\
+# Create test hash\n\
+redis-cli hset bun_valkey_test_hash name "test_user" age "25" active "true"\n\
+\n\
+# Create test set\n\
+redis-cli sadd bun_valkey_test_set "red" "green" "blue"\n\
+\n\
+# Create test list\n\
+redis-cli lpush bun_valkey_test_list "first" "second" "third"\n\
+' > /docker-entrypoint-initdb.d/init-redis.sh
+
+# Make the script executable
+RUN chmod +x /docker-entrypoint-initdb.d/init-redis.sh
+
+# Configure Redis
+RUN echo "bind 0.0.0.0" > /etc/redis/redis.conf && \
+ echo "protected-mode no" >> /etc/redis/redis.conf && \
+ echo "appendonly yes" >> /etc/redis/redis.conf
+
+# Expose Redis port
+EXPOSE 6379
+
+# Start Redis with the custom configuration
+CMD ["redis-server", "/etc/redis/redis.conf"]
\ No newline at end of file
diff --git a/test/js/valkey/integration/complex-operations.test.ts b/test/js/valkey/integration/complex-operations.test.ts
new file mode 100644
index 0000000000..6dae7446ec
--- /dev/null
+++ b/test/js/valkey/integration/complex-operations.test.ts
@@ -0,0 +1,471 @@
+import { randomUUIDv7, RedisClient } from "bun";
+import { describe, expect, test, beforeEach, afterEach, afterAll } from "bun:test";
+import { ConnectionType, createClient, ctx, isEnabled } from "../test-utils";
+
+/**
+ * Integration test suite for complex Redis operations
+ * - Transaction handling
+ * - Pipelining (to be implemented)
+ * - Complex data type operations
+ * - Realistic use cases
+ */
+describe.skipIf(!isEnabled)("Valkey: Complex Operations", () => {
+ beforeEach(() => {
+ if (ctx.redis?.connected) {
+ ctx.redis.disconnect?.();
+ }
+ ctx.redis = createClient(ConnectionType.TCP);
+ });
+ describe("Multi/Exec Transactions", () => {
+ test("should execute commands in a transaction", async () => {
+ const prefix = ctx.generateKey("transaction");
+ const key1 = `${prefix}-1`;
+ const key2 = `${prefix}-2`;
+ const key3 = `${prefix}-3`;
+
+ // Start transaction
+ await ctx.redis.send("MULTI", []);
+
+ // Queue commands in transaction
+ const queueResults = await Promise.all([
+ ctx.redis.set(key1, "value1"),
+ ctx.redis.set(key2, "value2"),
+ ctx.redis.incr(key3),
+ ctx.redis.get(key1),
+ ]);
+
+ // All queue commands should return "QUEUED"
+ for (const result of queueResults) {
+ expect(result).toBe("QUEUED");
+ }
+
+ // Execute transaction
+ const execResult = await ctx.redis.send("EXEC", []);
+
+ // Should get an array of results
+ expect(Array.isArray(execResult)).toBe(true);
+ expect(execResult.length).toBe(4);
+
+ // Check individual results
+ expect(execResult[0]).toBe("OK"); // SET result
+ expect(execResult[1]).toBe("OK"); // SET result
+ expect(execResult[2]).toBe(1); // INCR result
+ expect(execResult[3]).toBe("value1"); // GET result
+
+ // Verify the transaction was applied
+ const key1Value = await ctx.redis.get(key1);
+ expect(key1Value).toBe("value1");
+
+ const key2Value = await ctx.redis.get(key2);
+ expect(key2Value).toBe("value2");
+
+ const key3Value = await ctx.redis.get(key3);
+ expect(key3Value).toBe("1");
+ });
+
+ test("should handle transaction discards", async () => {
+ const prefix = ctx.generateKey("transaction-discard");
+ const key = `${prefix}-key`;
+
+ // Set initial value
+ await ctx.redis.set(key, "initial");
+
+ // Start transaction
+ await ctx.redis.send("MULTI", []);
+
+ // Queue some commands
+ await ctx.redis.set(key, "changed");
+ await ctx.redis.incr(`${prefix}-counter`);
+
+ // Discard the transaction
+ const discardResult = await ctx.redis.send("DISCARD", []);
+ expect(discardResult).toBe("OK");
+
+ // Verify the key was not changed
+ const value = await ctx.redis.get(key);
+ expect(value).toBe("initial");
+
+ // Verify counter was not incremented
+ const counterValue = await ctx.redis.get(`${prefix}-counter`);
+ expect(counterValue).toBeNull();
+ });
+
+ test("should handle transaction errors", async () => {
+ const prefix = ctx.generateKey("transaction-error");
+ const key = `${prefix}-key`;
+
+ // Set initial value
+ await ctx.redis.set(key, "string-value");
+
+ // Start transaction
+ await ctx.redis.send("MULTI", []);
+
+ // Queue valid command
+ await ctx.redis.set(`${prefix}-valid`, "valid");
+
+ // Queue command that will fail (INCR on a string)
+ await ctx.redis.incr(key);
+
+ // Queue another valid command
+ await ctx.redis.set(`${prefix}-after`, "after");
+
+ // Execute transaction
+ const execResult = await ctx.redis.send("EXEC", []);
+
+ // Should get an array of results, with error for the failing command
+ expect(Array.isArray(execResult)).toBe(true);
+ expect(execResult).toMatchInlineSnapshot(`
+ [
+ "OK",
+ [Error: ERR value is not an integer or out of range],
+ "OK",
+ ]
+ `);
+
+ // Verify the valid commands were executed
+ const validValue = await ctx.redis.get(`${prefix}-valid`);
+ expect(validValue).toBe("valid");
+
+ const afterValue = await ctx.redis.get(`${prefix}-after`);
+ expect(afterValue).toBe("after");
+ });
+
+ test("should handle nested commands in transaction", async () => {
+ const prefix = ctx.generateKey("transaction-nested");
+ const hashKey = `${prefix}-hash`;
+ const setKey = `${prefix}-set`;
+
+ // Start transaction
+ await ctx.redis.send("MULTI", []);
+
+ // Queue complex data type commands
+ await ctx.redis.send("HSET", [hashKey, "field1", "value1", "field2", "value2"]);
+ await ctx.redis.send("SADD", [setKey, "member1", "member2", "member3"]);
+ await ctx.redis.send("HGETALL", [hashKey]);
+ await ctx.redis.send("SMEMBERS", [setKey]);
+
+ // Execute transaction
+ const execResult = await ctx.redis.send("EXEC", []);
+
+ // Should get an array of results
+ expect(Array.isArray(execResult)).toBe(true);
+ expect(execResult.length).toBe(4);
+
+ // HSET should return number of fields added
+ expect(execResult[0]).toBe(2);
+
+ // SADD should return number of members added
+ expect(execResult[1]).toBe(3);
+
+ // HGETALL should return hash object or array
+ const hashResult = execResult[2];
+ if (typeof hashResult === "object" && hashResult !== null) {
+ // RESP3 style (map)
+ expect(hashResult.field1).toBe("value1");
+ expect(hashResult.field2).toBe("value2");
+ } else if (Array.isArray(hashResult)) {
+ // RESP2 style (array of field-value pairs)
+ expect(hashResult.length).toBe(4);
+ expect(hashResult).toContain("field1");
+ expect(hashResult).toContain("value1");
+ expect(hashResult).toContain("field2");
+ expect(hashResult).toContain("value2");
+ }
+
+ // SMEMBERS should return array
+ expect(Array.isArray(execResult[3])).toBe(true);
+ expect(execResult[3].length).toBe(3);
+ expect(execResult[3]).toContain("member1");
+ expect(execResult[3]).toContain("member2");
+ expect(execResult[3]).toContain("member3");
+ });
+ });
+
+ describe("Complex Key Patterns", () => {
+ test("should handle hierarchical key patterns", async () => {
+ // Create hierarchical key structure (user:id:field)
+ const userId = randomUUIDv7().substring(0, 8);
+ const baseKey = ctx.generateKey(`user:${userId}`);
+
+ // Set multiple fields
+ await ctx.redis.set(`${baseKey}:name`, "John Doe");
+ await ctx.redis.set(`${baseKey}:email`, "john@example.com");
+ await ctx.redis.set(`${baseKey}:age`, "30");
+
+ // Create a counter
+ await ctx.redis.incr(`${baseKey}:visits`);
+ await ctx.redis.incr(`${baseKey}:visits`);
+
+ // Get all keys matching the pattern
+ const patternResult = await ctx.redis.send("KEYS", [`${baseKey}:*`]);
+
+ // Should find all our keys
+ expect(Array.isArray(patternResult)).toBe(true);
+ expect(patternResult.length).toBe(4);
+
+ // Sort for consistent snapshot
+ const sortedKeys = [...patternResult].sort();
+ expect(sortedKeys).toMatchInlineSnapshot(`
+ [
+ "${baseKey}:age",
+ "${baseKey}:email",
+ "${baseKey}:name",
+ "${baseKey}:visits",
+ ]
+ `);
+
+ // Verify values
+ const nameValue = await ctx.redis.get(`${baseKey}:name`);
+ expect(nameValue).toBe("John Doe");
+
+ const visitsValue = await ctx.redis.get(`${baseKey}:visits`);
+ expect(visitsValue).toBe("2");
+ });
+
+ test("should handle complex key patterns with expiry", async () => {
+ // Create session-like structure
+ const sessionId = randomUUIDv7().substring(0, 8);
+ const baseKey = ctx.generateKey(`session:${sessionId}`);
+
+ // Set session data with expiry
+ await ctx.redis.set(`${baseKey}:data`, JSON.stringify({ user: "user123", role: "admin" }));
+ await ctx.redis.expire(`${baseKey}:data`, 30); // 30 second expiry
+
+ // Set session heartbeat with shorter expiry
+ await ctx.redis.set(`${baseKey}:heartbeat`, Date.now().toString());
+ await ctx.redis.expire(`${baseKey}:heartbeat`, 10); // 10 second expiry
+
+ // Verify TTLs
+ const dataTtl = await ctx.redis.ttl(`${baseKey}:data`);
+ expect(typeof dataTtl).toBe("number");
+ expect(dataTtl).toBeGreaterThan(0);
+ expect(dataTtl).toBeLessThanOrEqual(30);
+
+ const heartbeatTtl = await ctx.redis.ttl(`${baseKey}:heartbeat`);
+ expect(typeof heartbeatTtl).toBe("number");
+ expect(heartbeatTtl).toBeGreaterThan(0);
+ expect(heartbeatTtl).toBeLessThanOrEqual(10);
+
+ // Update heartbeat and reset TTL
+ await ctx.redis.set(`${baseKey}:heartbeat`, Date.now().toString());
+ await ctx.redis.expire(`${baseKey}:heartbeat`, 10);
+
+ // Verify updated TTL
+ const updatedTtl = await ctx.redis.ttl(`${baseKey}:heartbeat`);
+ expect(updatedTtl).toBeGreaterThan(0);
+ expect(updatedTtl).toBeLessThanOrEqual(10);
+ });
+ });
+
+ describe("Realistic Use Cases", () => {
+ test("should implement a simple rate limiter", async () => {
+ // Implementation of a rate limiter using Redis
+ const ipAddress = "192.168.1.1";
+ const rateLimitKey = ctx.generateKey(`ratelimit:${ipAddress}`);
+ const maxRequests = 5;
+ const windowSecs = 10;
+
+ // Function to check if the IP is rate limited
+ async function isRateLimited() {
+ // Get current count
+ const count = await ctx.redis.incr(rateLimitKey);
+
+ // If this is the first request, set expiry
+ if (count === 1) {
+ await ctx.redis.expire(rateLimitKey, windowSecs);
+ }
+
+ // Check if over limit
+ return count > maxRequests;
+ }
+
+ // Simulate multiple requests
+ const results = [];
+ for (let i = 0; i < 7; i++) {
+ results.push(await isRateLimited());
+ }
+
+ // Check results with inline snapshot for better readability
+ expect(results).toMatchInlineSnapshot(`
+ [
+ false,
+ false,
+ false,
+ false,
+ false,
+ true,
+ true,
+ ]
+ `);
+
+ const finalCount = await ctx.redis.get(rateLimitKey);
+ expect(finalCount).toBe("7");
+
+ // Verify TTL exists
+ const ttl = await ctx.redis.ttl(rateLimitKey);
+ expect(ttl).toBeGreaterThan(0);
+ expect(ttl).toBeLessThanOrEqual(windowSecs);
+ });
+
+ test("should implement a simple cache with expiry", async () => {
+ const cachePrefix = ctx.generateKey("cache");
+
+ // Cache implementation
+ async function getOrSetCache(key, ttl, fetchFunction) {
+ const cacheKey = `${cachePrefix}:${key}`;
+
+ // Try to get from cache
+ const cachedValue = await ctx.redis.get(cacheKey);
+ if (cachedValue !== null) {
+ return JSON.parse(cachedValue);
+ }
+
+ // Not in cache, fetch the value
+ const freshValue = await fetchFunction();
+
+ // Store in cache with expiry
+ await ctx.redis.set(cacheKey, JSON.stringify(freshValue));
+ await ctx.redis.expire(cacheKey, ttl);
+
+ return freshValue;
+ }
+
+ // Simulate expensive operation
+ let fetchCount = 0;
+ async function fetchData() {
+ fetchCount++;
+ return { data: "example", timestamp: Date.now() };
+ }
+
+ // First fetch should call the function
+ const result1 = await getOrSetCache("test-key", 30, fetchData);
+ expect(result1).toBeDefined();
+ expect(fetchCount).toBe(1);
+
+ // Second fetch should use cache
+ const result2 = await getOrSetCache("test-key", 30, fetchData);
+ expect(result2).toBeDefined();
+ expect(fetchCount).toBe(1); // Still 1 because we used cache
+
+ // Different key should call function again
+ const result3 = await getOrSetCache("other-key", 30, fetchData);
+ expect(result3).toBeDefined();
+ expect(fetchCount).toBe(2);
+
+ // Verify cache entry has TTL
+ const ttl = await ctx.redis.ttl(`${cachePrefix}:test-key`);
+ expect(ttl).toBeGreaterThan(0);
+ expect(ttl).toBeLessThanOrEqual(30);
+ });
+
+ test("should implement a simple leaderboard", async () => {
+ const leaderboardKey = ctx.generateKey("leaderboard");
+
+ // Add scores
+ await ctx.redis.send("ZADD", [leaderboardKey, "100", "player1"]);
+ await ctx.redis.send("ZADD", [leaderboardKey, "200", "player2"]);
+ await ctx.redis.send("ZADD", [leaderboardKey, "150", "player3"]);
+ await ctx.redis.send("ZADD", [leaderboardKey, "300", "player4"]);
+ await ctx.redis.send("ZADD", [leaderboardKey, "50", "player5"]);
+
+ // Get top 3 players (highest scores)
+ const topPlayers = await ctx.redis.send("ZREVRANGE", [leaderboardKey, "0", "2", "WITHSCORES"]);
+
+ expect(topPlayers).toMatchInlineSnapshot(`
+ [
+ [
+ "player4",
+ 300,
+ ],
+ [
+ "player2",
+ 200,
+ ],
+ [
+ "player3",
+ 150,
+ ],
+ ]
+ `);
+
+ // Get player rank (0-based)
+ const player3Rank = await ctx.redis.send("ZREVRANK", [leaderboardKey, "player3"]);
+ expect(player3Rank).toBe(2); // 0-based index, so 3rd place is 2
+
+ // Get player score
+ const player3Score = await ctx.redis.send("ZSCORE", [leaderboardKey, "player3"]);
+ expect(player3Score).toBe(150);
+
+ // Increment a score
+ await ctx.redis.send("ZINCRBY", [leaderboardKey, "25", "player3"]);
+
+ // Verify score was updated
+ const updatedScore = await ctx.redis.send("ZSCORE", [leaderboardKey, "player3"]);
+ expect(updatedScore).toBe(175);
+
+ // Get updated rank
+ const updatedRank = await ctx.redis.send("ZREVRANK", [leaderboardKey, "player3"]);
+ expect(updatedRank).toBe(2); // Still in third place
+
+ // Get count of players with scores between 100 and 200
+ const countInRange = await ctx.redis.send("ZCOUNT", [leaderboardKey, "100", "200"]);
+ expect(countInRange).toBe(3); // player1, player3, player2
+ });
+ });
+
+ describe("Distributed Locks", () => {
+ test("should implement a simple distributed lock", async () => {
+ const lockName = ctx.generateKey("lock-resource");
+ const lockValue = randomUUIDv7(); // Unique identifier for the owner
+ const lockTimeout = 10; // seconds
+
+ // Acquire the lock
+ const acquireResult = await ctx.redis.send("SET", [
+ lockName,
+ lockValue,
+ "NX", // Only set if key doesn't exist
+ "EX",
+ lockTimeout.toString(),
+ ]);
+
+ // Should acquire the lock successfully
+ expect(acquireResult).toBe("OK");
+
+ // Try to acquire again (should fail as it's already locked)
+ const retryResult = await ctx.redis.send("SET", [lockName, "other-value", "NX", "EX", lockTimeout.toString()]);
+
+ // Should return null (lock not acquired)
+ expect(retryResult).toBeNull();
+
+ // LUA script for safe release (only release if we own the lock)
+ const releaseLockScript = `
+ if redis.call("get", KEYS[1]) == ARGV[1] then
+ return redis.call("del", KEYS[1])
+ else
+ return 0
+ end
+ `;
+
+ // Release the lock
+ const releaseResult = await ctx.redis.send("EVAL", [
+ releaseLockScript,
+ "1", // Number of keys
+ lockName, // KEYS[1]
+ lockValue, // ARGV[1]
+ ]);
+
+ // Should return 1 (lock released)
+ expect(releaseResult).toBe(1);
+
+ // Try to release again (should fail as lock is gone)
+ const reReleaseResult = await ctx.redis.send("EVAL", [releaseLockScript, "1", lockName, lockValue]);
+
+ // Should return 0 (no lock to release)
+ expect(reReleaseResult).toBe(0);
+
+ // Verify lock is gone
+ const finalCheck = await ctx.redis.get(lockName);
+ expect(finalCheck).toBeNull();
+ });
+ });
+});
diff --git a/test/js/valkey/reliability/connection-failures.test.ts b/test/js/valkey/reliability/connection-failures.test.ts
new file mode 100644
index 0000000000..a615438998
--- /dev/null
+++ b/test/js/valkey/reliability/connection-failures.test.ts
@@ -0,0 +1,337 @@
+import { describe, test, expect, mock, beforeEach, afterEach } from "bun:test";
+import { randomUUIDv7, RedisClient } from "bun";
+import { DEFAULT_REDIS_OPTIONS, DEFAULT_REDIS_URL, delay, isEnabled, retry, testKey } from "../test-utils";
+
+/**
+ * Test suite for connection failures, reconnection, and error handling
+ * - Connection failures
+ * - Reconnection behavior
+ * - Timeout handling
+ * - Error propagation
+ */
+describe.skipIf(!isEnabled)("Valkey: Connection Failures", () => {
+ // Use invalid port to force connection failure
+ const BAD_CONNECTION_URL = "redis://localhost:12345";
+
+ describe("Connection Failure Handling", () => {
+ test("should handle initial connection failure gracefully", async () => {
+ // Create client with invalid port to force connection failure
+ const client = new RedisClient(BAD_CONNECTION_URL, {
+ connectionTimeout: 500, // Short timeout
+ autoReconnect: false, // Disable auto reconnect to simplify the test
+ });
+
+ try {
+ // Attempt to send command - should fail with connection error
+ await client.set("key", "value");
+ expect(false).toBe(true); // Should not reach here
+ } catch (error) {
+ // Expect an error with connection closed message
+ expect(error.message).toMatch(/connection closed|socket closed|failed to connect/i);
+ } finally {
+ // Cleanup
+ await client.disconnect();
+ }
+ });
+
+ test("should reject commands with appropriate errors when disconnected", async () => {
+ // Create client with invalid connection
+ const client = new RedisClient(BAD_CONNECTION_URL, {
+ connectionTimeout: 500,
+ autoReconnect: false,
+ enableOfflineQueue: false, // Disable offline queue to test immediate rejection
+ });
+
+ // Verify the client is not connected
+ expect(client.connected).toBe(false);
+
+ // Try commands individually to make sure they fail properly
+ try {
+ await client.get("any-key");
+ expect(false).toBe(true); // Should not reach here
+ } catch (error) {
+ // Should fail with connection error
+ expect(error.message).toMatch(/connection closed|socket closed|failed to connect|offline queue is disabled/i);
+ }
+
+ try {
+ await client.set("any-key", "value");
+ expect(false).toBe(true); // Should not reach here
+ } catch (error) {
+ // Should fail with connection error
+ expect(error.message).toMatch(/connection closed|socket closed|failed to connect|offline queue is disabled/i);
+ }
+
+ try {
+ await client.del("any-key");
+ expect(false).toBe(true); // Should not reach here
+ } catch (error) {
+ // Should fail with connection error
+ expect(error.message).toMatch(/connection closed|socket closed|failed to connect|offline queue is disabled/i);
+ }
+
+ try {
+ await client.incr("counter");
+ expect(false).toBe(true); // Should not reach here
+ } catch (error) {
+ // Should fail with connection error
+ expect(error.message).toMatch(/connection closed|socket closed|failed to connect|offline queue is disabled/i);
+ }
+ });
+
+ test("should handle connection timeout", async () => {
+ // Use a non-routable IP address with a very short timeout
+ const client = new RedisClient("redis://192.0.2.1:6379", {
+ connectionTimeout: 2, // 2ms timeout
+ autoReconnect: false,
+ });
+ expect(async () => {
+ await client.get("any-key");
+ }).toThrowErrorMatchingInlineSnapshot(`"Connection timeout reached after 2ms"`);
+ });
+
+ test("should report correct connected status", async () => {
+ // Create client with invalid connection
+ const client = new RedisClient(BAD_CONNECTION_URL, {
+ connectionTimeout: 500,
+ autoReconnect: false,
+ });
+
+ // Should report disconnected state
+ expect(client.connected).toBe(false);
+
+ try {
+ // Try to send command to ensure connection attempt
+ await client.get("key");
+ } catch (error) {
+ // Expected error
+ }
+
+ // Should still report disconnected
+ expect(client.connected).toBe(false);
+
+ await client.disconnect();
+ });
+ });
+
+ describe("Reconnection Behavior", () => {
+ // Use a shorter timeout to avoid test hanging
+ test("should reject commands when offline queue is enabled", async () => {
+ // Create client with invalid connection but with offline queue enabled
+ const client = new RedisClient(BAD_CONNECTION_URL, {
+ connectionTimeout: 100, // Very short timeout
+ autoReconnect: false, // Disable auto-reconnect to avoid waiting for retries
+ enableOfflineQueue: true,
+ });
+
+ // Try to send a command - it should be queued but eventually fail
+ // when the connection timeout is reached
+ const commandPromise = client.set("key1", "value1");
+
+ try {
+ await commandPromise;
+ expect(false).toBe(true); // Should not reach here
+ } catch (error) {
+ // Should fail with a connection error
+ expect(error.message).toMatch(/connection closed|socket closed|failed to connect/i);
+ }
+
+ await client.disconnect();
+ });
+
+ test("should reject commands when offline queue is disabled", async () => {
+ // Create client with invalid connection and offline queue disabled
+ const client = new RedisClient(BAD_CONNECTION_URL, {
+ connectionTimeout: 500,
+ autoReconnect: true,
+ enableOfflineQueue: false,
+ });
+
+ try {
+ // Try to send command - should reject immediately
+ await client.set("key", "value");
+ expect(false).toBe(true); // Should not reach here
+ } catch (error) {
+ expect(error.message).toMatch(/connection closed|offline queue is disabled/i);
+ }
+
+ await client.disconnect();
+ });
+
+ // Skip this test since it's hard to reliably wait for max retries in a test environment
+ test.skip("should stop reconnection attempts after max retries", async () => {
+ // This test is unreliable in a test environment, as it would need to wait
+ // for all retry attempts which could cause timeouts
+ });
+ });
+
+ describe("Connection Event Callbacks", () => {
+ // Only test this if Redis is available
+ test("onconnect and onclose handlers", async () => {
+ // Try connecting to the default Redis URL
+ const client = new RedisClient(DEFAULT_REDIS_URL, DEFAULT_REDIS_OPTIONS);
+
+ // Set up event handlers
+ const onconnect = mock(() => {});
+ const onclose = mock(() => {});
+ client.onconnect = onconnect;
+ client.onclose = onclose;
+ await client.set("__test_key", "test-value");
+
+ // If we get here, connection succeeded, so we should check connect callback
+ expect(client.connected).toBe(true);
+ expect(onconnect).toHaveBeenCalled();
+
+ // Explicitly disconnect to trigger onclose
+ client.disconnect();
+
+ // Wait a short time for disconnect callbacks to execute
+ await delay(50);
+
+ // onclose should be called regardless of whether the connection succeeded
+ expect(client.connected).toBe(false);
+ expect(onclose).toHaveBeenCalled();
+
+ expect(onconnect).toHaveBeenCalledTimes(1);
+ expect(onclose).toHaveBeenCalledTimes(1);
+ });
+ test("should support changing onconnect and onclose handlers", async () => {
+ const client = new RedisClient(DEFAULT_REDIS_URL, DEFAULT_REDIS_OPTIONS);
+
+ // Create mock handlers
+ const onconnect1 = mock(() => {});
+ const onclose1 = mock(() => {});
+ const onconnect2 = mock(() => {});
+ const onclose2 = mock(() => {});
+
+ // Set initial handlers
+ client.onconnect = onconnect1;
+ client.onclose = onclose1;
+
+ // Change handlers
+ client.onconnect = onconnect2;
+ client.onclose = onclose2;
+
+ try {
+ // Try to initialize connection
+ await client.set("__test_key", "test-value");
+ } catch (error) {
+ // Connection failed, but we can still test onclose
+ }
+
+ // Disconnect to trigger close handler
+ await client.disconnect();
+
+ // Wait a short time for the callbacks to execute
+ await delay(50);
+
+ // First handlers should not have been called because they were replaced
+ expect(onconnect1).not.toHaveBeenCalled();
+ expect(onclose1).not.toHaveBeenCalled();
+
+ // Second handlers should have been called
+ expect(onclose2).toHaveBeenCalled();
+
+ // If connection succeeded, the connect handler should have been called
+ if (client.connected) {
+ expect(onconnect2).toHaveBeenCalled();
+ }
+ });
+ });
+
+ describe("Handling Manually Closed Connections", () => {
+ test("should not auto-reconnect when manually closed", async () => {
+ // Set up a client
+ const client = new RedisClient(DEFAULT_REDIS_URL, {
+ ...DEFAULT_REDIS_OPTIONS,
+ autoReconnect: true,
+ });
+
+ // Try to initialize connection
+ await client.set("__test_key", "test-value");
+
+ // Manually disconnect
+ client.disconnect();
+
+ // Try to send a command
+ expect(client.connected).toBe(false);
+ expect(async () => {
+ await client.get("__test_key");
+ }).toThrowErrorMatchingInlineSnapshot(`"Connection has failed"`);
+ // Wait some time to see if auto-reconnect happens
+ await delay(50);
+
+ // Should still be disconnected
+ expect(client.connected).toBe(false);
+ });
+
+ test("should clean up resources when disconnected", async () => {
+ // Create a client with no auto reconnect to simplify test
+ const client = new RedisClient(BAD_CONNECTION_URL, {
+ autoReconnect: false,
+ connectionTimeout: 100,
+ });
+
+ // Disconnect immediately
+ await client.disconnect();
+
+ expect(client.connected).toBe(false);
+ expect(async () => {
+ await client.get("any-key");
+ }).toThrowErrorMatchingInlineSnapshot(`"Connection closed"`);
+ // Multiple disconnects should not cause issues
+ await client.disconnect();
+ await client.disconnect();
+ });
+ });
+
+ describe("Multiple Connection Attempts", () => {
+ test("should handle rapid connection/disconnection", async () => {
+ // Create and immediately disconnect many clients
+ const promises = [];
+
+ for (let i = 0; i < 10; i++) {
+ const client = new RedisClient(DEFAULT_REDIS_URL, {
+ ...DEFAULT_REDIS_OPTIONS,
+ connectionTimeout: 500,
+ });
+
+ // Immediately disconnect
+ promises.push(client.disconnect());
+ }
+
+ // All should resolve without errors
+ await Promise.all(promises);
+ });
+
+ test("should not crash when connections fail", async () => {
+ // Create multiple clients with invalid connections in parallel
+ const clients = [];
+
+ for (let i = 0; i < 5; i++) {
+ clients.push(
+ new RedisClient(BAD_CONNECTION_URL, {
+ connectionTimeout: 200,
+ autoReconnect: false,
+ }),
+ );
+ }
+
+ // Try sending commands to all clients
+ const promises = clients.map(client =>
+ client.get("key").catch(err => {
+ // We expect errors, but want to make sure they're the right kind
+ expect(err.message).toMatch(/connection closed|socket closed|failed to connect/i);
+ }),
+ );
+
+ // All should reject without crashing
+ await Promise.all(promises);
+
+ // Clean up
+ for (const client of clients) {
+ await client.disconnect();
+ }
+ });
+ });
+});
diff --git a/test/js/valkey/reliability/error-handling.test.ts b/test/js/valkey/reliability/error-handling.test.ts
new file mode 100644
index 0000000000..48050b215d
--- /dev/null
+++ b/test/js/valkey/reliability/error-handling.test.ts
@@ -0,0 +1,285 @@
+import { randomUUIDv7, RedisClient } from "bun";
+import { describe, expect, test, beforeEach } from "bun:test";
+import { createClient, DEFAULT_REDIS_URL, ctx, ConnectionType, isEnabled } from "../test-utils";
+
+/**
+ * Test suite for error handling, protocol failures, and edge cases
+ * - Command errors (wrong arguments, invalid syntax)
+ * - Protocol parsing failures
+ * - Null/undefined/invalid input handling
+ * - Type errors
+ * - Edge cases
+ */
+describe.skipIf(!isEnabled)("Valkey: Error Handling", () => {
+ beforeEach(() => {
+ if (ctx.redis?.connected) {
+ ctx.redis.disconnect?.();
+ }
+ ctx.redis = createClient(ConnectionType.TCP);
+ });
+ describe("Command Errors", () => {
+ test("should handle invalid command arguments", async () => {
+ const client = ctx.redis;
+
+ // Wrong number of arguments
+
+ expect(async () => await client.send("SET", ["key"])).toThrowErrorMatchingInlineSnapshot(
+ `"ERR wrong number of arguments for 'set' command"`,
+ ); // Missing value argument
+ expect(async () => await client.send("INVALID_COMMAND", ["a"])).toThrowErrorMatchingInlineSnapshot(
+ `"ERR unknown command 'INVALID_COMMAND', with args beginning with: 'a' "`,
+ ); // Invalid command
+ });
+
+ describe("should handle special character keys and values", async () => {
+ // Keys with special characters
+ const specialKeys = [
+ "key with spaces",
+ "key\nwith\nnewlines",
+ "key\twith\ttabs",
+ "key:with:colons",
+ "key-with-unicode-♥-❤-★",
+ ];
+
+ // Values with special characters
+ const specialValues = [
+ "value with spaces",
+ "value\nwith\nnewlines",
+ "value\twith\ttabs",
+ "value:with:colons",
+ "value-with-unicode-♥-❤-★",
+ // RESP protocol special characters
+ "+OK\r\n",
+ "-ERR\r\n",
+ "$5\r\nhello\r\n",
+ "*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",
+ ];
+
+ for (const key of specialKeys) {
+ for (const value of specialValues) {
+ const testKey = `special-key-${randomUUIDv7()}-${key}`;
+ test(`should handle special characters in key "${key}" and value "${value}"`, async () => {
+ const client = ctx.redis;
+ // Set and get should work with special characters
+ await client.set(testKey, value);
+ const result = await client.get(testKey);
+ expect(result).toBe(value);
+ });
+ }
+ }
+ });
+ });
+
+ describe("Null/Undefined/Invalid Input Handling", () => {
+ test.only("should handle undefined/null command arguments", async () => {
+ const client = ctx.redis;
+
+ // undefined key
+ // @ts-expect-error: Testing runtime behavior with invalid types
+ expect(async () => await client.get(undefined)).toThrowErrorMatchingInlineSnapshot(
+ `"Expected key to be a string or buffer for 'get'."`,
+ );
+
+ // null key
+ // @ts-expect-error: Testing runtime behavior with invalid types
+ expect(async () => await client.get(null)).toThrowErrorMatchingInlineSnapshot(
+ `"Expected key to be a string or buffer for 'get'."`,
+ );
+
+ // undefined value
+ // @ts-expect-error: Testing runtime behavior with invalid types
+ expect(async () => await client.set("valid-key", undefined)).toThrowErrorMatchingInlineSnapshot(
+ `"Expected value to be a string or buffer for 'set'."`,
+ );
+
+ expect(async () => await client.set("valid-key", null)).toThrowErrorMatchingInlineSnapshot(
+ `"Expected value to be a string or buffer for 'set'."`,
+ );
+ });
+
+ test("should handle invalid sendCommand inputs", async () => {
+ const client = ctx.redis;
+
+ // Undefined command
+ // @ts-expect-error: Testing runtime behavior with invalid types
+ expect(async () => await client.send(undefined, [])).toThrowErrorMatchingInlineSnapshot(
+ `"ERR unknown command 'undefined', with args beginning with: "`,
+ );
+
+ // Invalid args type
+ // @ts-expect-error: Testing runtime behavior with invalid types
+ expect(async () => await client.send("GET", "not-an-array")).toThrowErrorMatchingInlineSnapshot(
+ `"Arguments must be an array"`,
+ );
+
+ // Non-string command
+ // @ts-expect-error: Testing runtime behavior with invalid types
+ expect(async () => await client.send(123, [])).toThrowErrorMatchingInlineSnapshot(
+ `"ERR unknown command '123', with args beginning with: "`,
+ );
+ });
+ });
+
+ describe("Protocol and Parser Edge Cases", () => {
+ test("should handle various data types correctly", async () => {
+ const client = ctx.redis;
+
+ // Integer/string conversions
+ await client.set("int-key", "42");
+
+ // INCR should return as number
+ const incrResult = await client.incr("int-key");
+ expect(typeof incrResult).toBe("number");
+ expect(incrResult).toBe(43);
+
+ // GET should return as string
+ const getResult = await client.get("int-key");
+ expect(typeof getResult).toBe("string");
+ expect(getResult).toBe("43");
+
+ // Boolean handling for EXISTS command
+ await client.set("exists-key", "value");
+ const existsResult = await client.exists("exists-key");
+ expect(typeof existsResult).toBe("boolean");
+ expect(existsResult).toBe(true);
+
+ const notExistsResult = await client.exists("not-exists-key");
+ expect(typeof notExistsResult).toBe("boolean");
+ expect(notExistsResult).toBe(false);
+
+ // Null handling for non-existent keys
+ const nullResult = await client.get("not-exists-key");
+ expect(nullResult).toBeNull();
+ });
+
+ test("should handle complex RESP3 types", async () => {
+ const client = ctx.redis;
+
+ // HGETALL returns object in RESP3
+ const hashKey = `hash-${randomUUIDv7()}`;
+ await client.send("HSET", [hashKey, "field1", "value1", "field2", "value2"]);
+
+ const hashResult = await client.send("HGETALL", [hashKey]);
+
+ // Hash results should be objects in RESP3
+ expect(typeof hashResult).toBe("object");
+ expect(hashResult).not.toBeNull();
+
+ if (hashResult !== null) {
+ expect(hashResult.field1).toBe("value1");
+ expect(hashResult.field2).toBe("value2");
+ }
+
+ // Error type handling
+ expect(async () => await client.send("HGET", [])).toThrowErrorMatchingInlineSnapshot(
+ `"ERR wrong number of arguments for 'hget' command"`,
+ ); // Missing key and field
+
+ // NULL handling from various commands
+ const nullResult = await client.send("HGET", [hashKey, "nonexistent"]);
+ expect(nullResult).toBeNull();
+ });
+
+ test("should handle RESP protocol boundaries", async () => {
+ const client = ctx.redis;
+
+ // Mix of command types to stress protocol parser
+ const commands = [
+ client.set("key1", "value1"),
+ client.get("key1"),
+ client.send("PING", []),
+ client.incr("counter"),
+ client.exists("key1"),
+ client.send("HSET", ["hash", "field", "value"]),
+ client.send("HGETALL", ["hash"]),
+ client.set("key2", "x".repeat(1000)), // Larger value
+ client.get("key2"),
+ ];
+
+ // Run all commands in parallel to stress protocol handling
+ await Promise.all(commands);
+
+ // Verify data integrity
+
+ expect(await client.get("key1")).toBe("value1");
+ expect(await client.exists("key1")).toBe(true);
+ expect(await client.get("key2")).toBe("x".repeat(1000));
+ expect(await client.send("HGET", ["hash", "field"])).toBe("value");
+ });
+ });
+
+ describe("Resource Management and Edge Cases", () => {
+ test("should handle very large number of parallel commands", async () => {
+ const client = ctx.redis;
+
+ // Create a large number of parallel commands
+ const parallelCount = 1000;
+ const commands = [];
+
+ for (let i = 0; i < parallelCount; i++) {
+ const key = `parallel-key-${i}`;
+ commands.push(client.set(key, `value-${i}`));
+ }
+
+ // Execute all in parallel
+ await Promise.all(commands);
+
+ // Verify some random results
+ for (let i = 0; i < 10; i++) {
+ const index = Math.floor(Math.random() * parallelCount);
+ const key = `parallel-key-${index}`;
+ const value = await client.get(key);
+ expect(value).toBe(`value-${index}`);
+ }
+ });
+
+ test("should handle many rapid sequential commands", async () => {
+ const client = ctx.redis;
+
+ // Create many sequential commands
+ const sequentialCount = 500;
+
+ for (let i = 0; i < sequentialCount; i++) {
+ const key = `sequential-key-${i}`;
+ await client.set(key, `value-${i}`);
+
+ // Periodically verify to ensure integrity
+ if (i % 50 === 0) {
+ const value = await client.get(key);
+ expect(value).toBe(`value-${i}`);
+ }
+ }
+ });
+
+ test("should handle command after disconnect and reconnect", async () => {
+ const client = ctx.redis;
+
+ // Set initial value
+ const key = `reconnect-key-${randomUUIDv7()}`;
+ await client.set(key, "initial-value");
+
+ // Disconnect explicitly
+ client.disconnect();
+
+ // This command should fail
+ expect(async () => await client.get(key)).toThrowErrorMatchingInlineSnapshot(`"Connection has failed"`);
+ });
+
+ test("should handle binary data", async () => {
+ // Binary data in both keys and values
+ const client = ctx.redis;
+
+ // Create Uint8Array with binary data
+ const binaryData = new Uint8Array([0, 1, 2, 3, 255, 254, 253, 252]);
+ const binaryString = String.fromCharCode(...binaryData);
+
+ await client.set("binary-key", binaryString);
+
+ // Get it back
+ const result = await client.get("binary-key");
+
+ // Compare binary data
+ expect(result).toBe(binaryString);
+ });
+ });
+});
diff --git a/test/js/valkey/reliability/protocol-handling.test.ts b/test/js/valkey/reliability/protocol-handling.test.ts
new file mode 100644
index 0000000000..294619ff86
--- /dev/null
+++ b/test/js/valkey/reliability/protocol-handling.test.ts
@@ -0,0 +1,401 @@
+import { beforeEach, describe, expect, test } from "bun:test";
+import { ConnectionType, createClient, ctx, isEnabled, testKey } from "../test-utils";
+
+/**
+ * Test suite for RESP protocol handling, focusing on edge cases
+ * - RESP3 data types handling
+ * - Protocol parsing edge cases
+ * - Bulk string/array handling
+ * - Special value encoding/decoding
+ */
+describe.skipIf(!isEnabled)("Valkey: Protocol Handling", () => {
+ beforeEach(() => {
+ if (ctx.redis?.connected) {
+ ctx.redis.disconnect?.();
+ }
+ ctx.redis = createClient(ConnectionType.TCP);
+ });
+
+ describe("RESP3 Data Type Handling", () => {
+ test("should handle RESP3 Map type (HGETALL)", async () => {
+ // Create a hash with multiple fields
+ const hashKey = testKey("map-test");
+ await ctx.redis.send("HSET", [
+ hashKey,
+ "field1",
+ "value1",
+ "field2",
+ "value2",
+ "number",
+ "42",
+ "empty",
+ "",
+ "special",
+ "hello\r\nworld",
+ ]);
+
+ // Get as RESP3 Map via HGETALL
+ const mapResult = await ctx.redis.send("HGETALL", [hashKey]);
+
+ expect(mapResult).toMatchInlineSnapshot(`
+ {
+ "empty": "",
+ "field1": "value1",
+ "field2": "value2",
+ "number": "42",
+ "special":
+ "hello
+ world"
+ ,
+ }
+ `);
+ });
+
+ test("should handle RESP3 Set type", async () => {
+ // Create a set with multiple members
+ const setKey = testKey("set-test");
+ await ctx.redis.send("SADD", [setKey, "member1", "member2", "42", "", "special \r\n character"]);
+
+ // Get as RESP3 Set via SMEMBERS
+ const setResult = await ctx.redis.send("SMEMBERS", [setKey]);
+
+ expect(JSON.stringify(setResult)).toMatchInlineSnapshot(
+ `"["member1","member2","42","","special \\r\\n character"]"`,
+ );
+ });
+
+ test("should handle RESP3 Boolean type", async () => {
+ const key = testKey("bool-test");
+ await ctx.redis.set(key, "value");
+
+ // EXISTS returns Boolean in RESP3
+ const existsResult = await ctx.redis.exists(key);
+ expect(typeof existsResult).toBe("boolean");
+ expect(existsResult).toBe(true);
+
+ // Non-existent key
+ const notExistsResult = await ctx.redis.exists(testKey("nonexistent"));
+ expect(typeof notExistsResult).toBe("boolean");
+ expect(notExistsResult).toBe(false);
+ });
+
+ test("should handle RESP3 Number types", async () => {
+ const counterKey = testKey("counter");
+ // Various numeric commands to test number handling
+
+ // INCR returns integer
+ const incrResult = await ctx.redis.incr(counterKey);
+ expect(typeof incrResult).toBe("number");
+ expect(incrResult).toBe(1);
+
+ // Use INCRBYFLOAT to test double/float handling
+ const doubleResult = await ctx.redis.send("INCRBYFLOAT", [counterKey, "1.5"]);
+ // Some Redis versions return this as string, either is fine
+ expect(doubleResult === "2.5" || doubleResult === 2.5).toBe(true);
+
+ // Use HINCRBYFLOAT to test another float command
+ const hashKey = testKey("hash-float");
+ const hashDoubleResult = await ctx.redis.send("HINCRBYFLOAT", [hashKey, "field", "10.5"]);
+ // Should be string or number
+ expect(hashDoubleResult === "10.5" || hashDoubleResult === 10.5).toBe(true);
+ });
+
+ test("should handle RESP3 Null type", async () => {
+ // GET non-existent key
+ const nullResult = await ctx.redis.get(testKey("nonexistent"));
+ expect(nullResult).toBeNull();
+
+ // HGET non-existent field
+ const hashKey = testKey("hash");
+ await ctx.redis.send("HSET", [hashKey, "existing", "value"]);
+
+ const nullFieldResult = await ctx.redis.send("HGET", [hashKey, "nonexistent"]);
+ expect(nullFieldResult).toBeNull();
+
+ // BLPOP with timeout
+ const listKey = testKey("empty-list");
+ const timeoutResult = await ctx.redis.send("BLPOP", [listKey, "1"]);
+ expect(timeoutResult).toBeNull();
+ });
+
+ test("should handle RESP3 Error type", async () => {
+ expect(async () => await ctx.redis.send("GET", [])).toThrowErrorMatchingInlineSnapshot(
+ `"ERR wrong number of arguments for 'get' command"`,
+ );
+
+ expect(async () => await ctx.redis.send("SYNTAX-ERROR", [])).toThrowErrorMatchingInlineSnapshot(
+ `"ERR unknown command 'SYNTAX-ERROR', with args beginning with: "`,
+ );
+ });
+ });
+
+ describe("Protocol Parsing Edge Cases", () => {
+ test("should handle nested array structures", async () => {
+ // XRANGE returns array of arrays
+ const streamKey = testKey("stream");
+
+ // Add entries to stream
+ await ctx.redis.send("XADD", [streamKey, "*", "field1", "value1", "field2", "value2"]);
+ await ctx.redis.send("XADD", [streamKey, "*", "field1", "value3", "field2", "value4"]);
+
+ // Get range
+ const rangeResult = await ctx.redis.send("XRANGE", [streamKey, "-", "+"]);
+
+ // Should get array of arrays
+ expect(Array.isArray(rangeResult)).toBe(true);
+ expect(rangeResult.length).toBe(2);
+
+ // First entry should have ID and fields
+ const firstEntry = rangeResult[0];
+ expect(Array.isArray(firstEntry)).toBe(true);
+ expect(firstEntry.length).toBe(2);
+
+ // ID should be string
+ expect(typeof firstEntry[0]).toBe("string");
+
+ // Fields should be array of field-value pairs in RESP2 or object in RESP3
+ const fields = firstEntry[1];
+ if (Array.isArray(fields)) {
+ // RESP2 style
+ expect(fields.length % 2).toBe(0); // Even number for field-value pairs
+ } else if (typeof fields === "object" && fields !== null) {
+ // RESP3 style (map)
+ expect(fields.field1).toBeTruthy();
+ expect(fields.field2).toBeTruthy();
+ }
+ });
+
+ test("should handle empty strings in bulk strings", async () => {
+ // Set empty string value
+ const key = testKey("empty-string");
+ await ctx.redis.set(key, "");
+
+ // Get it back
+ const result = await ctx.redis.get(key);
+ expect(result).toBe("");
+
+ // HSET with empty field and/or value
+ const hashKey = testKey("hash-empty");
+ await ctx.redis.send("HSET", [hashKey, "empty-field", ""]);
+ await ctx.redis.send("HSET", [hashKey, "", "empty-field-name"]);
+
+ // Get them back
+ const emptyValue = await ctx.redis.send("HGET", [hashKey, "empty-field"]);
+ expect(emptyValue).toBe("");
+
+ const emptyField = await ctx.redis.send("HGET", [hashKey, ""]);
+ expect(emptyField).toBe("empty-field-name");
+ });
+
+ test("should handle large arrays", async () => {
+ // Create a large set
+ const setKey = testKey("large-set");
+ const itemCount = 10000;
+
+ // Add many members (in chunks to avoid huge command)
+ for (let i = 0; i < itemCount; i += 100) {
+ const members = [];
+ for (let j = 0; j < 100 && i + j < itemCount; j++) {
+ members.push(`member-${i + j}`);
+ }
+ await ctx.redis.send("SADD", [setKey, ...members]);
+ }
+
+ // Get all members
+ const membersResult = await ctx.redis.send("SMEMBERS", [setKey]);
+
+ // Should get all members back
+ expect(Array.isArray(membersResult)).toBe(true);
+ expect(membersResult.length).toBe(itemCount);
+
+ // Check a few random members
+ for (let i = 0; i < 10; i++) {
+ const index = Math.floor(Math.random() * itemCount);
+ expect(membersResult).toContain(`member-${index}`);
+ }
+ });
+
+ test("should handle very large bulk strings", async () => {
+ // Create various sized strings
+ const sizes = [
+ 1024, // 1KB
+ 10 * 1024, // 10KB
+ 100 * 1024, // 100KB
+ 1024 * 1024, // 1MB
+ ];
+
+ for (const size of sizes) {
+ const key = testKey(`large-string-${size}`);
+ const value = Buffer.alloc(size, "x").toString();
+
+ // Set the large value
+ await ctx.redis.set(key, value);
+
+ // Get it back
+ const result = await ctx.redis.get(key);
+
+ // Should be same length
+ expect(result?.length).toBe(size);
+ expect(result).toBe(value);
+ }
+ });
+
+ test("should handle binary data", async () => {
+ // Create binary data with full byte range
+ const binaryData = new Uint8Array(256);
+ for (let i = 0; i < 256; i++) {
+ binaryData[i] = i;
+ }
+
+ // Convert to string for Redis storage
+ const binaryString = String.fromCharCode(...binaryData);
+
+ // Store binary data
+ const key = testKey("binary-data");
+ await ctx.redis.set(key, binaryString);
+
+ // Get it back
+ const result = await ctx.redis.get(key);
+
+ // Should have same length
+ expect(result?.length).toBe(binaryString.length);
+
+ // Verify each byte
+ if (result) {
+ for (let i = 0; i < Math.min(256, result.length); i++) {
+ expect(result.charCodeAt(i)).toBe(i);
+ }
+ }
+ });
+ });
+
+ describe("Special Value Handling", () => {
+ test("should handle RESP protocol delimiter characters", async () => {
+ try {
+ // Set values containing RESP delimiters
+ const testCases = [
+ { key: testKey("cr"), value: "contains\rcarriage\rreturn" },
+ { key: testKey("lf"), value: "contains\nline\nfeed" },
+ { key: testKey("crlf"), value: "contains\r\ncrlf\r\ndelimiters" },
+ { key: testKey("mixed"), value: "mixed\r\n\r\n\r\ndelimiters" },
+ ];
+
+ for (const { key, value } of testCases) {
+ await ctx.redis.set(key, value);
+
+ const result = await ctx.redis.get(key);
+ expect(result).toBe(value);
+ }
+ } catch (error) {
+ console.warn("RESP delimiter test failed:", error.message);
+ throw error;
+ }
+ });
+
+ test("should handle special RESP types in data", async () => {
+ const client = ctx.redis;
+
+ try {
+ // Values that might confuse RESP parser if not properly handled
+ const testCases = [
+ { key: testKey("plus"), value: "+OK\r\n" }, // Simple string prefix
+ { key: testKey("minus"), value: "-ERR\r\n" }, // Error prefix
+ { key: testKey("colon"), value: ":123\r\n" }, // Integer prefix
+ { key: testKey("dollar"), value: "$5\r\nhello\r\n" }, // Bulk string format
+ { key: testKey("asterisk"), value: "*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n" }, // Array format
+ ];
+
+ for (const { key, value } of testCases) {
+ await client.set(key, value);
+
+ const result = await client.get(key);
+ expect(result).toBe(value);
+ }
+ } catch (error) {
+ console.warn("RESP types in data test failed:", error.message);
+ throw error;
+ }
+ });
+ });
+
+ describe.todo("RESP3 Push Type Handling", () => {});
+
+ describe("Extreme Protocol Conditions", () => {
+ test("should handle rapidly switching between command types", async () => {
+ const client = ctx.redis;
+
+ try {
+ // Rapidly alternate between different command types
+ // to stress protocol parser context switching
+ const iterations = 100;
+ const prefix = testKey("rapid");
+
+ for (let i = 0; i < iterations; i++) {
+ // String operations
+ await client.set(`${prefix}-str-${i}`, `value-${i}`);
+ await client.get(`${prefix}-str-${i}`);
+
+ // Integer operations
+ await client.incr(`${prefix}-int-${i}`);
+
+ // Array result operations
+ await client.send("KEYS", [`${prefix}-str-${i}`]);
+
+ // Hash operations (map responses)
+ await client.send("HSET", [`${prefix}-hash-${i}`, "field", "value"]);
+ await client.send("HGETALL", [`${prefix}-hash-${i}`]);
+
+ // Set operations
+ await client.send("SADD", [`${prefix}-set-${i}`, "member1", "member2"]);
+ await client.send("SMEMBERS", [`${prefix}-set-${i}`]);
+ }
+
+ // If we got here without protocol parse errors, test passes
+ } catch (error) {
+ console.warn("Rapid command switching test failed:", error.message);
+
+ throw error;
+ }
+ });
+
+ test("should handle simultaneous command streams", async () => {
+ // Create multiple clients for parallel operations
+ const clientCount = 5;
+ const clients = Array.from({ length: clientCount }, () => createClient());
+
+ try {
+ // Run many operations in parallel across clients
+ const operationsPerClient = 20;
+ const prefix = testKey("parallel");
+
+ const allPromises = clients.flatMap((client, clientIndex) => {
+ const promises = [];
+
+ for (let i = 0; i < operationsPerClient; i++) {
+ const key = `${prefix}-c${clientIndex}-${i}`;
+
+ // Mix of operation types
+ promises.push(client.set(key, `value-${i}`));
+ promises.push(client.get(key));
+ promises.push(client.incr(`${key}-counter`));
+ promises.push(client.send("HSET", [`${key}-hash`, "field", "value"]));
+ }
+
+ return promises;
+ });
+
+ // Run all operations simultaneously
+ await Promise.all(allPromises);
+
+ // If we got here without errors, test passes
+ } catch (error) {
+ console.warn("Parallel client test failed:", error.message);
+
+ throw error;
+ } finally {
+ // Clean up clients
+ await Promise.all(clients.map(client => client.disconnect()));
+ }
+ });
+ });
+});
diff --git a/test/js/valkey/test-utils.ts b/test/js/valkey/test-utils.ts
new file mode 100644
index 0000000000..805dbd366d
--- /dev/null
+++ b/test/js/valkey/test-utils.ts
@@ -0,0 +1,691 @@
+import { RedisClient, type SpawnOptions } from "bun";
+import { afterAll, beforeAll, expect } from "bun:test";
+import { bunEnv, isCI, randomPort, tempDirWithFiles } from "harness";
+import path from "path";
+
+const dockerCLI = Bun.which("docker") as string;
+export const isEnabled =
+ !!dockerCLI &&
+ (() => {
+ try {
+ const info = Bun.spawnSync({
+ cmd: [dockerCLI, "info"],
+ stdout: "pipe",
+ stderr: "inherit",
+ env: bunEnv,
+ });
+ return info.stdout.toString().indexOf("Server Version:") !== -1;
+ } catch (error) {
+ return false;
+ }
+ })();
+
+/**
+ * Test utilities for Valkey/Redis tests
+ *
+ * Available direct methods (avoid using .send() for these):
+ * - get(key): Get value of a key
+ * - set(key, value): Set value of a key
+ * - del(key): Delete a key
+ * - incr(key): Increment value by 1
+ * - decr(key): Decrement value by 1
+ * - exists(key): Check if key exists
+ * - expire(key, seconds): Set key expiration in seconds
+ * - ttl(key): Get time-to-live for a key
+ * - hmset(key, fields): Set multiple hash fields
+ * - hmget(key, fields): Get multiple hash field values
+ * - sismember(key, member): Check if member is in set
+ * - sadd(key, member): Add member to set
+ * - srem(key, member): Remove member from set
+ * - smembers(key): Get all members in a set
+ * - srandmember(key): Get random member from set
+ * - spop(key): Remove and return random member from set
+ * - hincrby(key, field, value): Increment hash field by integer
+ * - hincrbyfloat(key, field, value): Increment hash field by float
+ */
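+//
+// Illustrative sketch (not executed): prefer the typed direct methods above
+// over the generic .send() escape hatch whenever one exists, e.g.
+//
+//   await client.set("user:1", "alice");           // typed direct method
+//   await client.send("SET", ["user:1", "alice"]); // equivalent raw command
+//
+// Both forms appear throughout these tests; .send() is reserved for commands
+// without a dedicated method.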
+
+// Redis connection information
+let REDIS_TEMP_DIR = tempDirWithFiles("redis-tmp", {
+ "a.txt": "a",
+});
+let REDIS_PORT = randomPort();
+let REDIS_TLS_PORT = randomPort();
+let REDIS_HOST = "0.0.0.0";
+let REDIS_UNIX_SOCKET = REDIS_TEMP_DIR + "/redis.sock";
+
+// Connection types
+export enum ConnectionType {
+ TCP = "tcp",
+ TLS = "tls",
+ UNIX = "unix",
+ AUTH = "auth",
+ READONLY = "readonly",
+ WRITEONLY = "writeonly",
+}
+
+// Default test options
+export const DEFAULT_REDIS_OPTIONS = {
+ username: "default",
+ password: "",
+ db: 0,
+ tls: false,
+};
+
+export const TLS_REDIS_OPTIONS = {
+ ...DEFAULT_REDIS_OPTIONS,
+ db: 1,
+ tls: true,
+ tls_cert_file: path.join(import.meta.dir, "docker-unified", "server.crt"),
+ tls_key_file: path.join(import.meta.dir, "docker-unified", "server.key"),
+ tls_ca_file: path.join(import.meta.dir, "docker-unified", "server.crt"),
+};
+
+export const UNIX_REDIS_OPTIONS = {
+ ...DEFAULT_REDIS_OPTIONS,
+ db: 2,
+};
+
+export const AUTH_REDIS_OPTIONS = {
+ ...DEFAULT_REDIS_OPTIONS,
+ db: 3,
+ username: "testuser",
+ password: "test123",
+};
+
+export const READONLY_REDIS_OPTIONS = {
+ ...DEFAULT_REDIS_OPTIONS,
+ db: 4,
+ username: "readonly",
+ password: "readonly",
+};
+
+export const WRITEONLY_REDIS_OPTIONS = {
+ ...DEFAULT_REDIS_OPTIONS,
+ db: 5,
+ username: "writeonly",
+ password: "writeonly",
+};
+
+// Default test URLs - will be updated if Docker containers are started
+export let DEFAULT_REDIS_URL = `redis://${REDIS_HOST}:${REDIS_PORT}`;
+export let TLS_REDIS_URL = `rediss://${REDIS_HOST}:${REDIS_TLS_PORT}`;
+export let UNIX_REDIS_URL = `redis+unix://${REDIS_UNIX_SOCKET}`;
+export let AUTH_REDIS_URL = `redis://testuser:test123@${REDIS_HOST}:${REDIS_PORT}`;
+export let READONLY_REDIS_URL = `redis://readonly:readonly@${REDIS_HOST}:${REDIS_PORT}`;
+export let WRITEONLY_REDIS_URL = `redis://writeonly:writeonly@${REDIS_HOST}:${REDIS_PORT}`;
+
+// Random key prefix to avoid collisions during testing
+export const TEST_KEY_PREFIX = `bun-test-${Date.now()}-`;
+
+/**
+ * Container configuration interface
+ */
+interface ContainerConfiguration {
+ port?: number;
+ tlsPort?: number;
+ containerName: string;
+ useUnixSocket: boolean;
+}
+
+// Shared container configuration
+let containerConfig: ContainerConfiguration | null = null;
+let dockerStarted = false;
+
+/**
+ * Start the Redis Docker container with TCP, TLS, and Unix socket support
+ */
+async function startContainer(): Promise<ContainerConfiguration> {
+ if (dockerStarted) {
+ return containerConfig as ContainerConfiguration;
+ }
+
+ try {
+ // Check for any existing running valkey-unified-test containers
+ const checkRunning = Bun.spawn({
+ cmd: [
+ dockerCLI,
+ "ps",
+ "--filter",
+ "name=valkey-unified-test",
+ "--filter",
+ "status=running",
+ "--format",
+ "{{json .}}",
+ ],
+ stdout: "pipe",
+ });
+
+ let runningContainers = await new Response(checkRunning.stdout).text();
+ runningContainers = runningContainers.trim();
+
+ console.log(`Running containers: ${runningContainers}`);
+
+ if (runningContainers.trim()) {
+ // Parse the JSON container information
+ const containerInfo = JSON.parse(runningContainers);
+ const containerName = containerInfo.Names;
+
+ // Parse port mappings from the Ports field
+ const portsString = containerInfo.Ports;
+ const portMappings = portsString.split(", ");
+
+ let port = 0;
+ let tlsPort = 0;
+
+ console.log(portMappings);
+
+ // Extract port mappings for Redis ports 6379 and 6380
+ for (const mapping of portMappings) {
+ if (mapping.includes("->6379/tcp")) {
+ const match = mapping.split("->")[0].split(":")[1];
+ if (match) {
+ port = parseInt(match);
+ }
+ } else if (mapping.includes("->6380/tcp")) {
+ const match = mapping.split("->")[0].split(":")[1];
+ if (match) {
+ tlsPort = parseInt(match);
+ }
+ }
+ }
+
+ if (port && tlsPort) {
+ console.log(`Reusing existing container ${containerName} on ports ${port}:6379 and ${tlsPort}:6380`);
+
+ // Update Redis connection info
+ REDIS_PORT = port;
+ REDIS_TLS_PORT = tlsPort;
+ DEFAULT_REDIS_URL = `redis://${REDIS_HOST}:${REDIS_PORT}`;
+ TLS_REDIS_URL = `rediss://${REDIS_HOST}:${REDIS_TLS_PORT}`;
+ UNIX_REDIS_URL = `redis+unix://${REDIS_UNIX_SOCKET}`;
+ AUTH_REDIS_URL = `redis://testuser:test123@${REDIS_HOST}:${REDIS_PORT}`;
+ READONLY_REDIS_URL = `redis://readonly:readonly@${REDIS_HOST}:${REDIS_PORT}`;
+ WRITEONLY_REDIS_URL = `redis://writeonly:writeonly@${REDIS_HOST}:${REDIS_PORT}`;
+
+ containerConfig = {
+ port,
+ tlsPort,
+ containerName,
+ useUnixSocket: true,
+ };
+
+ dockerStarted = true;
+ return containerConfig;
+ }
+ }
+
+ // No suitable running container found, create a new one
+ console.log("Building unified Redis Docker image...");
+ const dockerfilePath = path.join(import.meta.dir, "docker-unified", "Dockerfile");
+ await Bun.spawn(
+ [dockerCLI, "build", "--pull", "--rm", "-f", dockerfilePath, "-t", "bun-valkey-unified-test", "."],
+ {
+ cwd: path.join(import.meta.dir, "docker-unified"),
+ stdio: ["inherit", "inherit", "inherit"],
+ },
+ ).exited;
+
+ const port = randomPort();
+ const tlsPort = randomPort();
+
+ // Create container name with fixed name
+ const containerName = `valkey-unified-test-bun`;
+
+ // Check if container exists and remove it
+ try {
+ const containerCheck = Bun.spawn({
+ cmd: [dockerCLI, "ps", "-a", "--filter", `name=${containerName}`, "--format", "{{.ID}}"],
+ stdout: "pipe",
+ });
+
+ const containerId = await new Response(containerCheck.stdout).text();
+ if (containerId.trim()) {
+ console.log(`Removing existing container ${containerName}`);
+ await Bun.spawn([dockerCLI, "rm", "-f", containerName]).exited;
+ }
+ } catch (error) {
+ // Container might not exist, ignore error
+ }
+
+ // Update Redis connection info
+ REDIS_PORT = port;
+ REDIS_TLS_PORT = tlsPort;
+ DEFAULT_REDIS_URL = `redis://${REDIS_HOST}:${REDIS_PORT}`;
+ TLS_REDIS_URL = `rediss://${REDIS_HOST}:${REDIS_TLS_PORT}`;
+ UNIX_REDIS_URL = `redis+unix://${REDIS_UNIX_SOCKET}`;
+ AUTH_REDIS_URL = `redis://testuser:test123@${REDIS_HOST}:${REDIS_PORT}`;
+ READONLY_REDIS_URL = `redis://readonly:readonly@${REDIS_HOST}:${REDIS_PORT}`;
+ WRITEONLY_REDIS_URL = `redis://writeonly:writeonly@${REDIS_HOST}:${REDIS_PORT}`;
+
+ containerConfig = {
+ port,
+ tlsPort,
+ containerName,
+ useUnixSocket: true,
+ };
+
+ // Start the unified container with TCP, TLS, and Unix socket
+ console.log(`Starting Redis container ${containerName} on ports ${port}:6379 and ${tlsPort}:6380...`);
+
+ const startProcess = Bun.spawn({
+ cmd: [
+ dockerCLI,
+ "run",
+ "-d",
+ "--name",
+ containerName,
+ "-p",
+ `${port}:6379`,
+ "-p",
+ `${tlsPort}:6380`,
+ // TODO: unix domain socket has permission errors in CI.
+ // "-v",
+ // `${REDIS_TEMP_DIR}:/tmp`,
+ "--health-cmd",
+ "redis-cli ping || exit 1",
+ "--health-interval",
+ "2s",
+ "--health-timeout",
+ "1s",
+ "--health-retries",
+ "5",
+ "bun-valkey-unified-test",
+ ],
+ stdout: "pipe",
+ stderr: "pipe",
+ });
+
+ const containerID = await new Response(startProcess.stdout).text();
+ const startError = await new Response(startProcess.stderr).text();
+ const startExitCode = await startProcess.exited;
+
+ if (startExitCode !== 0 || !containerID.trim()) {
+ console.error(`Failed to start container. Exit code: ${startExitCode}, Error: ${startError}`);
+ throw new Error(`Failed to start Redis container: ${startError || "unknown error"}`);
+ }
+
+ console.log(`Container started with ID: ${containerID.trim()}`);
+
+ // Wait a moment for container to initialize
+ console.log("Waiting for container to initialize...");
+ await new Promise(resolve => setTimeout(resolve, 3000));
+
+ // Check if Redis is responding inside the container
+ const redisPingProcess = Bun.spawn({
+ cmd: [dockerCLI, "exec", containerName, "redis-cli", "ping"],
+ stdout: "pipe",
+ stderr: "pipe",
+ });
+
+ const redisPingOutput = await new Response(redisPingProcess.stdout).text();
+ console.log(`Redis inside container responds: ${redisPingOutput.trim()}`);
+ redisPingProcess.kill?.();
+
+ // Also try to get Redis info to ensure it's configured properly
+ const redisInfoProcess = Bun.spawn({
+ cmd: [dockerCLI, "exec", containerName, "redis-cli", "info", "server"],
+ stdout: "pipe",
+ });
+
+ const redisInfo = await new Response(redisInfoProcess.stdout).text();
+ console.log(`Redis server info: Redis version ${redisInfo.match(/redis_version:(.*)/)?.[1]?.trim() || "unknown"}`);
+ redisInfoProcess.kill?.();
+
+ // Check if the container is actually running
+ const containerRunning = Bun.spawn({
+ cmd: [dockerCLI, "ps", "--filter", `name=${containerName}`, "--format", "{{.ID}}"],
+ stdout: "pipe",
+ stderr: "pipe",
+ });
+
+ const runningStatus = await new Response(containerRunning.stdout).text();
+ containerRunning.kill?.();
+
+ if (!runningStatus.trim()) {
+ console.error(`Container ${containerName} failed to start properly`);
+
+ // Get logs to see what happened
+ const logs = Bun.spawn({
+ cmd: [dockerCLI, "logs", containerName],
+ stdout: "pipe",
+ stderr: "pipe",
+ });
+
+ const logOutput = await new Response(logs.stdout).text();
+ const errOutput = await new Response(logs.stderr).text();
+
+ console.log(`Container logs:\n${logOutput}\n${errOutput}`);
+
+ // Check container status to get more details
+ const inspectProcess = Bun.spawn({
+ cmd: [dockerCLI, "inspect", containerName],
+ stdout: "pipe",
+ });
+
+ const inspectOutput = await new Response(inspectProcess.stdout).text();
+ console.log(`Container inspection:\n${inspectOutput}`);
+
+ inspectProcess.kill?.();
+ throw new Error(`Redis container failed to start - check logs for details`);
+ }
+
+ console.log(`Container ${containerName} is running, waiting for Redis services...`);
+
+ dockerStarted = true;
+ return containerConfig;
+ } catch (error) {
+ console.error("Error starting Redis container:", error);
+ throw error;
+ }
+}
+
+let dockerSetupPromise: Promise<ContainerConfiguration>;
+/**
+ * Set up Docker container for all connection types
+ * This will be called once before any tests run
+ */
+export async function setupDockerContainer() {
+ if (!dockerStarted) {
+ try {
+ containerConfig = await (dockerSetupPromise ??= startContainer());
+
+ return true;
+ } catch (error) {
+ console.error("Failed to start Redis container:", error);
+ return false;
+ }
+ }
+ return dockerStarted;
+}
+
+/**
+ * Generate a unique test key to avoid collisions in Redis data
+ */
+export function testKey(name: string): string {
+ return `${context.id}:${TEST_KEY_PREFIX}${name}`;
+}
+
+/**
+ * Create a new client with specific connection type
+ */
+export function createClient(connectionType: ConnectionType = ConnectionType.TCP, customOptions = {}) {
+ let url: string;
+ let options: any = {};
+ context.id++;
+
+ switch (connectionType) {
+ case ConnectionType.TCP:
+ url = DEFAULT_REDIS_URL;
+ options = {
+ ...DEFAULT_REDIS_OPTIONS,
+ ...customOptions,
+ };
+ break;
+ case ConnectionType.TLS:
+ url = TLS_REDIS_URL;
+ options = {
+ ...TLS_REDIS_OPTIONS,
+ ...customOptions,
+ };
+ break;
+ case ConnectionType.UNIX:
+ url = UNIX_REDIS_URL;
+ options = {
+ ...UNIX_REDIS_OPTIONS,
+ ...customOptions,
+ };
+ break;
+ case ConnectionType.AUTH:
+ url = AUTH_REDIS_URL;
+ options = {
+ ...AUTH_REDIS_OPTIONS,
+ ...customOptions,
+ };
+ break;
+ case ConnectionType.READONLY:
+ url = READONLY_REDIS_URL;
+ options = {
+ ...READONLY_REDIS_OPTIONS,
+ ...customOptions,
+ };
+ break;
+ case ConnectionType.WRITEONLY:
+ url = WRITEONLY_REDIS_URL;
+ options = {
+ ...WRITEONLY_REDIS_OPTIONS,
+ ...customOptions,
+ };
+ break;
+ default:
+ throw new Error(`Unknown connection type: ${connectionType}`);
+ }
+
+ // Construct the client for the chosen connection type with merged options
+ return new RedisClient(url, options);
+}
+
+/**
+ * Wait for the client to initialize by sending a dummy command
+ */
+export async function initializeClient(client: any): Promise<boolean> {
+ try {
+ await client.set(testKey("__init__"), "initializing");
+
+ return true;
+ } catch (err) {
+ console.warn("Failed to initialize Redis client:", err);
+ return false;
+ }
+}
+
+/**
+ * Testing context with shared clients and utilities
+ */
+export interface TestContext {
+ redis: RedisClient;
+ initialized: boolean;
+ keyPrefix: string;
+ generateKey: (name: string) => string;
+ // Optional clients for various connection types
+ redisTLS?: RedisClient;
+ redisUnix?: RedisClient;
+ redisAuth?: RedisClient;
+ redisReadOnly?: RedisClient;
+ redisWriteOnly?: RedisClient;
+ id: number;
+}
+
+// Create a singleton promise for Docker initialization
+let dockerInitPromise: Promise<boolean> | null = null;
+
+/**
+ * Setup shared test context for test suites
+ */
+let id = Math.trunc(Math.random() * 1000000);
+// Initialize test context with TCP client by default
+export const context: TestContext = {
+ redis: undefined,
+ initialized: false,
+ keyPrefix: TEST_KEY_PREFIX,
+ generateKey: testKey,
+ redisTLS: undefined,
+ redisUnix: undefined,
+ redisAuth: undefined,
+ redisReadOnly: undefined,
+ redisWriteOnly: undefined,
+ id,
+};
+export { context as ctx };
+
+if (isEnabled)
+ beforeAll(async () => {
+ // Initialize Docker container once for all tests
+ if (!dockerInitPromise) {
+ dockerInitPromise = setupDockerContainer();
+ }
+
+ // Wait for Docker to initialize
+ await dockerInitPromise;
+ context.redis = createClient(ConnectionType.TCP);
+ context.redisTLS = createClient(ConnectionType.TLS);
+ context.redisUnix = createClient(ConnectionType.UNIX);
+ context.redisAuth = createClient(ConnectionType.AUTH);
+ context.redisReadOnly = createClient(ConnectionType.READONLY);
+ context.redisWriteOnly = createClient(ConnectionType.WRITEONLY);
+
+ // Initialize the standard TCP client
+ context.initialized = await initializeClient(context.redis);
+
+ // // Initialize all other clients that were requested
+ // if (context.redisTLS) {
+ // try {
+ // await initializeClient(context.redisTLS);
+ // } catch (err) {
+ // console.warn("TLS client initialization failed - TLS tests may be skipped");
+ // }
+ // }
+
+ // if (context.redisUnix) {
+ // try {
+ // await initializeClient(context.redisUnix);
+ // } catch (err) {
+ // console.warn("Unix socket client initialization failed - Unix socket tests may be skipped");
+ // }
+ // }
+
+ // if (context.redisAuth) {
+ // try {
+ // await initializeClient(context.redisAuth);
+ // } catch (err) {
+ // console.warn("Auth client initialization failed - Auth tests may be skipped");
+ // }
+ // }
+
+ // if (context.redisReadOnly) {
+ // try {
+ // // For read-only we just check connection, not write
+ // await context.redisReadOnly.send("PING", []);
+ // console.log("Read-only client initialized");
+ // } catch (err) {
+ // console.warn("Read-only client initialization failed - Read-only tests may be skipped");
+ // }
+ // }
+
+ // if (context.redisWriteOnly) {
+ // try {
+ // await initializeClient(context.redisWriteOnly);
+ // } catch (err) {
+ // console.warn("Write-only client initialization failed - Write-only tests may be skipped");
+ // }
+ // }
+
+ // if (!context.initialized) {
+ // console.warn("Test initialization failed - tests may be skipped");
+ // }
+ });
+
+if (isEnabled)
+ afterAll(async () => {
+ console.log("Cleaning up Redis container");
+ if (!context.redis?.connected) {
+ return;
+ }
+
+ try {
+ // Clean up Redis keys created during tests
+ const keys = await context.redis.send("KEYS", [`*${TEST_KEY_PREFIX}*`]);
+ if (Array.isArray(keys) && keys.length > 0) {
+ // Using del command directly when available
+ if (keys.length === 1) {
+ await context.redis.del(keys[0]);
+ } else {
+ await context.redis.send("DEL", keys);
+ }
+ }
+
+ // Disconnect all clients
+ await context.redis.disconnect();
+
+ if (context.redisTLS) {
+ await context.redisTLS.disconnect();
+ }
+
+ if (context.redisUnix) {
+ await context.redisUnix.disconnect();
+ }
+
+ if (context.redisAuth) {
+ await context.redisAuth.disconnect();
+ }
+
+ if (context.redisReadOnly) {
+ await context.redisReadOnly.disconnect();
+ }
+
+ if (context.redisWriteOnly) {
+ await context.redisWriteOnly.disconnect();
+ }
+ } catch (err) {
+ console.error("Error during test cleanup:", err);
+ }
+ });
+
+if (!isEnabled) {
+ console.warn("Redis is not enabled, skipping tests");
+}
+
+/**
+ * Verify that a value is of a specific type
+ */
+export function expectType<T>(
+ value: any,
+ expectedType: "string" | "number" | "bigint" | "boolean" | "symbol" | "undefined" | "object" | "function",
+): asserts value is T {
+ expect(value).toBeTypeOf(expectedType);
+}
+
+/**
+ * Wait for a specified amount of time
+ */
+export function delay(ms: number): Promise<void> {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+/**
+ * Retry a function until it succeeds or times out
+ */
+export async function retry<T>(
+ fn: () => Promise<T>,
+ options: {
+ maxAttempts?: number;
+ delay?: number;
+ timeout?: number;
+ predicate?: (result: T) => boolean;
+ } = {},
+): Promise<T> {
+ const { maxAttempts = 5, delay: delayMs = 100, timeout = 5000, predicate = r => !!r } = options;
+
+ const startTime = Date.now();
+ let attempts = 0;
+
+ while (attempts < maxAttempts && Date.now() - startTime < timeout) {
+ attempts++;
+ try {
+ const result = await fn();
+ if (predicate(result)) {
+ return result;
+ }
+ } catch (e) {
+ if (attempts >= maxAttempts) throw e;
+ }
+
+ if (attempts < maxAttempts) {
+ await delay(delayMs);
+ }
+ }
+
+ throw new Error(`Retry failed after ${attempts} attempts (${Date.now() - startTime}ms)`);
+}
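+
+// Illustrative sketch (not executed): poll until a key becomes visible,
+// retrying every 100 ms for up to 10 attempts.
+//
+//   const value = await retry(() => ctx.redis.get("some-key"), {
+//     maxAttempts: 10,
+//     delay: 100,
+//     predicate: v => v !== null,
+//   });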
diff --git a/test/js/valkey/unit/basic-operations.test.ts b/test/js/valkey/unit/basic-operations.test.ts
new file mode 100644
index 0000000000..16ef021097
--- /dev/null
+++ b/test/js/valkey/unit/basic-operations.test.ts
@@ -0,0 +1,316 @@
+import { describe, test, expect, beforeEach } from "bun:test";
+import { ctx, expectType, createClient, ConnectionType, isEnabled } from "../test-utils";
+
+/**
+ * Test suite covering basic Redis operations
+ * - String operations (SET, GET, APPEND, GETDEL, etc)
+ * - Key expiration (EXPIRE, TTL)
+ * - Counter operations (INCR, DECR, INCRBY, DECRBY)
+ * - Existence checks (EXISTS)
+ * - Deletion operations (DEL)
+ */
+describe.skipIf(!isEnabled)("Valkey: Basic String Operations", () => {
+ beforeEach(() => {
+ if (ctx.redis?.connected) {
+ ctx.redis.disconnect?.();
+ }
+ ctx.redis = createClient(ConnectionType.TCP);
+ });
+ describe("String Commands", () => {
+ test("SET and GET commands", async () => {
+ const key = ctx.generateKey("string-test");
+ const value = "Hello Valkey!";
+
+ // SET should return OK
+ const setResult = await ctx.redis.set(key, value);
+ expect(setResult).toBe("OK");
+
+ // GET should return the value
+ const getResult = await ctx.redis.get(key);
+ expect(getResult).toBe(value);
+
+ // GET non-existent key should return null
+ const nonExistentKey = ctx.generateKey("non-existent");
+ const nullResult = await ctx.redis.get(nonExistentKey);
+ expect(nullResult).toBeNull();
+ });
+
+ test("MGET command", async () => {
+ const key1 = ctx.generateKey("mget-test-1");
+ const key2 = ctx.generateKey("mget-test-2");
+ const value1 = "Hello";
+ const value2 = "World";
+
+ await ctx.redis.set(key1, value1);
+ await ctx.redis.set(key2, value2);
+
+ const result = await ctx.redis.mget(key1, key2, ctx.generateKey("non-existent"));
+ expect(result).toEqual([value1, value2, null]);
+ });
+
+ test("SET with expiry option", async () => {
+ const key = ctx.generateKey("expiry-set-test");
+
+ // Set with expiry (EX option)
+ await ctx.redis.send("SET", [key, "expires-soon", "EX", "1"]);
+
+ // Key should exist immediately
+ const existsNow = await ctx.redis.exists(key);
+ expect(existsNow).toBe(true);
+
+ // Wait for expiration
+ await new Promise(resolve => setTimeout(resolve, 1500));
+
+ // Key should be gone after expiry
+ const existsLater = await ctx.redis.exists(key);
+ expect(existsLater).toBe(false);
+ });
+
+ test("APPEND command", async () => {
+ const key = ctx.generateKey("append-test");
+ const initialValue = "Hello";
+ const appendValue = " World";
+
+ // Set initial value
+ await ctx.redis.set(key, initialValue);
+
+ // Append additional content
+ const newLength = await ctx.redis.send("APPEND", [key, appendValue]);
+ expectType(newLength, "number");
+ expect(newLength).toBe(initialValue.length + appendValue.length);
+
+ // Verify appended content
+ const finalValue = await ctx.redis.get(key);
+ expect(finalValue).toBe(initialValue + appendValue);
+ });
+
+ test("GETDEL command", async () => {
+ const key = ctx.generateKey("getdel-test");
+ const value = "value-to-get-and-delete";
+
+ // Set the value
+ await ctx.redis.set(key, value);
+
+ // Get and delete in one operation
+ const result = await ctx.redis.send("GETDEL", [key]);
+ expect(result).toBe(value);
+
+ // Verify key is gone
+ const exists = await ctx.redis.exists(key);
+ expect(exists).toBe(false);
+ });
+
+ test("GETRANGE command", async () => {
+ const key = ctx.generateKey("getrange-test");
+ const value = "Hello Valkey World";
+
+ // Set the value
+ await ctx.redis.set(key, value);
+
+ // Get substring using GETRANGE
+ const result = await ctx.redis.send("GETRANGE", [key, "6", "12"]);
+ expect(result).toBe("Valkey ");
+ });
+
+ test("SETRANGE command", async () => {
+ const key = ctx.generateKey("setrange-test");
+ const value = "Hello World";
+
+ // Set the initial value
+ await ctx.redis.set(key, value);
+
+ // Replace "World" with "Valkey" starting at position 6
+ const newLength = await ctx.redis.send("SETRANGE", [key, "6", "Valkey"]);
+ expectType(newLength, "number");
+
+ // Expected length is the maximum of original length and (offset + replacement length)
+ const expectedLength = Math.max(value.length, 6 + "Valkey".length);
+ expect(newLength).toBe(expectedLength);
+
+ // Verify the updated string
+ const updatedValue = await ctx.redis.get(key);
+ expect(updatedValue).toBe("Hello Valkey");
+ });
+
+ test("STRLEN command", async () => {
+ const key = ctx.generateKey("strlen-test");
+ const value = "Hello Valkey";
+
+ // Set the value
+ await ctx.redis.set(key, value);
+
+ // Get string length
+ const length = await ctx.redis.send("STRLEN", [key]);
+ expectType(length, "number");
+ expect(length).toBe(value.length);
+ });
+ });
+
+ describe("Counter Operations", () => {
+ test("INCR and DECR commands", async () => {
+ const key = ctx.generateKey("counter-test");
+
+ // Set initial counter value
+ await ctx.redis.set(key, "10");
+
+ // INCR should increment and return new value
+ const incremented = await ctx.redis.incr(key);
+ expectType(incremented, "number");
+ expect(incremented).toBe(11);
+
+ // DECR should decrement and return new value
+ const decremented = await ctx.redis.decr(key);
+ expectType(decremented, "number");
+ expect(decremented).toBe(10);
+ });
+
+ test("INCRBY and DECRBY commands", async () => {
+ const key = ctx.generateKey("incrby-test");
+
+ // Set initial counter value
+ await ctx.redis.set(key, "10");
+
+ // INCRBY should add specified value and return result
+ const incremented = await ctx.redis.send("INCRBY", [key, "5"]);
+ expectType(incremented, "number");
+ expect(incremented).toBe(15);
+
+ // DECRBY should subtract specified value and return result
+ const decremented = await ctx.redis.send("DECRBY", [key, "7"]);
+ expectType(decremented, "number");
+ expect(decremented).toBe(8);
+ });
+
+ test("INCRBYFLOAT command", async () => {
+ const key = ctx.generateKey("incrbyfloat-test");
+
+ // Set initial counter value
+ await ctx.redis.set(key, "10.5");
+
+ // INCRBYFLOAT should add specified float value and return result
+ const result = await ctx.redis.send("INCRBYFLOAT", [key, "0.7"]);
+ expectType(result, "string");
+ expect(result).toBe("11.2");
+
+ // INCRBYFLOAT also works with negative values for subtraction
+ const subtracted = await ctx.redis.send("INCRBYFLOAT", [key, "-1.2"]);
+ expectType(subtracted, "string");
+ expect(subtracted).toBe("10");
+ });
+ });
+
+ describe("Key Expiration", () => {
+ test("EXPIRE and TTL commands", async () => {
+ const key = ctx.generateKey("expire-test");
+
+ // Set a key
+ await ctx.redis.set(key, "expiring-value");
+
+ // Set expiration (60 seconds)
+ const expireResult = await ctx.redis.expire(key, 60);
+ expectType(expireResult, "number");
+ expect(expireResult).toBe(1); // 1 indicates success
+
+ // Get TTL
+ const ttl = await ctx.redis.ttl(key);
+ expectType(ttl, "number");
+ expect(ttl).toBeGreaterThan(0); // Should be positive number of seconds
+ expect(ttl).toBeLessThanOrEqual(60);
+ });
+
+ test("TTL for non-existent and non-expiring keys", async () => {
+ // Test non-existent key
+ const nonExistentKey = ctx.generateKey("non-existent");
+ const nonExistentTTL = await ctx.redis.ttl(nonExistentKey);
+ expect(nonExistentTTL).toBe(-2); // -2 indicates key doesn't exist
+
+ // Test key with no expiration
+ const permanentKey = ctx.generateKey("permanent");
+ await ctx.redis.set(permanentKey, "no-expiry");
+ const permanentTTL = await ctx.redis.ttl(permanentKey);
+ expect(permanentTTL).toBe(-1); // -1 indicates no expiration
+ });
+
+ test("PEXPIRE and PTTL commands (millisecond precision)", async () => {
+ const key = ctx.generateKey("pexpire-test");
+
+ // Set a key
+ await ctx.redis.set(key, "expiring-value-ms");
+
+ // Set expiration with millisecond precision (5000 ms = 5 seconds)
+ const expireResult = await ctx.redis.send("PEXPIRE", [key, "5000"]);
+ expectType(expireResult, "number");
+ expect(expireResult).toBe(1); // 1 indicates success
+
+ // Get TTL with millisecond precision
+ const pttl = await ctx.redis.send("PTTL", [key]);
+ expectType(pttl, "number");
+ expect(pttl).toBeGreaterThan(0); // Should be positive number of milliseconds
+ expect(pttl).toBeLessThanOrEqual(5000);
+ });
+ });
+
+ describe("Existence and Deletion", () => {
+ test("EXISTS command", async () => {
+ const key = ctx.generateKey("exists-test");
+
+ // Initially key should not exist
+ const initialExists = await ctx.redis.exists(key);
+ expect(initialExists).toBe(false);
+
+ // Set the key
+ await ctx.redis.set(key, "exists-now");
+
+ // Now key should exist
+ const nowExists = await ctx.redis.exists(key);
+ expect(nowExists).toBe(true);
+ });
+
+ test("DEL command", async () => {
+ const key1 = ctx.generateKey("del-test-1");
+ const key2 = ctx.generateKey("del-test-2");
+
+ // Set two keys
+ await ctx.redis.set(key1, "value1");
+ await ctx.redis.set(key2, "value2");
+
+ // Delete a single key
+ const singleDelCount = await ctx.redis.del(key1);
+ expectType(singleDelCount, "number");
+ expect(singleDelCount).toBe(1); // 1 key deleted
+
+ // Key should not exist anymore
+ const exists1 = await ctx.redis.exists(key1);
+ expect(exists1).toBe(false);
+
+ // Second key should still exist
+ const exists2 = await ctx.redis.exists(key2);
+ expect(exists2).toBe(true);
+
+ // Delete multiple keys using sendCommand
+ const multipleDelCount = await ctx.redis.send("DEL", [key1, key2]);
+ expectType(multipleDelCount, "number");
+ expect(multipleDelCount).toBe(1); // Only 1 key existed and was deleted
+ });
+
+ test("UNLINK command (asynchronous delete)", async () => {
+ const key1 = ctx.generateKey("unlink-test-1");
+ const key2 = ctx.generateKey("unlink-test-2");
+
+ // Set two keys
+ await ctx.redis.set(key1, "value1");
+ await ctx.redis.set(key2, "value2");
+
+ // Unlink multiple keys
+ const unlinkCount = await ctx.redis.send("UNLINK", [key1, key2]);
+ expectType(unlinkCount, "number");
+ expect(unlinkCount).toBe(2); // 2 keys were unlinked
+
+ // Keys should not exist anymore
+ const exists1 = await ctx.redis.exists(key1);
+ const exists2 = await ctx.redis.exists(key2);
+ expect(exists1).toBe(false);
+ expect(exists2).toBe(false);
+ });
+ });
+});
diff --git a/test/js/valkey/unit/hash-operations.test.ts b/test/js/valkey/unit/hash-operations.test.ts
new file mode 100644
index 0000000000..b64c02db79
--- /dev/null
+++ b/test/js/valkey/unit/hash-operations.test.ts
@@ -0,0 +1,273 @@
+import { describe, expect, test, beforeEach } from "bun:test";
+import { ConnectionType, createClient, ctx, expectType, isEnabled } from "../test-utils";
+
+/**
+ * Test suite covering Redis hash operations
+ * - Single field operations (HSET, HGET, HDEL)
+ * - Multiple field operations (HMSET, HMGET)
+ * - Incremental operations (HINCRBY, HINCRBYFLOAT)
+ * - Hash scanning operations (HGETALL, HKEYS, HVALS)
+ */
+describe.skipIf(!isEnabled)("Valkey: Hash Data Type Operations", () => {
+ beforeEach(async () => {
+ if (ctx.redis?.connected) {
+ ctx.redis.disconnect?.();
+ }
+ ctx.redis = createClient(ConnectionType.TCP);
+ });
+
+ describe("Basic Hash Commands", () => {
+ test("HSET and HGET commands", async () => {
+ const key = ctx.generateKey("hash-test");
+
+ // HSET a single field
+ const setResult = await ctx.redis.send("HSET", [key, "name", "John"]);
+ expectType(setResult, "number");
+ expect(setResult).toBe(1); // 1 new field was set
+
+ // HGET the field
+ const getResult = await ctx.redis.send("HGET", [key, "name"]);
+ expect(getResult).toBe("John");
+
+ // HGET non-existent field should return null
+ const nonExistentField = await ctx.redis.send("HGET", [key, "nonexistent"]);
+ expect(nonExistentField).toBeNull();
+ });
+
+ test("HMSET and HMGET commands", async () => {
+ const key = ctx.generateKey("hmset-test");
+
+ // HMSET multiple fields
+ const hmsetResult = await ctx.redis.hmset(key, ["name", "Alice", "age", "30", "active", "true"]);
+ expect(hmsetResult).toBe("OK");
+
+ // HMGET specific fields
+ const hmgetResult = await ctx.redis.hmget(key, ["name", "age"]);
+ expect(Array.isArray(hmgetResult)).toBe(true);
+ expect(hmgetResult).toEqual(["Alice", "30"]);
+
+ // HMGET with non-existent fields
+ const mixedResult = await ctx.redis.hmget(key, ["name", "nonexistent"]);
+ expect(Array.isArray(mixedResult)).toBe(true);
+ expect(mixedResult).toEqual(["Alice", null]);
+ });
+
+ test("HMSET with object-style syntax", async () => {
+ const key = ctx.generateKey("hmset-object-test");
+
+ // We'll use sendCommand for this test since the native hmset doesn't support this syntax yet
+ await ctx.redis.send("HMSET", [key, "name", "Bob", "age", "25", "email", "bob@example.com"]);
+
+ // Verify all fields were set
+ const allFields = await ctx.redis.send("HGETALL", [key]);
+ expect(allFields).toBeDefined();
+
+ if (typeof allFields === "object" && allFields !== null) {
+ expect(allFields).toEqual({
+ name: "Bob",
+ age: "25",
+ email: "bob@example.com",
+ });
+ }
+ });
+
+ test("HDEL command", async () => {
+ const key = ctx.generateKey("hdel-test");
+
+ // Set multiple fields
+ await ctx.redis.send("HSET", [key, "field1", "value1", "field2", "value2", "field3", "value3"]);
+
+ // Delete a single field
+ const singleDelResult = await ctx.redis.send("HDEL", [key, "field1"]);
+ expectType(singleDelResult, "number");
+ expect(singleDelResult).toBe(1); // 1 field deleted
+
+ // Delete multiple fields
+ const multiDelResult = await ctx.redis.send("HDEL", [key, "field2", "field3", "nonexistent"]);
+ expectType(multiDelResult, "number");
+ expect(multiDelResult).toBe(2); // 2 fields deleted, non-existent field ignored
+
+ // Verify all fields are gone
+ const allFields = await ctx.redis.send("HKEYS", [key]);
+ expect(Array.isArray(allFields)).toBe(true);
+ expect(allFields.length).toBe(0);
+ });
+
+ test("HEXISTS command", async () => {
+ const key = ctx.generateKey("hexists-test");
+
+ // Set a field
+ await ctx.redis.send("HSET", [key, "field1", "value1"]);
+
+ // Check if field exists
+ const existsResult = await ctx.redis.send("HEXISTS", [key, "field1"]);
+ expectType(existsResult, "number");
+ expect(existsResult).toBe(1); // 1 indicates field exists
+
+ // Check non-existent field
+ const nonExistsResult = await ctx.redis.send("HEXISTS", [key, "nonexistent"]);
+ expectType(nonExistsResult, "number");
+ expect(nonExistsResult).toBe(0); // 0 indicates field does not exist
+ });
+ });
+
+ describe("Hash Incremental Operations", () => {
+ test("HINCRBY command", async () => {
+ const key = ctx.generateKey("hincrby-test");
+
+ // Set initial value
+ await ctx.redis.send("HSET", [key, "counter", "10"]);
+
+ // Increment by a value
+ const incrResult = await ctx.redis.hincrby(key, "counter", 5);
+ expectType(incrResult, "number");
+ expect(incrResult).toBe(15);
+
+ // Decrement by using negative increment
+ const decrResult = await ctx.redis.hincrby(key, "counter", -7);
+ expectType(decrResult, "number");
+ expect(decrResult).toBe(8);
+
+ // Increment non-existent field (creates it with value 0 first)
+ const newFieldResult = await ctx.redis.hincrby(key, "new-counter", 3);
+ expectType(newFieldResult, "number");
+ expect(newFieldResult).toBe(3);
+ });
+
+ test("HINCRBYFLOAT command", async () => {
+ const key = ctx.generateKey("hincrbyfloat-test");
+
+ // Set initial value
+ await ctx.redis.send("HSET", [key, "counter", "10.5"]);
+
+ // Increment by float value
+ const incrResult = await ctx.redis.hincrbyfloat(key, "counter", 1.5);
+ expect(incrResult).toBe("12");
+
+ // Decrement by using negative increment
+ const decrResult = await ctx.redis.hincrbyfloat(key, "counter", -2.5);
+ expect(decrResult).toBe("9.5");
+
+ // Increment non-existent field (creates it with value 0 first)
+ const newFieldResult = await ctx.redis.hincrbyfloat(key, "new-counter", 3.75);
+ expect(newFieldResult).toBe("3.75");
+ });
+ });
+
+ describe("Hash Scanning and Retrieval", () => {
+ test("HGETALL command", async () => {
+ const key = ctx.generateKey("hgetall-test");
+
+ // Set multiple fields
+ await ctx.redis.send("HSET", [
+ key,
+ "name",
+ "Charlie",
+ "age",
+ "40",
+ "email",
+ "charlie@example.com",
+ "active",
+ "true",
+ ]);
+
+ // Get all fields and values
+ const result = await ctx.redis.send("HGETALL", [key]);
+ expect(result).toBeDefined();
+
+ // When using RESP3, HGETALL returns a map/object
+ if (typeof result === "object" && result !== null) {
+ expect(result.name).toBe("Charlie");
+ expect(result.age).toBe("40");
+ expect(result.email).toBe("charlie@example.com");
+ expect(result.active).toBe("true");
+ }
+ });
+
+ test("HKEYS command", async () => {
+ const key = ctx.generateKey("hkeys-test");
+
+ // Set multiple fields
+ await ctx.redis.send("HSET", [key, "name", "Dave", "age", "35", "email", "dave@example.com"]);
+
+ // Get all field names
+ const result = await ctx.redis.send("HKEYS", [key]);
+ expect(Array.isArray(result)).toBe(true);
+ expect(result.length).toBe(3);
+ expect(result).toContain("name");
+ expect(result).toContain("age");
+ expect(result).toContain("email");
+ });
+
+ test("HVALS command", async () => {
+ const key = ctx.generateKey("hvals-test");
+
+ // Set multiple fields
+ await ctx.redis.send("HSET", [key, "name", "Eve", "age", "28", "email", "eve@example.com"]);
+
+ // Get all field values
+ const result = await ctx.redis.send("HVALS", [key]);
+ expect(Array.isArray(result)).toBe(true);
+ expect(result.length).toBe(3);
+ expect(result).toContain("Eve");
+ expect(result).toContain("28");
+ expect(result).toContain("eve@example.com");
+ });
+
+ test("HLEN command", async () => {
+ const key = ctx.generateKey("hlen-test");
+
+ // Set multiple fields
+ await ctx.redis.send("HSET", [key, "field1", "value1", "field2", "value2", "field3", "value3"]);
+
+ // Get number of fields
+ const result = await ctx.redis.send("HLEN", [key]);
+ expectType(result, "number");
+ expect(result).toBe(3);
+
+ // Delete a field and check again
+ await ctx.redis.send("HDEL", [key, "field1"]);
+ const updatedResult = await ctx.redis.send("HLEN", [key]);
+ expectType(updatedResult, "number");
+ expect(updatedResult).toBe(2);
+ });
+
+ test("HSCAN command", async () => {
+ const key = ctx.generateKey("hscan-test");
+
+ // Create a hash with many fields
+      const fieldCount = 20; // Keep the field count small so the test stays fast
+ const fieldArgs = [];
+ for (let i = 0; i < fieldCount; i++) {
+ fieldArgs.push(`field:${i}`, `value:${i}`);
+ }
+
+ await ctx.redis.send("HSET", [key, ...fieldArgs]);
+
+ // Use HSCAN to iterate through keys
+ const scanResult = await ctx.redis.send("HSCAN", [key, "0", "COUNT", "10"]);
+
+ // Validate scan result structure
+ expect(Array.isArray(scanResult)).toBe(true);
+ expect(scanResult.length).toBe(2);
+
+ // First element is cursor
+ expect(typeof scanResult[0]).toBe("string");
+
+ // Second element is the key-value pairs array
+ const pairs = scanResult[1];
+ expect(Array.isArray(pairs)).toBe(true);
+
+ // Should have key-value pairs (even number of elements)
+ expect(pairs.length % 2).toBe(0);
+
+ // Verify we have the expected pattern in our results
+      for (let i = 0; i < pairs.length; i += 2) {
+        const field = pairs[i];
+        const value = pairs[i + 1];
+        expect(field).toMatch(/^field:\d+$/);
+        expect(value).toMatch(/^value:\d+$/);
+      }
+ });
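+
+    // In practice an HSCAN sweep keeps resubmitting the returned cursor until the
+    // server answers with "0". A minimal sketch of that loop (the helper name is
+    // illustrative and is not used by the tests above):
+    async function hscanAll(key: string): Promise<Record<string, string>> {
+      const fields: Record<string, string> = {};
+      let cursor = "0";
+      do {
+        // Each reply is [nextCursor, flatArrayOfFieldValuePairs]
+        const [nextCursor, pairs] = await ctx.redis.send("HSCAN", [key, cursor, "COUNT", "10"]);
+        for (let i = 0; i < pairs.length; i += 2) {
+          fields[pairs[i]] = pairs[i + 1];
+        }
+        cursor = nextCursor;
+      } while (cursor !== "0");
+      return fields;
+    }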
+ });
+});
diff --git a/test/js/valkey/unit/list-operations.test.ts b/test/js/valkey/unit/list-operations.test.ts
new file mode 100644
index 0000000000..6650c4c6ce
--- /dev/null
+++ b/test/js/valkey/unit/list-operations.test.ts
@@ -0,0 +1,448 @@
+import { describe, expect, test, beforeEach } from "bun:test";
+import { expectType, ctx, createClient, ConnectionType, isEnabled } from "../test-utils";
+import { isCI } from "harness";
+
+/**
+ * Test suite covering Redis list operations
+ * - Basic operations (LPUSH, RPUSH, LPOP, RPOP)
+ * - Range operations (LRANGE, LTRIM)
+ * - List information (LLEN, LINDEX)
+ * - Blocking operations (BLPOP, BRPOP)
+ */
+describe.skipIf(!isEnabled)("Valkey: List Data Type Operations", () => {
+ beforeEach(() => {
+ if (ctx.redis?.connected) {
+ ctx.redis.disconnect?.();
+ }
+ ctx.redis = createClient(ConnectionType.TCP);
+ });
+
+ describe("Basic List Operations", () => {
+ test("LPUSH and RPUSH commands", async () => {
+ const key = ctx.generateKey("list-push-test");
+
+ // Left push single value
+ const lpushResult = await ctx.redis.send("LPUSH", [key, "left-value"]);
+ expectType(lpushResult, "number");
+ expect(lpushResult).toBe(1); // List has 1 element
+
+ // Right push single value
+ const rpushResult = await ctx.redis.send("RPUSH", [key, "right-value"]);
+ expectType(rpushResult, "number");
+ expect(rpushResult).toBe(2); // List now has 2 elements
+
+ // Multiple values with LPUSH
+ const multiLpushResult = await ctx.redis.send("LPUSH", [key, "left1", "left2", "left3"]);
+ expectType(multiLpushResult, "number");
+ expect(multiLpushResult).toBe(5); // List now has 5 elements
+
+ // Multiple values with RPUSH
+ const multiRpushResult = await ctx.redis.send("RPUSH", [key, "right1", "right2"]);
+ expectType(multiRpushResult, "number");
+ expect(multiRpushResult).toBe(7); // List now has 7 elements
+
+ // Verify the list content (should be left3, left2, left1, left-value, right-value, right1, right2)
+ const range = await ctx.redis.send("LRANGE", [key, "0", "-1"]);
+ expect(Array.isArray(range)).toBe(true);
+ expect(range.length).toBe(7);
+ expect(range[0]).toBe("left3");
+ expect(range[3]).toBe("left-value");
+ expect(range[4]).toBe("right-value");
+ expect(range[6]).toBe("right2");
+ });
+
+ test("LPOP and RPOP commands", async () => {
+ const key = ctx.generateKey("list-pop-test");
+
+ // Set up test list
+ await ctx.redis.send("RPUSH", [key, "one", "two", "three", "four", "five"]);
+
+ // Pop from left side
+ const lpopResult = await ctx.redis.send("LPOP", [key]);
+ expect(lpopResult).toBe("one");
+
+ // Pop from right side
+ const rpopResult = await ctx.redis.send("RPOP", [key]);
+ expect(rpopResult).toBe("five");
+
+ // Pop multiple elements from left
+ const multiLpopResult = await ctx.redis.send("LPOP", [key, "2"]);
+ expect(Array.isArray(multiLpopResult)).toBe(true);
+ expect(multiLpopResult.length).toBe(2);
+ expect(multiLpopResult[0]).toBe("two");
+ expect(multiLpopResult[1]).toBe("three");
+
+ // Verify only "four" is left
+ const remaining = await ctx.redis.send("LRANGE", [key, "0", "-1"]);
+ expect(Array.isArray(remaining)).toBe(true);
+ expect(remaining.length).toBe(1);
+ expect(remaining[0]).toBe("four");
+ });
+
+ test("LRANGE command", async () => {
+ const key = ctx.generateKey("lrange-test");
+
+ // Set up test list with 10 elements
+ await ctx.redis.send("RPUSH", [key, "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]);
+
+ // Get full range - using LRANGE command
+ // TODO: When a direct lrange method is implemented, use that instead
+ const fullRange = await ctx.redis.send("LRANGE", [key, "0", "-1"]);
+ expect(Array.isArray(fullRange)).toBe(true);
+ expect(fullRange).toMatchInlineSnapshot(`
+ [
+ "0",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5",
+ "6",
+ "7",
+ "8",
+ "9",
+ ]
+ `);
+
+ // Get partial range from start
+ const startRange = await ctx.redis.send("LRANGE", [key, "0", "2"]);
+ expect(Array.isArray(startRange)).toBe(true);
+ expect(startRange).toMatchInlineSnapshot(`
+ [
+ "0",
+ "1",
+ "2",
+ ]
+ `);
+
+ // Get partial range from middle
+ const midRange = await ctx.redis.send("LRANGE", [key, "3", "6"]);
+ expect(Array.isArray(midRange)).toBe(true);
+ expect(midRange).toMatchInlineSnapshot(`
+ [
+ "3",
+ "4",
+ "5",
+ "6",
+ ]
+ `);
+
+ // Get partial range from end using negative indices
+ const endRange = await ctx.redis.send("LRANGE", [key, "-3", "-1"]);
+ expect(Array.isArray(endRange)).toBe(true);
+ expect(endRange).toMatchInlineSnapshot(`
+ [
+ "7",
+ "8",
+ "9",
+ ]
+ `);
+
+ // Out of range indexes should be limited
+ const outOfRange = await ctx.redis.send("LRANGE", [key, "5", "100"]);
+ expect(Array.isArray(outOfRange)).toBe(true);
+ expect(outOfRange).toMatchInlineSnapshot(`
+ [
+ "5",
+ "6",
+ "7",
+ "8",
+ "9",
+ ]
+ `);
+ });
+
+ test("LTRIM command", async () => {
+ const key = ctx.generateKey("ltrim-test");
+
+ // Set up test list with 10 elements
+ await ctx.redis.send("RPUSH", [key, "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]);
+
+ // Trim the list to keep only elements from index 2 to 7
+ // TODO: When a direct ltrim method is implemented, use that instead
+ const trimResult = await ctx.redis.send("LTRIM", [key, "2", "7"]);
+ expect(trimResult).toMatchInlineSnapshot(`"OK"`);
+
+ // Verify the trimmed list
+ const result = await ctx.redis.send("LRANGE", [key, "0", "-1"]);
+ expect(Array.isArray(result)).toBe(true);
+ expect(result).toMatchInlineSnapshot(`
+ [
+ "2",
+ "3",
+ "4",
+ "5",
+ "6",
+ "7",
+ ]
+ `);
+ });
+ });
+
+ describe("List Information", () => {
+ test("LLEN command", async () => {
+ const key = ctx.generateKey("llen-test");
+
+ // Empty list should have length 0
+ const emptyLen = await ctx.redis.send("LLEN", [key]);
+ expectType(emptyLen, "number");
+ expect(emptyLen).toBe(0);
+
+ // Add elements and check length
+ await ctx.redis.send("RPUSH", [key, "a", "b", "c", "d"]);
+ const len = await ctx.redis.send("LLEN", [key]);
+ expectType(len, "number");
+ expect(len).toBe(4);
+
+ // Remove elements and check length
+ await ctx.redis.send("LPOP", [key, "2"]);
+ const updatedLen = await ctx.redis.send("LLEN", [key]);
+ expectType(updatedLen, "number");
+ expect(updatedLen).toBe(2);
+ });
+
+ test("LINDEX command", async () => {
+ const key = ctx.generateKey("lindex-test");
+
+ // Set up test list
+ await ctx.redis.send("RPUSH", [key, "val0", "val1", "val2", "val3", "val4"]);
+
+ // Get element at index 0 (first element)
+ const firstElement = await ctx.redis.send("LINDEX", [key, "0"]);
+ expect(firstElement).toBe("val0");
+
+ // Get element at index 2 (middle element)
+ const middleElement = await ctx.redis.send("LINDEX", [key, "2"]);
+ expect(middleElement).toBe("val2");
+
+ // Get element at index -1 (last element)
+ const lastElement = await ctx.redis.send("LINDEX", [key, "-1"]);
+ expect(lastElement).toBe("val4");
+
+ // Get element at index -2 (second to last element)
+ const secondToLastElement = await ctx.redis.send("LINDEX", [key, "-2"]);
+ expect(secondToLastElement).toBe("val3");
+
+ // Get element at out of range index
+ const nonExistent = await ctx.redis.send("LINDEX", [key, "100"]);
+ expect(nonExistent).toBeNull();
+ });
+
+ test("LINSERT command", async () => {
+ const key = ctx.generateKey("linsert-test");
+
+ // Set up test list
+ await ctx.redis.send("RPUSH", [key, "one", "three", "four"]);
+
+ // Insert before a value
+ const beforeResult = await ctx.redis.send("LINSERT", [key, "BEFORE", "three", "two"]);
+ expectType(beforeResult, "number");
+ expect(beforeResult).toBe(4); // New length is 4
+
+ // Insert after a value
+ const afterResult = await ctx.redis.send("LINSERT", [key, "AFTER", "four", "five"]);
+ expectType(afterResult, "number");
+ expect(afterResult).toBe(5); // New length is 5
+
+ // Verify the list content
+ const content = await ctx.redis.send("LRANGE", [key, "0", "-1"]);
+ expect(Array.isArray(content)).toBe(true);
+ expect(content).toEqual(["one", "two", "three", "four", "five"]);
+
+ // Insert for non-existent pivot
+ const nonExistentResult = await ctx.redis.send("LINSERT", [key, "BEFORE", "nonexistent", "value"]);
+ expectType(nonExistentResult, "number");
+ expect(nonExistentResult).toBe(-1); // -1 indicates pivot wasn't found
+ });
+
+ test("LSET command", async () => {
+ const key = ctx.generateKey("lset-test");
+
+ // Set up test list
+ await ctx.redis.send("RPUSH", [key, "a", "b", "c", "d"]);
+
+ // Set element at index 1
+ const setResult = await ctx.redis.send("LSET", [key, "1", "B"]);
+ expect(setResult).toBe("OK");
+
+ // Set element at last index
+ const lastSetResult = await ctx.redis.send("LSET", [key, "-1", "D"]);
+ expect(lastSetResult).toBe("OK");
+
+ // Verify the modified list
+ const content = await ctx.redis.send("LRANGE", [key, "0", "-1"]);
+ expect(Array.isArray(content)).toBe(true);
+ expect(content).toEqual(["a", "B", "c", "D"]);
+
+      // Setting an out-of-range index should reject with an error
+      await expect(ctx.redis.send("LSET", [key, "100", "value"])).rejects.toThrow();
+ });
+ });
+
+ describe("List Position Operations", () => {
+ test("LPOS command", async () => {
+ const key = ctx.generateKey("lpos-test");
+
+ // Set up test list with duplicate elements
+ await ctx.redis.send("RPUSH", [key, "a", "b", "c", "d", "e", "a", "c", "a"]);
+
+ // Get first occurrence of "a"
+ const firstPos = await ctx.redis.send("LPOS", [key, "a"]);
+ expectType(firstPos, "number");
+ expect(firstPos).toBe(0);
+
+ // Get first occurrence of "c"
+ const firstPosC = await ctx.redis.send("LPOS", [key, "c"]);
+ expectType(firstPosC, "number");
+ expect(firstPosC).toBe(2);
+
+ // Get position of non-existent element
+ const nonExistentPos = await ctx.redis.send("LPOS", [key, "z"]);
+ expect(nonExistentPos).toBeNull();
+
+ // Get all occurrences of "a"
+ const allPosA = await ctx.redis.send("LPOS", [key, "a", "COUNT", "0"]);
+ expect(Array.isArray(allPosA)).toBe(true);
+ expect(allPosA).toEqual([0, 5, 7]);
+
+ // Get first 2 occurrences of "a"
+ const twoPos = await ctx.redis.send("LPOS", [key, "a", "COUNT", "2"]);
+ expect(Array.isArray(twoPos)).toBe(true);
+ expect(twoPos).toEqual([0, 5]);
+
+ // Get position of "a" starting from index 1
+ const posFromIndex = await ctx.redis.send("LPOS", [key, "a", "RANK", "2"]);
+ expectType(posFromIndex, "number");
+ expect(posFromIndex).toBe(5);
+ });
+ });
+
+ describe("List Moving Operations", () => {
+ test("RPOPLPUSH command", async () => {
+ const source = ctx.generateKey("rpoplpush-source");
+ const destination = ctx.generateKey("rpoplpush-dest");
+
+ // Set up source list
+ await ctx.redis.send("RPUSH", [source, "one", "two", "three"]);
+
+ // Set up destination list
+ await ctx.redis.send("RPUSH", [destination, "a", "b"]);
+
+ // Move element from source to destination
+ const result = await ctx.redis.send("RPOPLPUSH", [source, destination]);
+ expect(result).toBe("three");
+
+ // Verify source list
+ const sourceContent = await ctx.redis.send("LRANGE", [source, "0", "-1"]);
+ expect(Array.isArray(sourceContent)).toBe(true);
+ expect(sourceContent).toEqual(["one", "two"]);
+
+ // Verify destination list
+ const destContent = await ctx.redis.send("LRANGE", [destination, "0", "-1"]);
+ expect(Array.isArray(destContent)).toBe(true);
+ expect(destContent).toEqual(["three", "a", "b"]);
+ });
+
+ test("LMOVE command", async () => {
+ const source = ctx.generateKey("lmove-source");
+ const destination = ctx.generateKey("lmove-dest");
+
+ // Set up source list
+ await ctx.redis.send("RPUSH", [source, "one", "two", "three"]);
+
+ // Set up destination list
+ await ctx.redis.send("RPUSH", [destination, "a", "b"]);
+
+ // Right to left move
+ try {
+ const rtlResult = await ctx.redis.send("LMOVE", [source, destination, "RIGHT", "LEFT"]);
+ expect(rtlResult).toBe("three");
+
+ // Left to right move
+ const ltrResult = await ctx.redis.send("LMOVE", [source, destination, "LEFT", "RIGHT"]);
+ expect(ltrResult).toBe("one");
+
+ // Verify source list
+ const sourceContent = await ctx.redis.send("LRANGE", [source, "0", "-1"]);
+ expect(Array.isArray(sourceContent)).toBe(true);
+ expect(sourceContent).toEqual(["two"]);
+
+ // Verify destination list
+ const destContent = await ctx.redis.send("LRANGE", [destination, "0", "-1"]);
+ expect(Array.isArray(destContent)).toBe(true);
+ expect(destContent).toEqual(["three", "a", "b", "one"]);
+      } catch (error) {
+        // LMOVE requires Redis/Valkey 6.2+. Only swallow an unknown-command error so
+        // assertion failures in the block above still fail the test.
+        if (!String(error).includes("unknown command")) throw error;
+        console.warn("LMOVE command not supported, skipping test");
+      }
+ });
+ });
+
+ describe.skipIf(isCI)("Blocking Operations", () => {
+    // Note: blocking commands can stall automated test suites, so these tests
+    // use very short timeouts.
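+    // In real usage a blocking pop typically waits for another connection to push.
+    // A minimal sketch of that pattern (the helper is illustrative and is not
+    // invoked by the tests below):
+    async function popWithProducer(key: string, value: string) {
+      const producer = createClient(ConnectionType.TCP);
+      // Start blocking on the main client, then push from the producer connection
+      const waiter = ctx.redis.send("BLPOP", [key, "5"]);
+      await producer.send("RPUSH", [key, value]);
+      const result = await waiter; // resolves to [key, value] once the push lands
+      producer.disconnect?.();
+      return result;
+    }
+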
+ test("BLPOP with timeout", async () => {
+ const key = ctx.generateKey("blpop-test");
+
+ // Try to pop from an empty list with 1 second timeout
+ const timeoutResult = await ctx.redis.send("BLPOP", [key, "1"]);
+ expect(timeoutResult).toBeNull(); // Should timeout and return null
+
+ // Add elements and then try again
+ await ctx.redis.send("RPUSH", [key, "value1", "value2"]);
+
+ // Now the BLPOP should immediately return
+ const result = await ctx.redis.send("BLPOP", [key, "1"]);
+ expect(Array.isArray(result)).toBe(true);
+ expect(result.length).toBe(2);
+ expect(result[0]).toBe(key);
+ expect(result[1]).toBe("value1");
+ });
+
+ test("BRPOP with timeout", async () => {
+ const key = ctx.generateKey("brpop-test");
+
+ // Try to pop from an empty list with 1 second timeout
+ const timeoutResult = await ctx.redis.send("BRPOP", [key, "1"]);
+ expect(timeoutResult).toBeNull(); // Should timeout and return null
+
+ // Add elements and then try again
+ await ctx.redis.send("RPUSH", [key, "value1", "value2"]);
+
+ // Now the BRPOP should immediately return
+ const result = await ctx.redis.send("BRPOP", [key, "1"]);
+ expect(Array.isArray(result)).toBe(true);
+ expect(result.length).toBe(2);
+ expect(result[0]).toBe(key);
+ expect(result[1]).toBe("value2");
+ });
+
+ test("BRPOPLPUSH with timeout", async () => {
+ const source = ctx.generateKey("brpoplpush-source");
+ const destination = ctx.generateKey("brpoplpush-dest");
+
+ // Try with empty source and 1 second timeout
+ const timeoutResult = await ctx.redis.send("BRPOPLPUSH", [source, destination, "1"]);
+ expect(timeoutResult).toBeNull(); // Should timeout and return null
+
+ // Set up source and destination
+ await ctx.redis.send("RPUSH", [source, "value1", "value2"]);
+ await ctx.redis.send("RPUSH", [destination, "a", "b"]);
+
+ // Now should immediately return
+ const result = await ctx.redis.send("BRPOPLPUSH", [source, destination, "1"]);
+ expect(result).toBe("value2");
+
+ // Verify destination received the element
+ const destContent = await ctx.redis.send("LRANGE", [destination, "0", "-1"]);
+ expect(Array.isArray(destContent)).toBe(true);
+ expect(destContent).toEqual(["value2", "a", "b"]);
+ });
+ });
+});
diff --git a/test/js/valkey/unit/set-operations.test.ts b/test/js/valkey/unit/set-operations.test.ts
new file mode 100644
index 0000000000..979365dd68
--- /dev/null
+++ b/test/js/valkey/unit/set-operations.test.ts
@@ -0,0 +1,349 @@
+import { describe, test, expect, beforeEach } from "bun:test";
+import { ctx, createClient, ConnectionType, expectType, isEnabled } from "../test-utils";
+
+/**
+ * Test suite covering Redis set operations
+ * - Basic operations (SADD, SREM, SISMEMBER)
+ * - Set retrieval (SMEMBERS, SCARD)
+ * - Set manipulation (SPOP, SRANDMEMBER)
+ * - Set operations (SUNION, SINTER, SDIFF)
+ */
+describe.skipIf(!isEnabled)("Valkey: Set Data Type Operations", () => {
+ beforeEach(() => {
+ if (ctx.redis?.connected) {
+ ctx.redis.disconnect?.();
+ }
+ ctx.redis = createClient(ConnectionType.TCP);
+ });
+
+ describe("Basic Set Operations", () => {
+ test("SADD and SISMEMBER commands", async () => {
+ const key = ctx.generateKey("set-test");
+
+ // Add a single member
+ const singleAddResult = await ctx.redis.sadd(key, "member1");
+ expectType(singleAddResult, "number");
+ expect(singleAddResult).toBe(1); // 1 new member added
+
+ // Add multiple members using sendCommand
+ const multiAddResult = await ctx.redis.send("SADD", [key, "member2", "member3", "member1"]);
+ expectType(multiAddResult, "number");
+ expect(multiAddResult).toBe(2); // 2 new members added, 1 duplicate ignored
+
+ // Check if member exists
+ const isFirstMember = await ctx.redis.sismember(key, "member1");
+ expect(isFirstMember).toBe(true);
+
+ // Check if non-existent member exists
+ const isNonMember = await ctx.redis.sismember(key, "nonexistent");
+ expect(isNonMember).toBe(false);
+ });
+
+ test("SREM command", async () => {
+ const key = ctx.generateKey("srem-test");
+
+ // Add multiple members
+ await ctx.redis.send("SADD", [key, "member1", "member2", "member3", "member4"]);
+
+ // Remove a single member
+ const singleRemResult = await ctx.redis.srem(key, "member1");
+ expectType(singleRemResult, "number");
+ expect(singleRemResult).toBe(1); // 1 member removed
+
+ // Remove multiple members using sendCommand
+ const multiRemResult = await ctx.redis.send("SREM", [key, "member2", "member3", "nonexistent"]);
+ expectType(multiRemResult, "number");
+ expect(multiRemResult).toBe(2); // 2 members removed, non-existent member ignored
+
+ // Verify only member4 remains
+ const members = await ctx.redis.smembers(key);
+ expect(Array.isArray(members)).toBe(true);
+ expect(members.length).toBe(1);
+ expect(members[0]).toBe("member4");
+ });
+
+ test("SMEMBERS command", async () => {
+ const key = ctx.generateKey("smembers-test");
+
+ // Add members one at a time using direct sadd method
+ await ctx.redis.sadd(key, "apple");
+ await ctx.redis.sadd(key, "banana");
+ await ctx.redis.sadd(key, "cherry");
+
+ // Get all members using direct smembers method
+ const members = await ctx.redis.smembers(key);
+ expect(Array.isArray(members)).toBe(true);
+
+ // Sort for consistent snapshot since set members can come in any order
+ const sortedMembers = [...members].sort();
+ expect(sortedMembers).toMatchInlineSnapshot(`
+ [
+ "apple",
+ "banana",
+ "cherry",
+ ]
+ `);
+ });
+
+ test("SCARD command", async () => {
+ const key = ctx.generateKey("scard-test");
+
+ // Add members - using direct sadd method for first item, then send for multiple
+ await ctx.redis.sadd(key, "item1");
+ await ctx.redis.send("SADD", [key, "item2", "item3", "item4"]);
+
+ // Get set cardinality (size)
+ // TODO: When a direct scard method is implemented, use that instead
+ const size = await ctx.redis.send("SCARD", [key]);
+ expectType(size, "number");
+ expect(size).toMatchInlineSnapshot(`4`);
+
+ // Remove some members - using direct srem method for first item, then send for second
+ await ctx.redis.srem(key, "item1");
+ await ctx.redis.send("SREM", [key, "item2"]);
+
+ // Check size again
+ const updatedSize = await ctx.redis.send("SCARD", [key]);
+ expectType(updatedSize, "number");
+ expect(updatedSize).toMatchInlineSnapshot(`2`);
+ });
+ });
+
+ describe("Set Manipulation", () => {
+ test("SPOP command", async () => {
+ const key = ctx.generateKey("spop-test");
+
+ // Add members - using send for multiple values
+ // TODO: When a SADD method that supports multiple values is added, use that instead
+ await ctx.redis.send("SADD", [key, "red", "green", "blue", "yellow", "purple"]);
+
+ // Pop a single member - using direct spop method
+ const popResult = await ctx.redis.spop(key);
+ expect(popResult).toBeDefined();
+ expect(typeof popResult).toBe("string");
+
+ // Pop multiple members
+ // TODO: When SPOP method that supports count parameter is added, use that instead
+ const multiPopResult = await ctx.redis.send("SPOP", [key, "2"]);
+ expect(Array.isArray(multiPopResult)).toBe(true);
+ expect(multiPopResult.length).toMatchInlineSnapshot(`2`);
+
+ // Verify remaining members
+ // TODO: When a direct scard method is added, use that instead
+ const remainingCount = await ctx.redis.send("SCARD", [key]);
+ expectType(remainingCount, "number");
+ expect(remainingCount).toMatchInlineSnapshot(`2`); // 5 original - 1 - 2 = 2 remaining
+ });
+
+ test("SRANDMEMBER command", async () => {
+ const key = ctx.generateKey("srandmember-test");
+
+ // Add members - first with direct sadd, then with send for remaining
+ await ctx.redis.sadd(key, "one");
+ await ctx.redis.send("SADD", [key, "two", "three", "four", "five"]);
+
+ // Get a random member - using direct srandmember method
+ const randResult = await ctx.redis.srandmember(key);
+ expect(randResult).toBeDefined();
+ expect(typeof randResult).toBe("string");
+
+ // Get multiple random members
+ // TODO: When srandmember method with count parameter is added, use that instead
+ const multiRandResult = await ctx.redis.send("SRANDMEMBER", [key, "3"]);
+ expect(Array.isArray(multiRandResult)).toBe(true);
+ expect(multiRandResult.length).toMatchInlineSnapshot(`3`);
+
+ // Verify set is unchanged
+ const count = await ctx.redis.send("SCARD", [key]);
+ expectType(count, "number");
+ expect(count).toMatchInlineSnapshot(`5`); // All members still present unlike SPOP
+ });
+
+ test("SMOVE command", async () => {
+ const sourceKey = ctx.generateKey("smove-source");
+ const destinationKey = ctx.generateKey("smove-dest");
+
+ // Set up source and destination sets
+ await ctx.redis.send("SADD", [sourceKey, "a", "b", "c"]);
+ await ctx.redis.send("SADD", [destinationKey, "c", "d", "e"]);
+
+ // Move a member from source to destination
+ const moveResult = await ctx.redis.send("SMOVE", [sourceKey, destinationKey, "b"]);
+ expectType(moveResult, "number");
+ expect(moveResult).toBe(1); // 1 indicates success
+
+ // Try to move a non-existent member
+ const failedMoveResult = await ctx.redis.send("SMOVE", [sourceKey, destinationKey, "z"]);
+ expectType(failedMoveResult, "number");
+ expect(failedMoveResult).toBe(0); // 0 indicates failure
+
+ // Verify source set (should have "a" and "c" left)
+ const sourceMembers = await ctx.redis.smembers(sourceKey);
+ expect(Array.isArray(sourceMembers)).toBe(true);
+ expect(sourceMembers.length).toBe(2);
+ expect(sourceMembers).toContain("a");
+ expect(sourceMembers).toContain("c");
+ expect(sourceMembers).not.toContain("b");
+
+ // Verify destination set (should have "b", "c", "d", "e")
+ const destMembers = await ctx.redis.smembers(destinationKey);
+ expect(Array.isArray(destMembers)).toBe(true);
+ expect(destMembers.length).toBe(4);
+ expect(destMembers).toContain("b");
+ expect(destMembers).toContain("c");
+ expect(destMembers).toContain("d");
+ expect(destMembers).toContain("e");
+ });
+ });
+
+ describe("Set Operations", () => {
+ test("SUNION and SUNIONSTORE commands", async () => {
+ const set1 = ctx.generateKey("sunion-1");
+ const set2 = ctx.generateKey("sunion-2");
+ const set3 = ctx.generateKey("sunion-3");
+ const destSet = ctx.generateKey("sunion-dest");
+
+ // Set up test sets
+ await ctx.redis.send("SADD", [set1, "a", "b", "c"]);
+ await ctx.redis.send("SADD", [set2, "c", "d", "e"]);
+ await ctx.redis.send("SADD", [set3, "e", "f", "g"]);
+
+ // Get union of two sets
+ const unionResult = await ctx.redis.send("SUNION", [set1, set2]);
+ expect(Array.isArray(unionResult)).toBe(true);
+ expect(unionResult.length).toBe(5);
+ expect(unionResult).toContain("a");
+ expect(unionResult).toContain("b");
+ expect(unionResult).toContain("c");
+ expect(unionResult).toContain("d");
+ expect(unionResult).toContain("e");
+
+ // Store union of three sets
+ const storeResult = await ctx.redis.send("SUNIONSTORE", [destSet, set1, set2, set3]);
+ expectType(storeResult, "number");
+ expect(storeResult).toBe(7); // 7 unique members across all sets
+
+ // Verify destination set
+ const destMembers = await ctx.redis.smembers(destSet);
+ expect(Array.isArray(destMembers)).toBe(true);
+ expect(destMembers.length).toBe(7);
+ expect(destMembers).toContain("a");
+ expect(destMembers).toContain("b");
+ expect(destMembers).toContain("c");
+ expect(destMembers).toContain("d");
+ expect(destMembers).toContain("e");
+ expect(destMembers).toContain("f");
+ expect(destMembers).toContain("g");
+ });
+
+ test("SINTER and SINTERSTORE commands", async () => {
+ const set1 = ctx.generateKey("sinter-1");
+ const set2 = ctx.generateKey("sinter-2");
+ const set3 = ctx.generateKey("sinter-3");
+ const destSet = ctx.generateKey("sinter-dest");
+
+ // Set up test sets
+ await ctx.redis.send("SADD", [set1, "a", "b", "c", "d"]);
+ await ctx.redis.send("SADD", [set2, "c", "d", "e"]);
+ await ctx.redis.send("SADD", [set3, "a", "c", "e"]);
+
+ // Get intersection of two sets
+ const interResult = await ctx.redis.send("SINTER", [set1, set2]);
+ expect(Array.isArray(interResult)).toBe(true);
+ expect(interResult.length).toBe(2);
+ expect(interResult).toContain("c");
+ expect(interResult).toContain("d");
+
+ // Store intersection of three sets
+ const storeResult = await ctx.redis.send("SINTERSTORE", [destSet, set1, set2, set3]);
+ expectType(storeResult, "number");
+ expect(storeResult).toBe(1); // Only "c" is in all three sets
+
+ // Verify destination set
+ const destMembers = await ctx.redis.smembers(destSet);
+ expect(Array.isArray(destMembers)).toBe(true);
+ expect(destMembers.length).toBe(1);
+ expect(destMembers[0]).toBe("c");
+ });
+
+ test("SDIFF and SDIFFSTORE commands", async () => {
+ const set1 = ctx.generateKey("sdiff-1");
+ const set2 = ctx.generateKey("sdiff-2");
+ const destSet = ctx.generateKey("sdiff-dest");
+
+ // Set up test sets
+ await ctx.redis.send("SADD", [set1, "a", "b", "c", "d"]);
+ await ctx.redis.send("SADD", [set2, "c", "d", "e"]);
+
+ // Get difference (elements in set1 that aren't in set2)
+ const diffResult = await ctx.redis.send("SDIFF", [set1, set2]);
+ expect(Array.isArray(diffResult)).toBe(true);
+ expect(diffResult.length).toBe(2);
+ expect(diffResult).toContain("a");
+ expect(diffResult).toContain("b");
+
+ // Store difference
+ const storeResult = await ctx.redis.send("SDIFFSTORE", [destSet, set1, set2]);
+ expectType(storeResult, "number");
+ expect(storeResult).toBe(2); // "a" and "b" are only in set1
+
+ // Verify destination set
+ const destMembers = await ctx.redis.smembers(destSet);
+ expect(Array.isArray(destMembers)).toBe(true);
+ expect(destMembers.length).toBe(2);
+ expect(destMembers).toContain("a");
+ expect(destMembers).toContain("b");
+ });
+ });
+
+ describe("Scanning Operations", () => {
+ test("SSCAN command", async () => {
+ const key = ctx.generateKey("sscan-test");
+
+ // Create a set with many members
+ const memberCount = 100;
+ const members = [];
+ for (let i = 0; i < memberCount; i++) {
+ members.push(`member:${i}`);
+ }
+
+ await ctx.redis.send("SADD", [key, ...members]);
+
+ // Use SSCAN to iterate through members
+ const scanResult = await ctx.redis.send("SSCAN", [key, "0", "COUNT", "20"]);
+ expect(Array.isArray(scanResult)).toBe(true);
+ expect(scanResult.length).toBe(2);
+
+ const cursor = scanResult[0];
+ const items = scanResult[1];
+
+ // Cursor should be either "0" (done) or a string number
+ expect(typeof cursor).toBe("string");
+
+ // Items should be an array of members
+ expect(Array.isArray(items)).toBe(true);
+
+ // All results should match our expected pattern
+ for (const item of items) {
+ expect(item.startsWith("member:")).toBe(true);
+ }
+
+ // Verify MATCH pattern works
+ const patternResult = await ctx.redis.send("SSCAN", [key, "0", "MATCH", "member:1*", "COUNT", "1000"]);
+ expect(Array.isArray(patternResult)).toBe(true);
+ expect(patternResult.length).toBe(2);
+
+ const patternItems = patternResult[1];
+ expect(Array.isArray(patternItems)).toBe(true);
+
+ // Should return only members that match the pattern (member:1, member:10-19, etc)
+ // There should be at least "member:1" and "member:10" through "member:19"
+ expect(patternItems.length).toBeGreaterThan(0);
+
+ for (const item of patternItems) {
+ expect(item.startsWith("member:1")).toBe(true);
+ }
+ });
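+
+    // A complete SSCAN sweep follows the same cursor protocol: resubmit the returned
+    // cursor until it comes back as "0". A minimal sketch with a MATCH filter (the
+    // helper name is illustrative and is not used above):
+    async function sscanMatch(key: string, pattern: string): Promise<string[]> {
+      const found: string[] = [];
+      let cursor = "0";
+      do {
+        const [nextCursor, members] = await ctx.redis.send("SSCAN", [key, cursor, "MATCH", pattern, "COUNT", "50"]);
+        found.push(...members);
+        cursor = nextCursor;
+      } while (cursor !== "0");
+      return found;
+    }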
+ });
+});
diff --git a/test/js/valkey/valkey.test.ts b/test/js/valkey/valkey.test.ts
new file mode 100644
index 0000000000..f15d14425e
--- /dev/null
+++ b/test/js/valkey/valkey.test.ts
@@ -0,0 +1,177 @@
+import { describe, test, expect, beforeEach } from "bun:test";
+import { randomUUIDv7, RedisClient } from "bun";
+import { createClient, ctx, DEFAULT_REDIS_URL, ConnectionType, expectType, isEnabled } from "./test-utils";
+
+describe.skipIf(!isEnabled)("Valkey Redis Client", () => {
+ beforeEach(() => {
+ if (ctx.redis?.connected) {
+ ctx.redis.disconnect?.();
+ }
+ ctx.redis = createClient(ConnectionType.TCP);
+ });
+
+ describe("Basic Operations", () => {
+ test("should set and get strings", async () => {
+ const redis = ctx.redis;
+ const testKey = "greeting";
+ const testValue = "Hello from Bun Redis!";
+
+ // Using direct set and get methods
+ const setResult = await redis.set(testKey, testValue);
+ expect(setResult).toMatchInlineSnapshot(`"OK"`);
+
+ // GET should return the value we set
+ const getValue = await redis.get(testKey);
+ expect(getValue).toMatchInlineSnapshot(`"Hello from Bun Redis!"`);
+ });
+
+ test("should test key existence", async () => {
+ const redis = ctx.redis;
+ // Let's set a key first
+ await redis.set("greeting", "test existence");
+
+      // EXISTS in Redis returns integer 1 if the key exists and 0 if it does not;
+      // Bun's client exposes this as a boolean
+      const exists = await redis.exists("greeting");
+      expect(exists).toBeDefined();
+      // Should be true for existing keys
+ expect(exists).toBe(true);
+
+ // For non-existent keys
+ const randomKey = "nonexistent-key-" + randomUUIDv7();
+ const notExists = await redis.exists(randomKey);
+ expect(notExists).toBeDefined();
+ // Should be false for non-existing keys
+ expect(notExists).toBe(false);
+ });
+
+ test("should increment and decrement counters", async () => {
+ const redis = ctx.redis;
+ const counterKey = "counter";
+ // First set a counter value
+ await redis.set(counterKey, "10");
+
+ // INCR should increment and return the new value
+ const incrementedValue = await redis.incr(counterKey);
+ expect(incrementedValue).toBeDefined();
+ expect(typeof incrementedValue).toBe("number");
+ expect(incrementedValue).toBe(11);
+
+ // DECR should decrement and return the new value
+ const decrementedValue = await redis.decr(counterKey);
+ expect(decrementedValue).toBeDefined();
+ expect(typeof decrementedValue).toBe("number");
+ expect(decrementedValue).toBe(10);
+ });
+
+ test("should manage key expiration", async () => {
+ const redis = ctx.redis;
+ // Set a key first
+ const tempKey = "temporary";
+ await redis.set(tempKey, "will expire");
+
+      // EXPIRE (via the native expire method rather than send()) returns 1 if the
+      // timeout was set, 0 otherwise
+      const result = await redis.expire(tempKey, 60);
+      expect(result).toMatchInlineSnapshot(`1`);
+
+ // Use the TTL command directly
+ const ttl = await redis.ttl(tempKey);
+ expectType(ttl, "number");
+ expect(ttl).toBeGreaterThan(0);
+ expect(ttl).toBeLessThanOrEqual(60); // Should be positive and not exceed our set time
+ });
+
+ test("should implement TTL command correctly for different cases", async () => {
+ const redis = ctx.redis;
+ // 1. Key with expiration
+ const tempKey = "ttl-test-key";
+ await redis.set(tempKey, "ttl test value");
+ await redis.expire(tempKey, 60);
+
+ // Use native ttl command
+ const ttl = await redis.ttl(tempKey);
+ expectType(ttl, "number");
+ expect(ttl).toBeGreaterThan(0);
+ expect(ttl).toBeLessThanOrEqual(60);
+
+ // 2. Key with no expiration
+ const permanentKey = "permanent-key";
+ await redis.set(permanentKey, "no expiry");
+ const noExpiry = await redis.ttl(permanentKey);
+ expect(noExpiry).toMatchInlineSnapshot(`-1`); // -1 indicates no expiration
+
+ // 3. Non-existent key
+ const nonExistentKey = "non-existent-" + randomUUIDv7();
+ const noKey = await redis.ttl(nonExistentKey);
+ expect(noKey).toMatchInlineSnapshot(`-2`); // -2 indicates key doesn't exist
+ });
+ });
+
+ describe("Connection State", () => {
+ test("should have a connected property", () => {
+ const redis = ctx.redis;
+ // The client should expose a connected property
+ expect(typeof redis.connected).toBe("boolean");
+ });
+ });
+
+ describe("RESP3 Data Types", () => {
+ test("should handle hash maps (dictionaries) as command responses", async () => {
+ const redis = ctx.redis;
+ // HSET multiple fields
+ const userId = "user:" + randomUUIDv7().substring(0, 8);
+ const setResult = await redis.send("HSET", [userId, "name", "John", "age", "30", "active", "true"]);
+ expect(setResult).toBeDefined();
+
+ // HGETALL returns object with key-value pairs
+ const hash = await redis.send("HGETALL", [userId]);
+ expect(hash).toBeDefined();
+
+      // With RESP3, HGETALL comes back as a map, exposed here as a plain object
+ if (typeof hash === "object" && hash !== null) {
+ expect(hash).toHaveProperty("name");
+ expect(hash).toHaveProperty("age");
+ expect(hash).toHaveProperty("active");
+
+ expect(hash.name).toBe("John");
+ expect(hash.age).toBe("30");
+ expect(hash.active).toBe("true");
+ }
+ });
+
+ test("should handle sets as command responses", async () => {
+ const redis = ctx.redis;
+ // Add items to a set
+ const setKey = "colors:" + randomUUIDv7().substring(0, 8);
+ const addResult = await redis.send("SADD", [setKey, "red", "blue", "green"]);
+ expect(addResult).toBeDefined();
+
+ // Get set members
+ const setMembers = await redis.send("SMEMBERS", [setKey]);
+ expect(setMembers).toBeDefined();
+
+ // Check if the response is an array
+ expect(Array.isArray(setMembers)).toBe(true);
+
+ // Should contain our colors
+ expect(setMembers).toContain("red");
+ expect(setMembers).toContain("blue");
+ expect(setMembers).toContain("green");
+ });
+ });
+
+ describe("Connection Options", () => {
+ test("connection errors", async () => {
+ const url = new URL(DEFAULT_REDIS_URL);
+ url.username = "badusername";
+ url.password = "secretpassword";
+ const customRedis = new RedisClient(url.toString());
+
+ expect(async () => {
+ await customRedis.get("test");
+ }).toThrowErrorMatchingInlineSnapshot(`"WRONGPASS invalid username-password pair or user is disabled."`);
+ });
+ });
+});