Mirror of https://github.com/oven-sh/bun, synced 2026-02-16 22:01:47 +00:00

Compare commits — 5 commits, `deps/updat...` → `claude/doc...`:

- c82c16a44a
- 9a2177b4df
- b90d879035
- d4a0a9e545
- 0aa824e05c

@@ -62,6 +62,7 @@ Routes in `Bun.serve()` receive a `BunRequest` (which extends [`Request`](https:
interface BunRequest<T extends string> extends Request {
  params: Record<T, string>;
  readonly cookies: CookieMap;
  readonly query: Record<string, any>;
}
```
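
Below is a hedged usage sketch (not part of the diff above) showing how these fields might be read inside a route handler; the route path, port, and cookie/query names are illustrative:

```ts
Bun.serve({
  port: 3000,
  routes: {
    "/users/:id": (req) => {
      const id = req.params.id; // typed from the ":id" segment
      const theme = req.cookies.get("theme"); // CookieMap accessor
      const page = req.query.page; // parsed query string, e.g. ?page=2
      return Response.json({ id, theme, page });
    },
  },
});
```
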
@@ -103,6 +103,93 @@ await redis.expire("session:123", 3600); // expires in 1 hour

// Get time to live (in seconds)
const ttl = await redis.ttl("session:123");

// Set if not exists
await redis.setnx("lock", "token");

// Set with expiration (seconds)
await redis.setex("temp", 60, "value");

// Set with expiration (milliseconds)
await redis.psetex("temp", 5000, "value");

// Get and set in one operation
const oldValue = await redis.getset("key", "new-value");

// Get and delete
const value = await redis.getdel("key");

// Get and set expiration
const currentValue = await redis.getex("key", "EX", 60);

// Get multiple keys
const values = await redis.mget(["key1", "key2", "key3"]);

// Set multiple keys
await redis.mset(["key1", "value1", "key2", "value2"]);

// Set multiple keys if none exist
const success = await redis.msetnx(["key1", "value1", "key2", "value2"]);

// Append to a string
await redis.append("log", "new entry\n");

// Get string length
const length = await redis.strlen("key");

// Set expiration at specific timestamp (seconds)
await redis.expireat("key", Math.floor(Date.now() / 1000) + 3600);

// Set expiration (milliseconds)
await redis.pexpire("key", 60000);

// Set expiration at specific timestamp (milliseconds)
await redis.pexpireat("key", Date.now() + 60000);

// Get expiration timestamp (seconds)
const expiresAt = await redis.expiretime("key");

// Get expiration timestamp (milliseconds)
const expiresAtMs = await redis.pexpiretime("key");

// Get time to live (milliseconds)
const ttlMs = await redis.pttl("key");

// Remove expiration
await redis.persist("key");

// Get bit value at offset
const bit = await redis.getbit("key", 7);

// Set bit value at offset
await redis.setbit("key", 7, 1);

// Count set bits
const count = await redis.bitcount("key");

// Get substring
const substring = await redis.getrange("key", 0, 10);

// Set substring
await redis.setrange("key", 5, "replacement");

// Copy key
await redis.copy("source", "destination");

// Rename key
await redis.rename("old-key", "new-key");

// Rename if new key doesn't exist
const renamed = await redis.renamenx("old-key", "new-key");

// Delete key asynchronously
await redis.unlink("key");

// Update last access time
await redis.touch("key1", "key2");

// Serialize value
const serialized = await redis.dump("key");
```

### Numeric Operations

@@ -116,6 +203,15 @@ await redis.incr("counter");

// Decrement by 1
await redis.decr("counter");

// Increment by a specific value
await redis.incrby("counter", 5);

// Decrement by a specific value
await redis.decrby("counter", 3);

// Increment by a float value
await redis.incrbyfloat("counter", 2.5);
```

### Hash Operations

@@ -144,6 +240,40 @@ await redis.hincrby("user:123", "visits", 1);

// Increment a float field in a hash
await redis.hincrbyfloat("user:123", "score", 1.5);

// Set a single field
await redis.hset("user:123", "name", "Bob");

// Set field if it doesn't exist
const created = await redis.hsetnx("user:123", "id", "123");

// Delete fields
await redis.hdel("user:123", "email", "phone");

// Check if field exists
const hasEmail = await redis.hexists("user:123", "email");

// Get all fields and values
const allData = await redis.hgetall("user:123");

// Get all field names
const fields = await redis.hkeys("user:123");

// Get all values
const values = await redis.hvals("user:123");

// Get number of fields
const fieldCount = await redis.hlen("user:123");

// Get string length of field value
const nameLength = await redis.hstrlen("user:123", "name");

// Get random field(s)
const randomField = await redis.hrandfield("user:123");
const randomFields = await redis.hrandfield("user:123", 2);

// Scan hash fields
const [cursor, fields] = await redis.hscan("user:123", 0);
```

### Set Operations

@@ -166,6 +296,236 @@ const randomTag = await redis.srandmember("tags");

// Pop (remove and return) a random member
const poppedTag = await redis.spop("tags");

// Get set size
const size = await redis.scard("tags");

// Move member between sets
await redis.smove("source-set", "dest-set", "member");

// Check multiple members
const results = await redis.smismember("tags", ["javascript", "python", "rust"]);

// Difference between sets
const diff = await redis.sdiff("set1", "set2");

// Store difference in new set
await redis.sdiffstore("result", "set1", "set2");

// Intersection of sets
const intersection = await redis.sinter("set1", "set2");

// Count intersection
const intersectionCount = await redis.sintercard("set1", "set2");

// Store intersection in new set
await redis.sinterstore("result", "set1", "set2");

// Union of sets
const union = await redis.sunion("set1", "set2");

// Store union in new set
await redis.sunionstore("result", "set1", "set2");

// Scan set members
const [cursor, members] = await redis.sscan("tags", 0);
```

### Sorted Set Operations

```ts
// Add members with scores
await redis.zadd("leaderboard", 100, "player1");
await redis.zadd("leaderboard", 200, "player2", 150, "player3");

// Remove members
await redis.zrem("leaderboard", "player1");

// Get number of members
const count = await redis.zcard("leaderboard");

// Count members in score range
const rangeCount = await redis.zcount("leaderboard", 100, 200);

// Get member score
const score = await redis.zscore("leaderboard", "player1");

// Get multiple scores
const scores = await redis.zmscore("leaderboard", ["player1", "player2"]);

// Get member rank (0-based, lowest to highest)
const rank = await redis.zrank("leaderboard", "player1");

// Get member rank (0-based, highest to lowest)
const revRank = await redis.zrevrank("leaderboard", "player1");

// Increment member score
await redis.zincrby("leaderboard", 10, "player1");

// Get range by index
const topPlayers = await redis.zrange("leaderboard", 0, 9);

// Get range by index (reverse order)
const topPlayersDesc = await redis.zrevrange("leaderboard", 0, 9);

// Get range by score
const players = await redis.zrangebyscore("leaderboard", 100, 200);

// Get range by score (reverse)
const playersDesc = await redis.zrevrangebyscore("leaderboard", 200, 100);

// Get range by lexicographic order
const names = await redis.zrangebylex("names", "[a", "[z");

// Get range by lex (reverse)
const namesRev = await redis.zrevrangebylex("names", "[z", "[a");

// Count members in lex range
const lexCount = await redis.zlexcount("names", "[a", "[z");

// Store range result
await redis.zrangestore("result", "leaderboard", 0, 9);

// Remove members by lex range
await redis.zremrangebylex("names", "[a", "[c");

// Remove members by rank
await redis.zremrangebyrank("leaderboard", 0, 9);

// Remove members by score
await redis.zremrangebyscore("leaderboard", 0, 100);

// Pop member with lowest score
const lowest = await redis.zpopmin("leaderboard");

// Pop member with highest score
const highest = await redis.zpopmax("leaderboard");

// Blocking pop lowest
const [key, member, score] = await redis.bzpopmin("leaderboard", 5);

// Blocking pop highest
const [key, member, score] = await redis.bzpopmax("leaderboard", 5);

// Pop from multiple sorted sets
const popped = await redis.zmpop("leaderboard1", "leaderboard2");

// Blocking pop from multiple sorted sets
const blockedPop = await redis.bzmpop(5, "leaderboard1", "leaderboard2");

// Difference between sorted sets
const diff = await redis.zdiff("set1", "set2");

// Store difference
await redis.zdiffstore("result", "set1", "set2");

// Intersection of sorted sets
const intersection = await redis.zinter("set1", "set2");

// Count intersection
const interCount = await redis.zintercard("set1", "set2");

// Store intersection
await redis.zinterstore("result", "set1", "set2");

// Union of sorted sets
const union = await redis.zunion("set1", "set2");

// Store union
await redis.zunionstore("result", "set1", "set2");

// Get random member(s)
const random = await redis.zrandmember("leaderboard");
const randomWithScores = await redis.zrandmember("leaderboard", 3, true);

// Scan sorted set
const [cursor, members] = await redis.zscan("leaderboard", 0);
```

### List Operations

```ts
// Push to left (head)
await redis.lpush("queue", "item1");

// Push to right (tail)
await redis.rpush("queue", "item2");

// Pop from left
const leftItem = await redis.lpop("queue");

// Pop from right
const rightItem = await redis.rpop("queue");

// Push to left if list exists
await redis.lpushx("queue", "item");

// Push to right if list exists
await redis.rpushx("queue", "item");

// Get list length
const length = await redis.llen("queue");

// Get range of elements
const items = await redis.lrange("queue", 0, -1);

// Get element by index
const item = await redis.lindex("queue", 0);

// Set element by index
await redis.lset("queue", 0, "new-value");

// Insert before/after element
await redis.linsert("queue", "BEFORE", "pivot", "new-item");

// Remove elements
await redis.lrem("queue", 2, "value"); // remove first 2 occurrences

// Trim list to range
await redis.ltrim("queue", 0, 99);

// Find position of element
const position = await redis.lpos("queue", "item");

// Move element between lists
await redis.lmove("source", "dest", "LEFT", "RIGHT");

// Pop from multiple lists
const popped = await redis.lmpop("list1", "list2", "LEFT");

// Pop from right, push to left (atomic)
await redis.rpoplpush("source", "dest");

// Blocking pop from left
const [key, value] = await redis.blpop("queue", 5);

// Blocking pop from right
const [key, value] = await redis.brpop("queue", 5);

// Blocking move
await redis.blmove("source", "dest", "LEFT", "RIGHT", 5);

// Blocking pop from multiple lists
const result = await redis.blmpop(5, "list1", "list2", "LEFT");

// Blocking rpoplpush
await redis.brpoplpush("source", "dest", 5);
```

### Key Management

```ts
// Find keys matching pattern
const keys = await redis.keys("user:*");

// Scan keys with cursor
const [cursor, foundKeys] = await redis.scan(0, "MATCH", "user:*", "COUNT", 100);

// Get key type
const keyType = await redis.type("mykey");

// Get random key
const randomKey = await redis.randomkey();
```

## Pub/Sub

@@ -362,24 +362,32 @@ await sql`UPDATE users SET ${sql(user, "name", "email")} WHERE id = ${user.id}`;
await sql`UPDATE users SET ${sql(user)} WHERE id = ${user.id}`;
```

### Dynamic values and `where in`
### Dynamic values and `WHERE IN`

Value lists can also be created dynamically, making where in queries simple too. Optionally you can pass a array of objects and inform what key to use to create the list.
Value lists can be created dynamically for `IN` clauses. You can pass an array of values directly, or extract a specific column from an array of objects:

```ts
// Direct array of values
await sql`SELECT * FROM users WHERE id IN ${sql([1, 2, 3])}`;

// Extract column from array of objects
const users = [
  { id: 1, name: "Alice" },
  { id: 2, name: "Bob" },
  { id: 3, name: "Charlie" },
];
await sql`SELECT * FROM users WHERE id IN ${sql(users, "id")}`;
// Expands to: WHERE id IN (1, 2, 3)

// Single object also works (extracts the value)
const singleUser = { id: 1, name: "Alice" };
await sql`SELECT * FROM users WHERE id IN ${sql(singleUser, "id")}`;
// Expands to: WHERE id IN (1)
```

### `sql.array` helper

The `sql.array` helper creates PostgreSQL array literals from JavaScript arrays:
The `sql.array` helper creates PostgreSQL array literals from JavaScript arrays. It supports a wide range of PostgreSQL types with explicit type specification:

```ts
// Create array literals for PostgreSQL
@@ -389,9 +397,50 @@ await sql`INSERT INTO tags (items) VALUES (${sql.array(["red", "blue", "green"])
// Works with numeric arrays too
await sql`SELECT * FROM products WHERE ids = ANY(${sql.array([1, 2, 3])})`;
// Generates: SELECT * FROM products WHERE ids = ANY(ARRAY[1, 2, 3])

// With explicit type specification (recommended for clarity)
await sql`INSERT INTO scores VALUES (${sql.array([1, 2, 3], "INT")})`;
```

**Note**: `sql.array` is PostgreSQL-only. Multi-dimensional arrays and NULL elements may not be supported yet.
#### Supported Array Types

The `sql.array` helper supports the following PostgreSQL types as the second parameter:

**Numeric Types:**
- `INT`, `SMALLINT`, `BIGINT` - Integer types
- `REAL`, `DOUBLE PRECISION` - Floating-point types
- `NUMERIC` - Arbitrary precision numbers

**String Types:**
- `TEXT`, `VARCHAR`, `CHAR` - Text types

**Date/Time Types:**
- `DATE` - Date values
- `TIME` - Time of day (string format: `"HH:MM:SS"`)
- `TIMESTAMP`, `TIMESTAMPTZ` - Timestamp values
- `INTERVAL` - Time intervals

**Network Types:**
- `INET` - IP addresses (IPv4 and IPv6)
- `CIDR` - Network addresses
- `MACADDR` - MAC addresses

**Other Types:**
- `UUID` - Universally unique identifiers
- `BOOLEAN` - Boolean values
- `BIT`, `VARBIT` - Bit strings
- `MONEY` - Currency amounts
- `POINT`, `BOX`, `CIRCLE` - Geometric types

```ts
// Examples with different types
await sql`SELECT ${sql.array([1, 2, 3], "INT")} as numbers`;
await sql`SELECT ${sql.array(["12:30:45", "18:45:30"], "TIME")} as times`;
await sql`SELECT ${sql.array([new Date(), new Date()], "TIMESTAMP")} as timestamps`;
await sql`SELECT ${sql.array(["192.168.1.1", "10.0.0.1"], "INET")} as ips`;
```

**Note**: `sql.array` is PostgreSQL-only. Multi-dimensional arrays and NULL elements may not be fully supported yet.

## `sql``.simple()`

@@ -413,12 +462,38 @@ Note that simple queries cannot use parameters (`${value}`). If you need paramet

### Queries in files

You can use the `sql.file` method to read a query from a file and execute it, if the file includes $1, $2, etc you can pass parameters to the query. If no parameters are used it can execute multiple commands per file.
You can use the `sql.file` method to read a query from a file and execute it. This is useful for organizing complex queries or database migrations in separate files.

```ts
// Execute a query file without parameters
const result = await sql.file("query.sql");

// Pass parameters to the query (PostgreSQL: $1, $2, etc.; SQLite: ?, ?, etc.)
const result = await sql.file("query.sql", [1, 2, 3]);

// Multiple statements are allowed when no parameters are used
const result = await sql.file("schema.sql");
```

**Example files:**

```sql
-- query.sql (with parameters)
SELECT * FROM users WHERE id = $1 AND age > $2
```

```sql
-- schema.sql (multiple statements, no parameters)
CREATE TABLE users (
  id INTEGER PRIMARY KEY,
  name TEXT NOT NULL
);

INSERT INTO users (id, name) VALUES (1, 'Alice');
INSERT INTO users (id, name) VALUES (2, 'Bob');
```

The file path can be absolute or relative to the current working directory.

### Unsafe Queries

You can use the `sql.unsafe` function to execute raw SQL strings. Use this with caution, as it will not escape user input. Executing more than one command per query is allowed if no parameters are used.
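
As an illustration (not part of the diff above), a minimal hedged sketch of how `sql.unsafe` might be called; the table, column, and variable names are assumptions:

```ts
import { sql } from "bun";

const userId = 1;

// Parameters are still bound separately; only the query string itself is raw
const rows = await sql.unsafe("SELECT * FROM users WHERE id = $1", [userId]);

// Multiple statements in one call are only allowed when no parameters are passed
await sql.unsafe("CREATE TABLE IF NOT EXISTS logs (msg TEXT); DELETE FROM logs;");
```
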
@@ -1242,7 +1317,7 @@ MySQL types are automatically converted to JavaScript types:
| FLOAT, DOUBLE | number | |
| DATE | Date | JavaScript Date object |
| DATETIME, TIMESTAMP | Date | With timezone handling |
| TIME | number | Total of microseconds |
| TIME | string | Format: `"HH:MM:SS"` (e.g., `"12:30:45"`, `"838:59:59"`, `"-838:59:59"`) |
| YEAR | number | |
| CHAR, VARCHAR, VARSTRING, STRING | string | |
| TINY TEXT, MEDIUM TEXT, TEXT, LONG TEXT | string | |

@@ -382,6 +382,12 @@ interface Server {
    data: string | ArrayBufferView | ArrayBuffer,
    compress?: boolean,
  ): number;
  publishText(topic: string, data: string, compress?: boolean): number;
  publishBinary(
    topic: string,
    data: ArrayBufferView | ArrayBuffer,
    compress?: boolean,
  ): number;
  upgrade(
    req: Request,
    options?: {
@@ -396,12 +402,21 @@ interface ServerWebSocket {
  readonly readyState: number;
  readonly remoteAddress: string;
  send(message: string | ArrayBuffer | Uint8Array, compress?: boolean): number;
  sendText(message: string, compress?: boolean): number;
  sendBinary(message: ArrayBuffer | Uint8Array, compress?: boolean): number;
  close(code?: number, reason?: string): void;
  subscribe(topic: string): void;
  unsubscribe(topic: string): void;
  publish(topic: string, message: string | ArrayBuffer | Uint8Array): void;
  publishText(topic: string, message: string, compress?: boolean): number;
  publishBinary(
    topic: string,
    message: ArrayBuffer | Uint8Array,
    compress?: boolean,
  ): number;
  isSubscribed(topic: string): boolean;
  cork(cb: (ws: ServerWebSocket) => void): void;
  getBufferedAmount(): number;
}
```
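
Below is a hedged usage sketch (not part of the diff) tying a few of these methods together; the port, topic name, and message handling are illustrative:

```ts
const server = Bun.serve({
  port: 3000,
  fetch(req, server) {
    // Attempt a WebSocket upgrade; otherwise answer with a plain response
    if (server.upgrade(req)) return;
    return new Response("Expected a WebSocket connection", { status: 426 });
  },
  websocket: {
    open(ws) {
      ws.subscribe("chat");
      ws.sendText("welcome"); // string-only fast path
    },
    message(ws, message) {
      // Broadcast to every subscriber of the topic
      server.publishText("chat", String(message));
    },
    close(ws) {
      ws.unsubscribe("chat");
    },
  },
});
```
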
@@ -9,7 +9,7 @@ Bun provides built-in support for YAML files through both runtime APIs and bundl

## Conformance

Bun's YAML parser currently passes over 90% of the official YAML test suite. While we're actively working on reaching 100% conformance, the current implementation covers the vast majority of real-world use cases. The parser is written in Zig for optimal performance and is continuously being improved.
Bun's YAML parser currently passes 90% of the official yaml-test-suite (362/400 tests). Known limitations include missing support for the `|-` and `|+` block scalar chomping operators and cyclic references in `Bun.YAML.parse()`. The parser is written in Zig for optimal performance and is continuously being improved.

## Runtime API

@@ -264,6 +264,14 @@ Bun automatically handles serving the frontend assets with proper MIME types and

For more details on building full-stack applications with Bun, see the [full-stack guide](/docs/bundler/fullstack).

## Code splitting

Standalone executables now support code splitting via the `--splitting` flag. This enables dynamic imports to be split into separate chunks within the executable, reducing initial load time for applications with lazy-loaded modules.

```sh
$ bun build --compile --splitting ./app.ts --outfile myapp
```
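
For context, a hypothetical `app.ts` (not from the diff) whose lazy-loaded module would end up in its own chunk inside the executable; the `./report.ts` module and its export are assumptions:

```ts
// app.ts — the heavy module is only loaded when the "report" subcommand runs
if (process.argv[2] === "report") {
  const { generateReport } = await import("./report.ts");
  await generateReport();
} else {
  console.log("usage: myapp report");
}
```
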
## Worker

To use workers in a standalone executable, add the worker's entrypoint to the CLI arguments:

@@ -591,7 +599,6 @@ Codesign support requires Bun v1.2.4 or newer.

Currently, the `--compile` flag can only accept a single entrypoint at a time and does not support the following flags:

- `--outdir` — use `outfile` instead.
- `--splitting`
- `--public-path`
- `--target=node` or `--target=browser`
- `--no-bundle` - we always bundle everything into the executable.

@@ -46,7 +46,7 @@ You can also pass glob patterns to filter by workspace names:

### Catalog Dependencies

`bun outdated` supports checking catalog dependencies defined in `package.json`:
`bun outdated` supports checking catalog dependencies defined in `package.json`. As of commit 624911180f, catalog information is shown by default without requiring the `--filter` or `-r` flags.

```sh
$ bun outdated -r

@@ -102,6 +102,21 @@ XML report to the given path at the very end of the test run.

JUnit XML is a popular format for reporting test results in CI/CD pipelines.

## Breaking changes in v1.3

### CI environment restrictions

When running in CI environments (GitHub Actions, GitLab CI, etc.), Bun now enforces stricter test practices:

- **Creating snapshots is disallowed** - Tests using `.toMatchSnapshot()` or `.toMatchInlineSnapshot()` will fail if the snapshot doesn't already exist. Use `--update-snapshots` locally to generate snapshots before committing.
- **`.only()` is disallowed** - Tests marked with `.only()` will fail. This prevents accidentally committing focused tests that skip the rest of your test suite.

CI environments are detected via environment variables like `CI=true`, `GITHUB_ACTIONS=true`, `GITLAB_CI=true`, etc.
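
For example, a sketch of the local workflow implied above:

```sh
# Locally: (re)generate snapshots, then commit the updated files
$ bun test --update-snapshots
```
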
### ErrorBuilder.reject() behavior

`ErrorBuilder.reject()` now triggers unhandled rejection handlers, making error handling more consistent with JavaScript promise semantics. Previously, rejections from `ErrorBuilder.reject()` would not call these handlers, potentially hiding errors in production code.

## Timeouts

Use the `--timeout` flag to specify a _per-test_ timeout in milliseconds. If a test times out, it will be marked as failed. The default value is `5000`.
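
For example:

```sh
# Allow up to 10 seconds per test instead of the 5000 ms default
$ bun test --timeout 10000
```
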
@@ -38,9 +38,19 @@ In the root `package.json`, the `"workspaces"` key is used to indicate which sub
```

{% callout %}
**Glob support** — Bun supports full glob syntax in `"workspaces"` (see [here](https://bun.com/docs/api/glob#supported-glob-patterns) for a comprehensive list of supported syntax), _except_ for exclusions (e.g. `!**/excluded/**`), which are not implemented yet.
**Glob support** — Bun supports full glob syntax in `"workspaces"` (see [here](https://bun.com/docs/api/glob#supported-glob-patterns) for a comprehensive list of supported syntax).
{% /callout %}

To exclude specific packages, prefix patterns with `!`:

```json
{
  "name": "my-project",
  "version": "1.0.0",
  "workspaces": ["packages/*", "!packages/ignore"]
}
```

Each workspace has its own `package.json`. When referencing other packages in the monorepo, semver or workspace protocols (e.g. `workspace:*`) can be used as the version field in your `package.json`.

```json

@@ -36,7 +36,7 @@ This page is updated regularly to reflect compatibility status of the latest ver

### [`node:fs`](https://nodejs.org/api/fs.html)

🟢 Fully implemented. 92% of Node.js's test suite passes.
🟢 Fully implemented. 92% of Node.js's test suite passes. On Linux, `fs.stat` uses the `statx` syscall to provide file creation time (birthtime) support, falling back to traditional `stat` on kernels older than 4.11.
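
A small sketch (not from the diff) of reading the creation time; the file path is illustrative:

```ts
import { statSync } from "node:fs";

// On Linux 4.11+, Bun obtains birthtime via statx; on older kernels it falls
// back to stat, where birthtime may not be meaningful.
const { birthtime, mtime } = statSync("./package.json");
console.log("created:", birthtime.toISOString(), "modified:", mtime.toISOString());
```
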
### [`node:http`](https://nodejs.org/api/http.html)

@@ -218,6 +218,10 @@ The table below lists all globals implemented by Node.js and Bun's current compa

🟢 Fully implemented.

### [`AsyncDisposableStack`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/AsyncDisposableStack)

🟢 Fully implemented. Used with `await using` syntax for explicit async resource management.
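
A minimal sketch (not from the source) of `AsyncDisposableStack` with `await using`; the stand-in resource is an assumption:

```ts
async function run() {
  await using stack = new AsyncDisposableStack();

  // Stand-in async resource for illustration
  const conn = { close: async () => console.log("connection closed") };
  stack.defer(async () => {
    await conn.close();
  });

  console.log("doing work...");
} // the stack is disposed here, awaiting the deferred cleanup

await run();
```
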
### [`BroadcastChannel`](https://developer.mozilla.org/en-US/docs/Web/API/BroadcastChannel)

🟢 Fully implemented.

@@ -270,6 +274,10 @@ The table below lists all globals implemented by Node.js and Bun's current compa

🔴 Not implemented.

### [`DisposableStack`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/DisposableStack)

🟢 Fully implemented. Used with `using` syntax for explicit resource management, automatically calling disposal methods when leaving scope.
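
A minimal synchronous counterpart sketch (not from the source); the signal listener is illustrative:

```ts
function withTemporaryListener() {
  using stack = new DisposableStack();

  const handler = () => console.log("tick");
  process.on("SIGUSR2", handler);
  // Ensure the listener is removed however this function exits
  stack.defer(() => process.off("SIGUSR2", handler));

  console.log("listener installed for the duration of this scope");
} // the stack is disposed here, running the deferred callback
```
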
### [`Event`](https://developer.mozilla.org/en-US/docs/Web/API/Event)

🟢 Fully implemented.

@@ -23,7 +23,7 @@ afterEach(() => {

// tests...
```

Perform per-scope setup and teardown logic with `beforeAll` and `afterAll`. The _scope_ is determined by where the hook is defined.
Perform per-scope setup and teardown logic with `beforeAll` and `afterAll`. The _scope_ is determined by where the hook is defined. Note that `afterAll` can also be called inside individual tests for test-specific cleanup.
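
A hedged sketch of the in-test usage mentioned above; the temp-directory handling is illustrative:

```ts
import { test, afterAll, expect } from "bun:test";
import { mkdtempSync, rmSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";

test("writes into a scratch directory", () => {
  const dir = mkdtempSync(join(tmpdir(), "scratch-"));
  // Cleanup registered from inside the test itself
  afterAll(() => rmSync(dir, { recursive: true, force: true }));

  expect(dir.length).toBeGreaterThan(0);
});
```
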
To scope the hooks to a particular `describe` block:

@@ -67,6 +67,8 @@ afterAll(() => {

});
```

When using `--preload`, `beforeAll` runs before the first test file and `afterAll` runs after the last test file, ensuring proper global setup and teardown across your entire test suite.

Then use `--preload` to run the setup script before any test files.

```ts

@@ -34,6 +34,14 @@ test/package-json-lint.test.ts:

Ran 4 tests across 1 files. [0.66ms]
```

### Dots Reporter

For minimalist test output, use the dots reporter with `--reporter=dots` or `--dots`. It displays `.` for each passing test and `F` for each failure, making it ideal for CI environments or when running large test suites.

```sh
$ bun test --dots
```

### JUnit XML Reporter

For CI/CD environments, Bun supports generating JUnit XML reports. JUnit XML is a widely-adopted format for test results that can be parsed by many CI/CD systems, including GitLab, Jenkins, and others.

@@ -47,7 +47,7 @@ When you run the test, Bun automatically updates the test file itself with the g

3. Bun automatically updates your test file with the snapshot
4. On subsequent runs, the value will be compared against the inline snapshot

Inline snapshots are particularly useful for small, simple values where it's helpful to see the expected output right in the test file.
Inline snapshots are particularly useful for small, simple values where it's helpful to see the expected output right in the test file. Multiple inline snapshots on the same line are allowed if they have identical values.
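
For instance, a minimal sketch of the same-line case (assuming both values serialize to the same snapshot text):

```ts
import { test, expect } from "bun:test";

test("identical inline snapshots can share a line", () => {
  expect(1 + 1).toMatchInlineSnapshot(`2`); expect(4 / 2).toMatchInlineSnapshot(`2`);
});
```
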
## Error snapshots