diff --git a/docs/api/binary-data.md b/docs/api/binary-data.md
index 888ed524b4..f6f768da84 100644
--- a/docs/api/binary-data.md
+++ b/docs/api/binary-data.md
@@ -1099,7 +1099,7 @@ The following cryptographic hash algorithms are supported:
 ```ts
 // SHA family
 new CryptoHasher("sha1");
-new CryptoHasher("sha224"); 
+new CryptoHasher("sha224");
 new CryptoHasher("sha256");
 new CryptoHasher("sha384");
 new CryptoHasher("sha512");
@@ -1141,14 +1141,14 @@ For processing large files efficiently:
 async function hashFile(path: string, algorithm = "sha256"): Promise<string> {
   const hasher = new CryptoHasher(algorithm);
   const file = Bun.file(path);
-  
+
   // Process file in chunks using async iteration
   const stream = file.stream();
-  
+
   for await (const chunk of stream) {
     hasher.update(chunk);
   }
-  
+
   return hasher.digest("hex");
 }
 ```
diff --git a/docs/api/hashing.md b/docs/api/hashing.md
index a548845a77..a288ed46d5 100644
--- a/docs/api/hashing.md
+++ b/docs/api/hashing.md
@@ -413,8 +413,8 @@ Each hash class has a static `byteLength` property indicating the output size:
 
 ```ts
 console.log(Bun.SHA256.byteLength); // => 32
-console.log(Bun.SHA1.byteLength); // => 20 
-console.log(Bun.MD5.byteLength); // => 16 
+console.log(Bun.SHA1.byteLength); // => 20
+console.log(Bun.MD5.byteLength); // => 16
 ```
 
 ### Security Considerations
@@ -451,7 +451,10 @@ hasher.update("jumps over the lazy dog");
 const hash = hasher.digest("hex");
 
 // Using static method for one-shot hashing
-const quickHash = Bun.SHA256.hash("The quick brown fox jumps over the lazy dog", "hex");
+const quickHash = Bun.SHA256.hash(
+  "The quick brown fox jumps over the lazy dog",
+  "hex",
+);
 
 // Both produce the same result
 console.log(hash === quickHash); // => true
@@ -470,7 +473,9 @@ const hash1 = Bun.SHA256.hash(data, "hex");
 // Method 2: Write into existing buffer (avoids allocation)
 const output = new Uint8Array(32);
 Bun.SHA256.hash(data, output);
-const hash2 = Array.from(output, byte => byte.toString(16).padStart(2, '0')).join('');
+const hash2 = Array.from(output, byte =>
+  byte.toString(16).padStart(2, "0"),
+).join("");
 
 console.log(hash1 === hash2); // => true
 ```
@@ -480,11 +485,11 @@ console.log(hash1 === hash2); // => true
 ```ts
 const data = "hello world";
 
-console.log("MD5:        ", Bun.MD5.hash(data, "hex")); // 16 bytes 
-console.log("SHA1:       ", Bun.SHA1.hash(data, "hex")); // 20 bytes 
-console.log("SHA224:     ", Bun.SHA224.hash(data, "hex")); // 28 bytes 
-console.log("SHA256:     ", Bun.SHA256.hash(data, "hex")); // 32 bytes 
-console.log("SHA384:     ", Bun.SHA384.hash(data, "hex")); // 48 bytes 
-console.log("SHA512:     ", Bun.SHA512.hash(data, "hex")); // 64 bytes 
+console.log("MD5:        ", Bun.MD5.hash(data, "hex")); // 16 bytes
+console.log("SHA1:       ", Bun.SHA1.hash(data, "hex")); // 20 bytes
+console.log("SHA224:     ", Bun.SHA224.hash(data, "hex")); // 28 bytes
+console.log("SHA256:     ", Bun.SHA256.hash(data, "hex")); // 32 bytes
+console.log("SHA384:     ", Bun.SHA384.hash(data, "hex")); // 48 bytes
+console.log("SHA512:     ", Bun.SHA512.hash(data, "hex")); // 64 bytes
 console.log("SHA512/256: ", Bun.SHA512_256.hash(data, "hex")); // 32 bytes
 ```
diff --git a/docs/api/utils.md b/docs/api/utils.md
index 500866efd1..89c8a02f0a 100644
--- a/docs/api/utils.md
+++ b/docs/api/utils.md
@@ -631,7 +631,11 @@
 const input = "hello world".repeat(100);
 
 const compressed = Bun.zstdCompressSync(input);
 // => Buffer
-console.log(input.length); // => 1100 
+console.log(input.length); // => 1100
 console.log(compressed.length); // => 25 (significantly smaller!)
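+
+// Round-trip: decompressing restores the original string
+const restored = Bun.zstdDecompressSync(compressed);
+console.log(new TextDecoder().decode(restored) === input); // => true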
 ```
@@ -653,7 +653,11 @@ const balanced = Bun.zstdCompressSync(data, { level: 3 });
 // Maximum compression, slower but smallest output
 const small = Bun.zstdCompressSync(data, { level: 22 });
 
-console.log({ fast: fast.length, balanced: balanced.length, small: small.length });
+console.log({
+  fast: fast.length,
+  balanced: balanced.length,
+  small: small.length,
+});
 // => { fast: 2776, balanced: 1064, small: 1049 }
 ```
@@ -683,7 +687,9 @@ The function automatically detects the format and decompresses accordingly:
 // Works with any input type that was compressed
 const stringCompressed = Bun.zstdCompressSync("text data");
 const bufferCompressed = Bun.zstdCompressSync(Buffer.from("binary data"));
-const uint8Compressed = Bun.zstdCompressSync(new TextEncoder().encode("encoded data"));
+const uint8Compressed = Bun.zstdCompressSync(
+  new TextEncoder().encode("encoded data"),
+);
 
 console.log(new TextDecoder().decode(Bun.zstdDecompressSync(stringCompressed)));
 console.log(new TextDecoder().decode(Bun.zstdDecompressSync(bufferCompressed)));
@@ -699,14 +705,16 @@ const largeData = "large dataset ".repeat(100000);
 
 // Won't block the event loop
 const compressed = await Bun.zstdCompress(largeData, { level: 9 });
-console.log(`Compressed ${largeData.length} bytes to ${compressed.length} bytes`);
+console.log(
+  `Compressed ${largeData.length} bytes to ${compressed.length} bytes`,
+);
 ```
 
 The async version accepts the same compression levels and options as the sync version:
 
 ```ts
 // Different compression levels
-const level1 = await Bun.zstdCompress(data, { level: 1 });  // Fast
+const level1 = await Bun.zstdCompress(data, { level: 1 }); // Fast
 const level12 = await Bun.zstdCompress(data, { level: 12 }); // Balanced
 const level22 = await Bun.zstdCompress(data, { level: 22 }); // Maximum compression
 ```
@@ -801,7 +809,7 @@ const server = Bun.serve({
   async fetch(req) {
     const acceptEncoding = req.headers.get("Accept-Encoding") || "";
     const responseData = "Large response content...".repeat(1000);
-    
+
     if (acceptEncoding.includes("zstd")) {
       const compressed = await Bun.zstdCompress(responseData, { level: 6 });
       return new Response(compressed, {
@@ -809,13 +817,13 @@ const server = Bun.serve({
           "Content-Encoding": "zstd",
           "Content-Type": "text/plain",
           "Content-Length": compressed.length.toString(),
-        }
+        },
       });
     }
-    
+
     // Fallback to uncompressed
     return new Response(responseData, {
-      headers: { "Content-Type": "text/plain" }
+      headers: { "Content-Type": "text/plain" },
     });
   },
   port: 3000,
@@ -897,7 +905,7 @@ console.log(lineStart); // => 6 (start of "World" line)
 
 // The character at index 8 is 'r' in "World"
 console.log(text[8]); // => 'r'
-console.log(text.slice(lineStart, text.indexOf('\n', lineStart)));
+console.log(text.slice(lineStart, text.indexOf("\n", lineStart)));
 // => "World"
 ```
@@ -911,7 +919,9 @@ console.log(lineStart); // => 7 (start of "Line 2")
 
 // Convert back to string to verify
 const decoder = new TextDecoder();
 const lineEnd = buffer.indexOf(0x0a, lineStart); // 0x0a is '\n'
-const line = decoder.decode(buffer.slice(lineStart, lineEnd === -1 ? undefined : lineEnd));
+const line = decoder.decode(
+  buffer.slice(lineStart, lineEnd === -1 ? undefined : lineEnd),
+);
 console.log(line); // => "Line 2"
 ```
@@ -920,7 +930,7 @@ Useful for building development tools like linters, formatters, or language serv
 ```ts
 function getLineAndColumn(text: string, index: number) {
   const lineStart = Bun.indexOfLine(text, index);
-  const lineNumber = text.slice(0, lineStart).split('\n').length;
+  const lineNumber = text.slice(0, lineStart).split("\n").length;
   const column = index - lineStart + 1;
   return { line: lineNumber, column };
 }
@@ -952,7 +962,7 @@ It handles various special characters that have meaning in shells:
 Bun.shellEscape("hello; rm -rf /"); // => 'hello; rm -rf /'
 Bun.shellEscape("$HOME/file"); // => '$HOME/file'
 Bun.shellEscape("`whoami`"); // => '`whoami`'
-Bun.shellEscape("a\"quote\""); // => 'a"quote"'
+Bun.shellEscape('a"quote"'); // => 'a"quote"'
 
 // Already safe strings pass through unchanged
 Bun.shellEscape("simple-filename.txt"); // => simple-filename.txt
@@ -964,13 +974,13 @@ Essential for safely constructing shell commands with user input:
 function safeCopy(source: string, destination: string) {
   const safeSource = Bun.shellEscape(source);
   const safeDest = Bun.shellEscape(destination);
-  
+
   // Now safe to execute
   const proc = Bun.spawn({
     cmd: ["sh", "-c", `cp ${safeSource} ${safeDest}`],
-    stderr: "pipe"
+    stderr: "pipe",
   });
-  
+
   return proc;
 }
@@ -1002,14 +1012,14 @@ Best used when you'll immediately fill the entire buffer:
-function readFileToBuffer(path: string): Uint8Array {
+async function readFileToBuffer(path: string): Promise<Uint8Array> {
   const file = Bun.file(path);
   const size = file.size;
-  
+
   // Safe to use allocUnsafe since we'll overwrite everything
   const buffer = Bun.allocUnsafe(size);
-  
+
   // Fill the entire buffer with file data
-  const bytes = file.bytes();
+  const bytes = await file.bytes();
   buffer.set(bytes);
-  
+
   return buffer;
 }
 ```
@@ -1048,6 +1058,7 @@ Bun.gc(true);
 ```
 
 **Parameters:**
+
 - `force` (`boolean`, optional): If `true`, runs garbage collection synchronously (blocking). Default is asynchronous.
 
 **Note**: Manual garbage collection is generally not recommended in production applications. The JavaScript engine's automatic GC is typically more efficient.
@@ -1076,12 +1087,14 @@ await Bun.write("heap.heapsnapshot", snapshot);
 ```
 
 **Formats:**
+
 - `"jsc"` (default): Returns a `HeapSnapshot` object compatible with Safari Web Inspector and `bun --inspect`
 - `"v8"`: Returns a JSON string compatible with Chrome DevTools
 
 **Usage in development:**
+
 1. Generate snapshot: `const snap = Bun.generateHeapSnapshot("v8")`
-2. Save to file: `await Bun.write("memory.heapsnapshot", snap)` 
+2. Save to file: `await Bun.write("memory.heapsnapshot", snap)`
 3. Open in Chrome DevTools > Memory tab > Load snapshot
 4. Analyze memory usage, object references, and potential leaks
@@ -1097,7 +1110,7 @@ const mapped = Bun.mmap("/path/to/large-file.bin");
 
 // Access file contents directly
 console.log(mapped.length); // File size in bytes
-console.log(mapped[0]); // First byte 
+console.log(mapped[0]); // First byte
 console.log(mapped.slice(0, 100)); // First 100 bytes
 
 // No explicit cleanup needed - GC will handle unmapping
@@ -1123,10 +1136,11 @@ Great for processing large data files:
 function processLogFile(path: string) {
   const data = Bun.mmap(path);
   const decoder = new TextDecoder();
-  
+
   let lineStart = 0;
   for (let i = 0; i < data.length; i++) {
-    if (data[i] === 0x0a) { // newline
+    if (data[i] === 0x0a) {
+      // newline
       const line = decoder.decode(data.slice(lineStart, i));
       processLine(line);
       lineStart = i + 1;
@@ -1140,12 +1154,12 @@ function processLine(line: string) {
 ```
 
 **Important considerations:**
+
 - The mapped memory is read-only
 - Changes to the underlying file may or may not be reflected in the mapped data
 - The mapping is automatically unmapped when the Uint8Array is garbage collected
 - Very large files may hit system memory mapping limits
-
 
 ## `Bun.inspect.table(tabularData, properties, options)`
 
 Format tabular data into a string. Like [`console.table`](https://developer.mozilla.org/en-US/docs/Web/API/console/table_static), except it returns a string rather than printing to the console.
@@ -1228,7 +1242,7 @@ This is significantly more precise than `Date.now()` which returns milliseconds,
 ```ts
 // Comparing precision
-Date.now(); // milliseconds (e.g. 1703123456789) 
+Date.now(); // milliseconds (e.g. 1703123456789)
 performance.now(); // milliseconds with sub-millisecond precision (e.g. 123.456)
-Bun.nanoseconds(); // nanoseconds (e.g. 1703123456789123456)
+Bun.nanoseconds(); // nanoseconds since the process started (e.g. 1234567890123)
 ```
@@ -1314,10 +1328,10 @@ Useful for building tools that need to understand module resolution:
-function findDependencies(entryPoint: string): string[] {
+async function findDependencies(entryPoint: string): Promise<string[]> {
   const dependencies: string[] = [];
-  const source = Bun.file(entryPoint).text();
-  
+  const source = await Bun.file(entryPoint).text();
+
   // Simple regex to find import statements (real implementation would use a parser)
   const imports = source.match(/import .* from ["']([^"']+)["']/g) || [];
-  
+
   for (const importStmt of imports) {
     const specifier = importStmt.match(/from ["']([^"']+)["']/)?.[1];
     if (specifier) {
@@ -1329,7 +1343,7 @@ function findDependencies(entryPoint: string): string[] {
       }
     }
   }
-  
+
   return dependencies;
 }
 ```
@@ -1361,6 +1375,7 @@ try {
 ```
 
 Both functions respect:
+
 - `package.json` `exports` and `main` fields
 - `node_modules` resolution algorithm
 - TypeScript-style path mapping
@@ -1480,6 +1495,7 @@ console.log(str); // => "hello"
 ```
 
 **⚠️ Critical warnings:**
+
 - **Only use this for ASCII strings**. Non-ASCII characters may crash your application or cause confusing bugs like `"foo" !== "foo"`
 - **The input buffer must not be garbage collected**. Hold a reference to the buffer for the string's entire lifetime
 - **Memory corruption risk**: Incorrect usage can lead to security vulnerabilities
@@ -1500,8 +1516,9 @@ Bun.unsafe.gcAggressionLevel(previousLevel);
 ```
 
 **Levels:**
+
 - `0`: Default, disabled
-- `1`: Asynchronously call GC more often 
+- `1`: Asynchronously call GC more often
 - `2`: Synchronously call GC more often (most aggressive)
 
 **Environment variable**: `BUN_GARBAGE_COLLECTOR_LEVEL` is also supported.
@@ -1549,22 +1566,25 @@ console.log(tokenWithSecret); // => "base64url-encoded-token"
 const customToken = CSRF.generate("my-secret", {
   encoding: "hex",
   expiresIn: 60 * 60 * 1000, // 1 hour in milliseconds
-  algorithm: "sha256"
+  algorithm: "sha256",
 });
 ```
 
 **Parameters:**
+
 - `secret` (`string`, optional): Secret key for token generation. If not provided, uses a default internal secret
 - `options` (`CSRFGenerateOptions`, optional): Configuration options
 
 **Options:**
+
 - `encoding` (`"base64url" | "base64" | "hex"`): Output encoding format (default: `"base64url"`)
 - `expiresIn` (`number`): Token expiration time in milliseconds (default: 24 hours)
 - `algorithm` (`CSRFAlgorithm`): Hash algorithm to use (default: `"sha256"`)
 
 **Supported algorithms:**
+
 - `"blake2b256"` - BLAKE2b with 256-bit output
-- `"blake2b512"` - BLAKE2b with 512-bit output 
+- `"blake2b512"` - BLAKE2b with 512-bit output
 - `"sha256"` - SHA-256 (default)
 - `"sha384"` - SHA-384
 - `"sha512"` - SHA-512
@@ -1591,18 +1611,20 @@ const isInvalid = CSRF.verify(token, { secret: "wrong-secret" });
 console.log(isInvalid); // => false
 
 // Verify with maxAge constraint
-const isExpired = CSRF.verify(token, { 
-  secret, 
-  maxAge: 1000 // 1 second
+const isExpired = CSRF.verify(token, {
+  secret,
+  maxAge: 1000, // 1 second
 });
 // If more than 1 second has passed, this will return false
 ```
 
 **Parameters:**
+
 - `token` (`string`): The CSRF token to verify
 - `options` (`CSRFVerifyOptions`, optional): Verification options
 
 **Options:**
+
 - `secret` (`string`, optional): Secret key used for verification. If not provided, uses the default internal secret
 - `encoding` (`"base64url" | "base64" | "hex"`): Token encoding format (default: `"base64url"`)
 - `maxAge` (`number`, optional): Maximum age in milliseconds. If specified, tokens older than this will be rejected
@@ -1638,7 +1660,7 @@ app.use((req, res, next) => {
 app.use((req, res, next) => {
   if (["POST", "PUT", "DELETE", "PATCH"].includes(req.method)) {
     const token = req.body._csrf || req.headers["x-csrf-token"];
-    
+
     if (!token || !CSRF.verify(token, { secret })) {
       return res.status(403).json({ error: "Invalid CSRF token" });
     }
@@ -1658,8 +1680,9 @@ app.post("/api/data", (req, res) => {
 
 ```html
 <form method="POST" action="/api/data">
-  <input type="hidden" name="_csrf" value="TOKEN_FROM_SERVER">
-  <button type="submit">Submit</button>
+  <input type="hidden" name="_csrf" value="TOKEN_FROM_SERVER" />
+  <button type="submit">Submit</button>
 </form>
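+<!-- The hidden _csrf value comes from CSRF.generate(secret) on the server -->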
 ```
 
@@ -1672,7 +1694,7 @@ import { CSRF } from "bun";
 try {
   // Generate token
   const token = CSRF.generate("my-secret");
-  
+
   // Verify token
   const isValid = CSRF.verify(token, { secret: "my-secret" });
 } catch (error) {
@@ -1685,6 +1707,7 @@ try {
 ```
 
 Common error scenarios:
+
 - Empty or invalid token strings throw verification errors
 - Empty secret strings throw generation/verification errors
 - Invalid encoding options are handled gracefully
diff --git a/docs/bundler/executables.md b/docs/bundler/executables.md
index 3397630d9a..f52823acc8 100644
--- a/docs/bundler/executables.md
+++ b/docs/bundler/executables.md
@@ -391,13 +391,13 @@ console.log(embeddedFiles[0].type); // MIME type (e.g., "image/png")
 
 `Bun.embeddedFiles` returns a read-only array of `Blob` objects sorted lexicographically by filename. Each `Blob` provides access to the embedded file's contents and metadata.
 
 ```ts
-const embeddedFiles: ReadonlyArray<Blob>
+const embeddedFiles: ReadonlyArray<Blob>;
 ```
 
 **Properties of embedded file `Blob`s:**
 
 - `name` (`string`): The filename with hash suffix (e.g., `icon-a1b2c3.png`)
-- `size` (`number`): File size in bytes 
+- `size` (`number`): File size in bytes
 - `type` (`string`): MIME type automatically detected from file extension
 - Standard `Blob` methods: `text()`, `arrayBuffer()`, `bytes()`, `stream()`, `slice()`
@@ -418,9 +418,9 @@ if (logo) {
 // Process all embedded files
 for (const file of embeddedFiles) {
   console.log(`File: ${file.name}`);
-  console.log(`  Size: ${file.size} bytes`); 
+  console.log(`  Size: ${file.size} bytes`);
   console.log(`  Type: ${file.type}`);
-  
+
   // Read file content based on type
   if (file.type.startsWith("text/") || file.type === "application/json") {
     const content = await file.text();
@@ -451,31 +451,31 @@ const server = Bun.serve({
   fetch(req) {
     const url = new URL(req.url);
     const filename = url.pathname.slice(1); // Remove leading slash
-    
+
     // Find embedded file by filename (ignoring hash)
-    const file = embeddedFiles.find(f => 
-      f.name.includes(filename.split('.')[0])
+    const file = embeddedFiles.find(f =>
+      f.name.includes(filename.split(".")[0]),
     );
-    
+
     if (file) {
       return new Response(file, {
         headers: {
           "Content-Type": file.type,
          "Content-Length": file.size.toString(),
-          "Cache-Control": "public, max-age=31536000" // 1 year cache
-        }
+          "Cache-Control": "public, max-age=31536000", // 1 year cache
+        },
       });
     }
-    
+
     return new Response("Not found", { status: 404 });
-  }
+  },
 });
 ```
 
 ### Important notes
 
 - **Read-only**: The `embeddedFiles` array and individual files cannot be modified at runtime
-- **Empty when not compiled**: Returns an empty array when running with `bun run` (not compiled) 
+- **Empty when not compiled**: Returns an empty array when running with `bun run` (not compiled)
 - **Hash suffixes**: Filenames include content hashes for cache busting (e.g., `style-a1b2c3.css`)
 - **MIME type detection**: File types are automatically detected from file extensions
 - **Memory efficient**: Files are lazily loaded - accessing content triggers reading from the embedded data
diff --git a/docs/bundler/macros.md b/docs/bundler/macros.md
index 7d4013acfc..eb4ee464cf 100644
--- a/docs/bundler/macros.md
+++ b/docs/bundler/macros.md
@@ -327,4 +327,3 @@ export { Head };
 ```
 
 {% /codetabs %}
-
diff --git a/docs/guides/util/zstd.md b/docs/guides/util/zstd.md
index 7a32ab923e..b2779e2c71 100644
--- a/docs/guides/util/zstd.md
+++ b/docs/guides/util/zstd.md
@@ -15,7 +15,9 @@ const compressed = Bun.zstdCompressSync(data);
 
 console.log(`Original: ${data.length} bytes`);
 console.log(`Compressed: ${compressed.length} bytes`);
-console.log(`Compression ratio: ${(data.length / compressed.length).toFixed(2)}x`);
+console.log(
+  `Compression ratio: ${(data.length / compressed.length).toFixed(2)}x`,
+);
 ```
 
 The function accepts strings, `Uint8Array`, `ArrayBuffer`, `Buffer`, and other binary data types:
 
@@ -28,7 +30,9 @@ const textCompressed = Bun.zstdCompressSync("Hello, world!");
 
 // Buffer
 const bufferCompressed = Bun.zstdCompressSync(Buffer.from("Hello, world!"));
 
 // Uint8Array
-const uint8Compressed = Bun.zstdCompressSync(new TextEncoder().encode("Hello, world!"));
+const uint8Compressed = Bun.zstdCompressSync(
+  new TextEncoder().encode("Hello, world!"),
+);
 ```
 
 ## Synchronous decompression
@@ -71,8 +75,9 @@ console.log(text); // => "Hello, world!"
 
 ## Compression levels
 
 Zstandard supports compression levels from 1 to 22, where:
+
 - **Level 1**: Fastest compression, larger file size
-- **Level 3**: Default level (good balance of speed and compression) 
+- **Level 3**: Default level (good balance of speed and compression)
 - **Level 19**: Very high compression, slower
 - **Level 22**: Maximum compression, slowest
@@ -182,19 +187,19 @@ const server = Bun.serve({
   async fetch(req) {
     const acceptEncoding = req.headers.get("Accept-Encoding") || "";
     const content = "Large response content...";
-    
+
     if (acceptEncoding.includes("zstd")) {
       const compressed = await Bun.zstdCompress(content, { level: 6 });
       return new Response(compressed, {
         headers: {
           "Content-Encoding": "zstd",
-          "Content-Type": "text/plain"
-        }
+          "Content-Type": "text/plain",
+        },
       });
     }
-    
+
     return new Response(content);
-  }
+  },
 });
 ```
 
@@ -211,4 +216,28 @@
-For maximum compatibility, consider falling back to gzip for older clients.
+For maximum compatibility, consider falling back to gzip for older clients; a minimal sketch follows at the end of this page.
 
 ---
 
-See [Docs > API > Utils](/docs/api/utils) for more compression utilities including gzip, deflate, and brotli.
\ No newline at end of file
+See [Docs > API > Utils](/docs/api/utils) for more compression utilities including gzip, deflate, and brotli.
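+
+The gzip fallback mentioned above is a few extra lines of content negotiation. A minimal sketch, using the built-in `Bun.gzipSync` alongside `Bun.zstdCompress` (the `respond` helper name is illustrative, not part of Bun's API):
+
+```ts
+async function respond(req: Request, body: string): Promise<Response> {
+  const accept = req.headers.get("Accept-Encoding") || "";
+
+  // Prefer zstd when the client advertises support for it
+  if (accept.includes("zstd")) {
+    return new Response(await Bun.zstdCompress(body, { level: 6 }), {
+      headers: { "Content-Encoding": "zstd", "Content-Type": "text/plain" },
+    });
+  }
+
+  // gzip is understood by effectively all HTTP clients
+  if (accept.includes("gzip")) {
+    return new Response(Bun.gzipSync(Buffer.from(body)), {
+      headers: { "Content-Encoding": "gzip", "Content-Type": "text/plain" },
+    });
+  }
+
+  return new Response(body, { headers: { "Content-Type": "text/plain" } });
+}
+```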