mirror of
https://github.com/oven-sh/bun
synced 2026-02-22 00:32:02 +00:00
Compare commits
179 Commits
fix-test-h
...
don/fix/pm
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a9ae300752 | ||
|
|
950ce32cd0 | ||
|
|
b00e8037c5 | ||
|
|
4ccf5c03dc | ||
|
|
8c7c42055b | ||
|
|
1d6bdf745b | ||
|
|
9f023d7471 | ||
|
|
44f252539a | ||
|
|
44f70b4301 | ||
|
|
9a329c04cc | ||
|
|
f677ac322c | ||
|
|
03f3a59ff5 | ||
|
|
4a44257457 | ||
|
|
f912fd8100 | ||
|
|
dff1f555b4 | ||
|
|
d028e1aaa3 | ||
|
|
5f9f200e7e | ||
|
|
5fa14574a6 | ||
|
|
eee5d4fb4a | ||
|
|
ca8b7fb36e | ||
|
|
ad3f367520 | ||
|
|
02023810ba | ||
|
|
9eae7787a0 | ||
|
|
ec87a27d87 | ||
|
|
431b28fd6b | ||
|
|
f5a710f324 | ||
|
|
95fead19f9 | ||
|
|
78ee4a3e82 | ||
|
|
ed410d0597 | ||
|
|
ba0bd426ed | ||
|
|
a2efbd4ca2 | ||
|
|
5d1ca1f371 | ||
|
|
580e743ebd | ||
|
|
9fa3bc4b93 | ||
|
|
8b2b34086c | ||
|
|
3d6e1153fc | ||
|
|
340ae94d0f | ||
|
|
e75d226943 | ||
|
|
a8cc31f8c4 | ||
|
|
a1e1f720ed | ||
|
|
c86097aeb0 | ||
|
|
d1ac711a7e | ||
|
|
378c68a024 | ||
|
|
84001acf22 | ||
|
|
28e7a830a0 | ||
|
|
bb4f8d8933 | ||
|
|
6c3aaefed2 | ||
|
|
11f9538b9e | ||
|
|
2bbdf4f950 | ||
|
|
8414ef1562 | ||
|
|
f505cf6f66 | ||
|
|
a52f2f4a8d | ||
|
|
d9c77be90d | ||
|
|
04a432f54f | ||
|
|
94addcf2a5 | ||
|
|
d5660f7a37 | ||
|
|
3e358a1708 | ||
|
|
6b206ae0a9 | ||
|
|
4afaa4cb60 | ||
|
|
c40663bdf1 | ||
|
|
11f2b5fb55 | ||
|
|
3577dd8924 | ||
|
|
976330f4e2 | ||
|
|
b950f85705 | ||
|
|
d7a8208ff5 | ||
|
|
0efbbd3870 | ||
|
|
ebb03afae0 | ||
|
|
b6f919caba | ||
|
|
c6076f2e4e | ||
|
|
946f41c01a | ||
|
|
4cea70a484 | ||
|
|
5392cd1d28 | ||
|
|
38a776a404 | ||
|
|
575d2c40a8 | ||
|
|
ff939e9fb3 | ||
|
|
c29933f823 | ||
|
|
13068395c0 | ||
|
|
e7576bb204 | ||
|
|
4806e84cc1 | ||
|
|
a199b85f2b | ||
|
|
ff9c466f9b | ||
|
|
323d78df5e | ||
|
|
adab0f64f9 | ||
|
|
41d3f1bc9d | ||
|
|
b34703914c | ||
|
|
f3da1b80bc | ||
|
|
0814abe21e | ||
|
|
c3be6732d1 | ||
|
|
c3e2bf0fc4 | ||
|
|
78a9396038 | ||
|
|
e2ce3bd4ce | ||
|
|
fee911194a | ||
|
|
358a1db422 | ||
|
|
8929d65f0e | ||
|
|
f14e26bc85 | ||
|
|
43f7a241b9 | ||
|
|
7021c42cf2 | ||
|
|
1b10b61423 | ||
|
|
bb9128c0e8 | ||
|
|
f38d35f7c9 | ||
|
|
f0dfa109bb | ||
|
|
27cf65a1e2 | ||
|
|
e83b5fb720 | ||
|
|
ee89130991 | ||
|
|
0a4f36644f | ||
|
|
a1ab2a4780 | ||
|
|
451c1905a8 | ||
|
|
accccbfdaf | ||
|
|
8e0c8a143e | ||
|
|
9ea577efc0 | ||
|
|
54416dad05 | ||
|
|
8f4575c0e4 | ||
|
|
c7edb24520 | ||
|
|
325acfc230 | ||
|
|
7f60375cca | ||
|
|
dac7f22997 | ||
|
|
f5836c2013 | ||
|
|
70ddfb55e6 | ||
|
|
934e41ae59 | ||
|
|
f4ae8c7254 | ||
|
|
2a9569cec4 | ||
|
|
31060a5e2a | ||
|
|
5c0fa6dc21 | ||
|
|
53f311fdd9 | ||
|
|
b40f5c9669 | ||
|
|
317e9d23ab | ||
|
|
11bb3573ea | ||
|
|
39cf0906d1 | ||
|
|
1d655a0232 | ||
|
|
a548c2ec54 | ||
|
|
7740271359 | ||
|
|
75144ab881 | ||
|
|
1dbeed20a9 | ||
|
|
3af6f7a5fe | ||
|
|
1bfccf707b | ||
|
|
21853d08de | ||
|
|
b6502189e8 | ||
|
|
f4ab2e4986 | ||
|
|
57cda4a445 | ||
|
|
49ca2c86e7 | ||
|
|
a08a9c5bfb | ||
|
|
ee8a839500 | ||
|
|
8ee962d79f | ||
|
|
4c3d652f00 | ||
|
|
c21fca08e2 | ||
|
|
77fde278e8 | ||
|
|
517af630e7 | ||
|
|
d8e5335268 | ||
|
|
db492575c8 | ||
|
|
9e580f8413 | ||
|
|
6ba2ba41c6 | ||
|
|
57381d43ed | ||
|
|
90c67c4b79 | ||
|
|
cf9f2bf98e | ||
|
|
8ebd5d53da | ||
|
|
60acfb17f0 | ||
|
|
8735a3f4d6 | ||
|
|
a07844ea13 | ||
|
|
1656bca9ab | ||
|
|
43af1a2283 | ||
|
|
84a21234d4 | ||
|
|
fefdaefb97 | ||
|
|
50eaea19cb | ||
|
|
438d8555c6 | ||
|
|
163a51c0f6 | ||
|
|
8df7064f73 | ||
|
|
99ee90a58f | ||
|
|
46c43d954c | ||
|
|
b37054697b | ||
|
|
5d50281f1a | ||
|
|
6bef525704 | ||
|
|
687a0ab5a4 | ||
|
|
60ae19bded | ||
|
|
be41c884b4 | ||
|
|
73d1b2ff67 | ||
|
|
2312b2c0f2 | ||
|
|
eae2c889ed | ||
|
|
ddd87fef12 | ||
|
|
f36d480919 |
@@ -1,13 +1,14 @@
|
||||
---
|
||||
description: JavaScript class implemented in C++
|
||||
globs: *.cpp
|
||||
alwaysApply: false
|
||||
---
|
||||
|
||||
# Implementing JavaScript classes in C++
|
||||
|
||||
If there is a publicly accessible Constructor and Prototype, then there are 3 classes:
|
||||
|
||||
- IF there are C++ class members we need a destructor, so `class Foo : public JSC::DestructibleObject`, if no C++ class fields (only JS properties) then we don't need a class at all usually. We can instead use JSC::constructEmptyObject(vm, structure) and `putDirectOffset` like in [NodeFSBinding.cpp](mdc:src/bun.js/bindings/NodeFSBinding.cpp).
|
||||
- IF there are C++ class members we need a destructor, so `class Foo : public JSC::DestructibleObject`, if no C++ class fields (only JS properties) then we don't need a class at all usually. We can instead use JSC::constructEmptyObject(vm, structure) and `putDirectOffset` like in [NodeFSStatBinding.cpp](mdc:src/bun.js/bindings/NodeFSStatBinding.cpp).
|
||||
- class FooPrototype : public JSC::JSNonFinalObject
|
||||
- class FooConstructor : public JSC::InternalFunction
|
||||
|
||||
@@ -18,6 +19,7 @@ If there are C++ fields on the Foo class, the Foo class will need an iso subspac
|
||||
Usually you'll need to #include "root.h" at the top of C++ files or you'll get lint errors.
|
||||
|
||||
Generally, defining the subspace looks like this:
|
||||
|
||||
```c++
|
||||
|
||||
class Foo : public JSC::DestructibleObject {
|
||||
@@ -45,6 +47,7 @@ It's better to put it in the .cpp file instead of the .h file, when possible.
|
||||
## Defining properties
|
||||
|
||||
Define properties on the prototype. Use a const HashTableValues like this:
|
||||
|
||||
```C++
|
||||
static JSC_DECLARE_HOST_FUNCTION(jsX509CertificateProtoFuncCheckEmail);
|
||||
static JSC_DECLARE_HOST_FUNCTION(jsX509CertificateProtoFuncCheckHost);
|
||||
@@ -158,6 +161,7 @@ void JSX509CertificatePrototype::finishCreation(VM& vm)
|
||||
```
|
||||
|
||||
### Getter definition:
|
||||
|
||||
```C++
|
||||
|
||||
JSC_DEFINE_CUSTOM_GETTER(jsX509CertificateGetter_ca, (JSGlobalObject * globalObject, EncodedJSValue thisValue, PropertyName))
|
||||
@@ -212,7 +216,6 @@ JSC_DEFINE_HOST_FUNCTION(jsX509CertificateProtoFuncToJSON, (JSGlobalObject * glo
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### Constructor definition
|
||||
|
||||
```C++
|
||||
@@ -259,7 +262,6 @@ private:
|
||||
};
|
||||
```
|
||||
|
||||
|
||||
### Structure caching
|
||||
|
||||
If there's a class, prototype, and constructor:
|
||||
@@ -279,6 +281,7 @@ void GlobalObject::finishCreation(VM& vm) {
|
||||
```
|
||||
|
||||
Then, implement the function that creates the structure:
|
||||
|
||||
```c++
|
||||
void setupX509CertificateClassStructure(LazyClassStructure::Initializer& init)
|
||||
{
|
||||
@@ -301,11 +304,12 @@ If there's only a class, use `JSC::LazyProperty<JSGlobalObject, Structure>` inst
|
||||
1. Add the `JSC::LazyProperty<JSGlobalObject, Structure>` to @ZigGlobalObject.h
|
||||
2. Initialize the class structure in @ZigGlobalObject.cpp in `void GlobalObject::finishCreation(VM& vm)`
|
||||
3. Visit the lazy property in visitChildren in @ZigGlobalObject.cpp in `void GlobalObject::visitChildrenImpl`
|
||||
void GlobalObject::finishCreation(VM& vm) {
|
||||
// ...
|
||||
void GlobalObject::finishCreation(VM& vm) {
|
||||
// ...
|
||||
this.m_myLazyProperty.initLater([](const JSC::LazyProperty<JSC::JSGlobalObject, JSC::Structure>::Initializer& init) {
|
||||
init.set(Bun::initMyStructure(init.vm, reinterpret_cast<Zig::GlobalObject*>(init.owner)));
|
||||
});
|
||||
init.set(Bun::initMyStructure(init.vm, reinterpret_cast<Zig::GlobalObject\*>(init.owner)));
|
||||
});
|
||||
|
||||
```
|
||||
|
||||
Then, implement the function that creates the structure:
|
||||
@@ -316,7 +320,7 @@ Structure* setupX509CertificateStructure(JSC::VM &vm, Zig::GlobalObject* globalO
|
||||
auto* prototypeStructure = JSX509CertificatePrototype::createStructure(init.vm, init.global, init.global->objectPrototype());
|
||||
auto* prototype = JSX509CertificatePrototype::create(init.vm, init.global, prototypeStructure);
|
||||
|
||||
// If there is no prototype or it only has
|
||||
// If there is no prototype or it only has
|
||||
|
||||
auto* structure = JSX509Certificate::createStructure(init.vm, init.global, prototype);
|
||||
init.setPrototype(prototype);
|
||||
@@ -325,7 +329,6 @@ Structure* setupX509CertificateStructure(JSC::VM &vm, Zig::GlobalObject* globalO
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
Then, use the structure by calling `globalObject.m_myStructureName.get(globalObject)`
|
||||
|
||||
```C++
|
||||
@@ -378,12 +381,14 @@ extern "C" JSC::EncodedJSValue Bun__JSBigIntStatsObjectConstructor(Zig::GlobalOb
|
||||
```
|
||||
|
||||
Zig:
|
||||
|
||||
```zig
|
||||
extern "c" fn Bun__JSBigIntStatsObjectConstructor(*JSC.JSGlobalObject) JSC.JSValue;
|
||||
pub const getBigIntStatsConstructor = Bun__JSBigIntStatsObjectConstructor;
|
||||
```
|
||||
|
||||
To create an object (instance) of a JS class defined in C++ from Zig, follow the __toJS convention like this:
|
||||
To create an object (instance) of a JS class defined in C++ from Zig, follow the \_\_toJS convention like this:
|
||||
|
||||
```c++
|
||||
// X509* is whatever we need to create the object
|
||||
extern "C" EncodedJSValue Bun__X509__toJS(Zig::GlobalObject* globalObject, X509* cert)
|
||||
@@ -395,12 +400,13 @@ extern "C" EncodedJSValue Bun__X509__toJS(Zig::GlobalObject* globalObject, X509*
|
||||
```
|
||||
|
||||
And from Zig:
|
||||
|
||||
```zig
|
||||
const X509 = opaque {
|
||||
// ... class
|
||||
// ... class
|
||||
|
||||
extern fn Bun__X509__toJS(*JSC.JSGlobalObject, *X509) JSC.JSValue;
|
||||
|
||||
|
||||
pub fn toJS(this: *X509, globalObject: *JSC.JSGlobalObject) JSC.JSValue {
|
||||
return Bun__X509__toJS(globalObject, this);
|
||||
}
|
||||
|
||||
488
.cursor/rules/zig-javascriptcore-classes.mdc
Normal file
488
.cursor/rules/zig-javascriptcore-classes.mdc
Normal file
@@ -0,0 +1,488 @@
|
||||
---
|
||||
description: How Zig works with JavaScriptCore bindings generator
|
||||
globs:
|
||||
alwaysApply: false
|
||||
---
|
||||
# Bun's JavaScriptCore Class Bindings Generator
|
||||
|
||||
This document explains how Bun's class bindings generator works to bridge Zig and JavaScript code through JavaScriptCore (JSC).
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
Bun's binding system creates a seamless bridge between JavaScript and Zig, allowing Zig implementations to be exposed as JavaScript classes. The system has several key components:
|
||||
|
||||
1. **Zig Implementation** (.zig files)
|
||||
2. **JavaScript Interface Definition** (.classes.ts files)
|
||||
3. **Generated Code** (C++/Zig files that connect everything)
|
||||
|
||||
## Class Definition Files
|
||||
|
||||
### JavaScript Interface (.classes.ts)
|
||||
|
||||
The `.classes.ts` files define the JavaScript API using a declarative approach:
|
||||
|
||||
```typescript
|
||||
// Example: encoding.classes.ts
|
||||
define({
|
||||
name: "TextDecoder",
|
||||
constructor: true,
|
||||
JSType: "object",
|
||||
finalize: true,
|
||||
proto: {
|
||||
decode: {
|
||||
// Function definition
|
||||
args: 1,
|
||||
},
|
||||
encoding: {
|
||||
// Getter with caching
|
||||
getter: true,
|
||||
cache: true,
|
||||
},
|
||||
fatal: {
|
||||
// Read-only property
|
||||
getter: true,
|
||||
},
|
||||
ignoreBOM: {
|
||||
// Read-only property
|
||||
getter: true,
|
||||
}
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
Each class definition specifies:
|
||||
- The class name
|
||||
- Whether it has a constructor
|
||||
- JavaScript type (object, function, etc.)
|
||||
- Properties and methods in the `proto` field
|
||||
- Caching strategy for properties
|
||||
- Finalization requirements
|
||||
|
||||
### Zig Implementation (.zig)
|
||||
|
||||
The Zig files implement the native functionality:
|
||||
|
||||
```zig
|
||||
// Example: TextDecoder.zig
|
||||
pub const TextDecoder = struct {
|
||||
// Internal state
|
||||
encoding: []const u8,
|
||||
fatal: bool,
|
||||
ignoreBOM: bool,
|
||||
|
||||
// Use generated bindings
|
||||
pub usingnamespace JSC.Codegen.JSTextDecoder;
|
||||
pub usingnamespace bun.New(@This());
|
||||
|
||||
// Constructor implementation - note use of globalObject
|
||||
pub fn constructor(
|
||||
globalObject: *JSGlobalObject,
|
||||
callFrame: *JSC.CallFrame,
|
||||
) bun.JSError!*TextDecoder {
|
||||
// Implementation
|
||||
}
|
||||
|
||||
// Prototype methods - note return type includes JSError
|
||||
pub fn decode(
|
||||
this: *TextDecoder,
|
||||
globalObject: *JSGlobalObject,
|
||||
callFrame: *JSC.CallFrame,
|
||||
) bun.JSError!JSC.JSValue {
|
||||
// Implementation
|
||||
}
|
||||
|
||||
// Getters
|
||||
pub fn getEncoding(this: *TextDecoder, globalObject: *JSGlobalObject) JSC.JSValue {
|
||||
return JSC.JSValue.createStringFromUTF8(globalObject, this.encoding);
|
||||
}
|
||||
|
||||
pub fn getFatal(this: *TextDecoder, globalObject: *JSGlobalObject) JSC.JSValue {
|
||||
return JSC.JSValue.jsBoolean(this.fatal);
|
||||
}
|
||||
|
||||
// Cleanup - note standard pattern of using deinit/deref
|
||||
pub fn deinit(this: *TextDecoder) void {
|
||||
// Release any retained resources
|
||||
}
|
||||
|
||||
pub fn finalize(this: *TextDecoder) void {
|
||||
this.deinit();
|
||||
// Or sometimes this is used to free memory instead
|
||||
bun.default_allocator.destroy(this);
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
Key components in the Zig file:
|
||||
- The struct containing native state
|
||||
- `usingnamespace JSC.Codegen.JS<ClassName>` to include generated code
|
||||
- `usingnamespace bun.New(@This())` for object creation helpers
|
||||
- Constructor and methods using `bun.JSError!JSValue` return type for proper error handling
|
||||
- Consistent use of `globalObject` parameter name instead of `ctx`
|
||||
- Methods matching the JavaScript interface
|
||||
- Getters/setters for properties
|
||||
- Proper resource cleanup pattern with `deinit()` and `finalize()`
|
||||
|
||||
## Code Generation System
|
||||
|
||||
The binding generator produces C++ code that connects JavaScript and Zig:
|
||||
|
||||
1. **JSC Class Structure**: Creates C++ classes for the JS object, prototype, and constructor
|
||||
2. **Memory Management**: Handles GC integration through JSC's WriteBarrier
|
||||
3. **Method Binding**: Connects JS function calls to Zig implementations
|
||||
4. **Type Conversion**: Converts between JS values and Zig types
|
||||
5. **Property Caching**: Implements the caching system for properties
|
||||
|
||||
The generated C++ code includes:
|
||||
- A JSC wrapper class (`JSTextDecoder`)
|
||||
- A prototype class (`JSTextDecoderPrototype`)
|
||||
- A constructor function (`JSTextDecoderConstructor`)
|
||||
- Function bindings (`TextDecoderPrototype__decodeCallback`)
|
||||
- Property getters/setters (`TextDecoderPrototype__encodingGetterWrap`)
|
||||
|
||||
## CallFrame Access
|
||||
|
||||
The `CallFrame` object provides access to JavaScript execution context:
|
||||
|
||||
```zig
|
||||
pub fn decode(
|
||||
this: *TextDecoder,
|
||||
globalObject: *JSGlobalObject,
|
||||
callFrame: *JSC.CallFrame
|
||||
) bun.JSError!JSC.JSValue {
|
||||
// Get arguments
|
||||
const input = callFrame.argument(0);
|
||||
const options = callFrame.argument(1);
|
||||
|
||||
// Get this value
|
||||
const thisValue = callFrame.thisValue();
|
||||
|
||||
// Implementation with error handling
|
||||
if (input.isUndefinedOrNull()) {
|
||||
return globalObject.throw("Input cannot be null or undefined", .{});
|
||||
}
|
||||
|
||||
// Return value or throw error
|
||||
return JSC.JSValue.jsString(globalObject, "result");
|
||||
}
|
||||
```
|
||||
|
||||
CallFrame methods include:
|
||||
- `argument(i)`: Get the i-th argument
|
||||
- `argumentCount()`: Get the number of arguments
|
||||
- `thisValue()`: Get the `this` value
|
||||
- `callee()`: Get the function being called
|
||||
|
||||
## Property Caching and GC-Owned Values
|
||||
|
||||
The `cache: true` option in property definitions enables JSC's WriteBarrier to efficiently store values:
|
||||
|
||||
```typescript
|
||||
encoding: {
|
||||
getter: true,
|
||||
cache: true, // Enable caching
|
||||
}
|
||||
```
|
||||
|
||||
### C++ Implementation
|
||||
|
||||
In the generated C++ code, caching uses JSC's WriteBarrier:
|
||||
|
||||
```cpp
|
||||
JSC_DEFINE_CUSTOM_GETTER(TextDecoderPrototype__encodingGetterWrap, (...)) {
|
||||
auto& vm = JSC::getVM(lexicalGlobalObject);
|
||||
Zig::GlobalObject *globalObject = reinterpret_cast<Zig::GlobalObject*>(lexicalGlobalObject);
|
||||
auto throwScope = DECLARE_THROW_SCOPE(vm);
|
||||
JSTextDecoder* thisObject = jsCast<JSTextDecoder*>(JSValue::decode(encodedThisValue));
|
||||
JSC::EnsureStillAliveScope thisArg = JSC::EnsureStillAliveScope(thisObject);
|
||||
|
||||
// Check for cached value and return if present
|
||||
if (JSValue cachedValue = thisObject->m_encoding.get())
|
||||
return JSValue::encode(cachedValue);
|
||||
|
||||
// Get value from Zig implementation
|
||||
JSC::JSValue result = JSC::JSValue::decode(
|
||||
TextDecoderPrototype__getEncoding(thisObject->wrapped(), globalObject)
|
||||
);
|
||||
RETURN_IF_EXCEPTION(throwScope, {});
|
||||
|
||||
// Store in cache for future access
|
||||
thisObject->m_encoding.set(vm, thisObject, result);
|
||||
RELEASE_AND_RETURN(throwScope, JSValue::encode(result));
|
||||
}
|
||||
```
|
||||
|
||||
### Zig Accessor Functions
|
||||
|
||||
For each cached property, the generator creates Zig accessor functions that allow Zig code to work with these GC-owned values:
|
||||
|
||||
```zig
|
||||
// External function declarations
|
||||
extern fn TextDecoderPrototype__encodingSetCachedValue(JSC.JSValue, *JSC.JSGlobalObject, JSC.JSValue) callconv(JSC.conv) void;
|
||||
extern fn TextDecoderPrototype__encodingGetCachedValue(JSC.JSValue) callconv(JSC.conv) JSC.JSValue;
|
||||
|
||||
/// `TextDecoder.encoding` setter
|
||||
/// This value will be visited by the garbage collector.
|
||||
pub fn encodingSetCached(thisValue: JSC.JSValue, globalObject: *JSC.JSGlobalObject, value: JSC.JSValue) void {
|
||||
JSC.markBinding(@src());
|
||||
TextDecoderPrototype__encodingSetCachedValue(thisValue, globalObject, value);
|
||||
}
|
||||
|
||||
/// `TextDecoder.encoding` getter
|
||||
/// This value will be visited by the garbage collector.
|
||||
pub fn encodingGetCached(thisValue: JSC.JSValue) ?JSC.JSValue {
|
||||
JSC.markBinding(@src());
|
||||
const result = TextDecoderPrototype__encodingGetCachedValue(thisValue);
|
||||
if (result == .zero)
|
||||
return null;
|
||||
|
||||
return result;
|
||||
}
|
||||
```
|
||||
|
||||
### Benefits of GC-Owned Values
|
||||
|
||||
This system provides several key benefits:
|
||||
|
||||
1. **Automatic Memory Management**: The JavaScriptCore GC tracks and manages these values
|
||||
2. **Proper Garbage Collection**: The WriteBarrier ensures values are properly visited during GC
|
||||
3. **Consistent Access**: Zig code can easily get/set these cached JS values
|
||||
4. **Performance**: Cached values avoid repeated computation or serialization
|
||||
|
||||
### Use Cases
|
||||
|
||||
GC-owned cached values are particularly useful for:
|
||||
|
||||
1. **Computed Properties**: Store expensive computation results
|
||||
2. **Lazily Created Objects**: Create objects only when needed, then cache them
|
||||
3. **References to Other Objects**: Store references to other JS objects that need GC tracking
|
||||
4. **Memoization**: Cache results based on input parameters
|
||||
|
||||
The WriteBarrier mechanism ensures that any JS values stored in this way are properly tracked by the garbage collector.
|
||||
|
||||
## Memory Management and Finalization
|
||||
|
||||
The binding system handles memory management across the JavaScript/Zig boundary:
|
||||
|
||||
1. **Object Creation**: JavaScript `new TextDecoder()` creates both a JS wrapper and a Zig struct
|
||||
2. **Reference Tracking**: JSC's GC tracks all JS references to the object
|
||||
3. **Finalization**: When the JS object is collected, the finalizer releases Zig resources
|
||||
|
||||
Bun uses a consistent pattern for resource cleanup:
|
||||
|
||||
```zig
|
||||
// Resource cleanup method - separate from finalization
|
||||
pub fn deinit(this: *TextDecoder) void {
|
||||
// Release resources like strings
|
||||
this._encoding.deref(); // String deref pattern
|
||||
|
||||
// Free any buffers
|
||||
if (this.buffer) |buffer| {
|
||||
bun.default_allocator.free(buffer);
|
||||
}
|
||||
}
|
||||
|
||||
// Called by the GC when object is collected
|
||||
pub fn finalize(this: *TextDecoder) void {
|
||||
JSC.markBinding(@src()); // For debugging
|
||||
this.deinit(); // Clean up resources
|
||||
bun.default_allocator.destroy(this); // Free the object itself
|
||||
}
|
||||
```
|
||||
|
||||
Some objects that hold references to other JS objects use `.deref()` instead:
|
||||
|
||||
```zig
|
||||
pub fn finalize(this: *SocketAddress) void {
|
||||
JSC.markBinding(@src());
|
||||
this._presentation.deref(); // Release references
|
||||
this.destroy();
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling with JSError
|
||||
|
||||
Bun uses `bun.JSError!JSValue` return type for proper error handling:
|
||||
|
||||
```zig
|
||||
pub fn decode(
|
||||
this: *TextDecoder,
|
||||
globalObject: *JSGlobalObject,
|
||||
callFrame: *JSC.CallFrame
|
||||
) bun.JSError!JSC.JSValue {
|
||||
// Throwing an error
|
||||
if (callFrame.argumentCount() < 1) {
|
||||
return globalObject.throw("Missing required argument", .{});
|
||||
}
|
||||
|
||||
// Or returning a success value
|
||||
return JSC.JSValue.jsString(globalObject, "Success!");
|
||||
}
|
||||
```
|
||||
|
||||
This pattern allows Zig functions to:
|
||||
1. Return JavaScript values on success
|
||||
2. Throw JavaScript exceptions on error
|
||||
3. Propagate errors automatically through the call stack
|
||||
|
||||
## Type Safety and Error Handling
|
||||
|
||||
The binding system includes robust error handling:
|
||||
|
||||
```cpp
|
||||
// Example of type checking in generated code
|
||||
JSTextDecoder* thisObject = jsDynamicCast<JSTextDecoder*>(callFrame->thisValue());
|
||||
if (UNLIKELY(!thisObject)) {
|
||||
scope.throwException(lexicalGlobalObject,
|
||||
Bun::createInvalidThisError(lexicalGlobalObject, callFrame->thisValue(), "TextDecoder"_s));
|
||||
return {};
|
||||
}
|
||||
```
|
||||
|
||||
## Prototypal Inheritance
|
||||
|
||||
The binding system creates proper JavaScript prototype chains:
|
||||
|
||||
1. **Constructor**: JSTextDecoderConstructor with standard .prototype property
|
||||
2. **Prototype**: JSTextDecoderPrototype with methods and properties
|
||||
3. **Instances**: Each JSTextDecoder instance with __proto__ pointing to prototype
|
||||
|
||||
This ensures JavaScript inheritance works as expected:
|
||||
|
||||
```cpp
|
||||
// From generated code
|
||||
void JSTextDecoderConstructor::finishCreation(VM& vm, JSC::JSGlobalObject* globalObject, JSTextDecoderPrototype* prototype)
|
||||
{
|
||||
Base::finishCreation(vm, 0, "TextDecoder"_s, PropertyAdditionMode::WithoutStructureTransition);
|
||||
|
||||
// Set up the prototype chain
|
||||
putDirectWithoutTransition(vm, vm.propertyNames->prototype, prototype, PropertyAttribute::DontEnum | PropertyAttribute::DontDelete | PropertyAttribute::ReadOnly);
|
||||
ASSERT(inherits(info()));
|
||||
}
|
||||
```
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
The binding system is optimized for performance:
|
||||
|
||||
1. **Direct Pointer Access**: JavaScript objects maintain a direct pointer to Zig objects
|
||||
2. **Property Caching**: WriteBarrier caching avoids repeated native calls for stable properties
|
||||
3. **Memory Management**: JSC garbage collection integrated with Zig memory management
|
||||
4. **Type Conversion**: Fast paths for common JavaScript/Zig type conversions
|
||||
|
||||
## Creating a New Class Binding
|
||||
|
||||
To create a new class binding in Bun:
|
||||
|
||||
1. **Define the class interface** in a `.classes.ts` file:
|
||||
```typescript
|
||||
define({
|
||||
name: "MyClass",
|
||||
constructor: true,
|
||||
finalize: true,
|
||||
proto: {
|
||||
myMethod: {
|
||||
args: 1,
|
||||
},
|
||||
myProperty: {
|
||||
getter: true,
|
||||
cache: true,
|
||||
}
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
2. **Implement the native functionality** in a `.zig` file:
|
||||
```zig
|
||||
pub const MyClass = struct {
|
||||
// State
|
||||
value: []const u8,
|
||||
|
||||
// Generated bindings
|
||||
pub usingnamespace JSC.Codegen.JSMyClass;
|
||||
pub usingnamespace bun.New(@This());
|
||||
|
||||
// Constructor
|
||||
pub fn constructor(
|
||||
globalObject: *JSGlobalObject,
|
||||
callFrame: *JSC.CallFrame,
|
||||
) bun.JSError!*MyClass {
|
||||
const arg = callFrame.argument(0);
|
||||
// Implementation
|
||||
}
|
||||
|
||||
// Method
|
||||
pub fn myMethod(
|
||||
this: *MyClass,
|
||||
globalObject: *JSGlobalObject,
|
||||
callFrame: *JSC.CallFrame,
|
||||
) bun.JSError!JSC.JSValue {
|
||||
// Implementation
|
||||
}
|
||||
|
||||
// Getter
|
||||
pub fn getMyProperty(this: *MyClass, globalObject: *JSGlobalObject) JSC.JSValue {
|
||||
return JSC.JSValue.jsString(globalObject, this.value);
|
||||
}
|
||||
|
||||
// Resource cleanup
|
||||
pub fn deinit(this: *MyClass) void {
|
||||
// Clean up resources
|
||||
}
|
||||
|
||||
pub fn finalize(this: *MyClass) void {
|
||||
this.deinit();
|
||||
bun.default_allocator.destroy(this);
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
3. **The binding generator** creates all necessary C++ and Zig glue code to connect JavaScript and Zig, including:
|
||||
- C++ class definitions
|
||||
- Method and property bindings
|
||||
- Memory management utilities
|
||||
- GC integration code
|
||||
|
||||
## Generated Code Structure
|
||||
|
||||
The binding generator produces several components:
|
||||
|
||||
### 1. C++ Classes
|
||||
|
||||
For each Zig class, the system generates:
|
||||
|
||||
- **JS<Class>**: Main wrapper that holds a pointer to the Zig object (`JSTextDecoder`)
|
||||
- **JS<Class>Prototype**: Contains methods and properties (`JSTextDecoderPrototype`)
|
||||
- **JS<Class>Constructor**: Implementation of the JavaScript constructor (`JSTextDecoderConstructor`)
|
||||
|
||||
### 2. C++ Methods and Properties
|
||||
|
||||
- **Method Callbacks**: `TextDecoderPrototype__decodeCallback`
|
||||
- **Property Getters/Setters**: `TextDecoderPrototype__encodingGetterWrap`
|
||||
- **Initialization Functions**: `finishCreation` methods for setting up the class
|
||||
|
||||
### 3. Zig Bindings
|
||||
|
||||
- **External Function Declarations**:
|
||||
```zig
|
||||
extern fn TextDecoderPrototype__decode(*TextDecoder, *JSC.JSGlobalObject, *JSC.CallFrame) callconv(JSC.conv) JSC.EncodedJSValue;
|
||||
```
|
||||
|
||||
- **Cached Value Accessors**:
|
||||
```zig
|
||||
pub fn encodingGetCached(thisValue: JSC.JSValue) ?JSC.JSValue { ... }
|
||||
pub fn encodingSetCached(thisValue: JSC.JSValue, globalObject: *JSC.JSGlobalObject, value: JSC.JSValue) void { ... }
|
||||
```
|
||||
|
||||
- **Constructor Helpers**:
|
||||
```zig
|
||||
pub fn create(globalObject: *JSC.JSGlobalObject) bun.JSError!JSC.JSValue { ... }
|
||||
```
|
||||
|
||||
### 4. GC Integration
|
||||
|
||||
- **Memory Cost Calculation**: `estimatedSize` method
|
||||
- **Child Visitor Methods**: `visitChildrenImpl` and `visitAdditionalChildren`
|
||||
- **Heap Analysis**: `analyzeHeap` for debugging memory issues
|
||||
|
||||
This architecture makes it possible to implement high-performance native functionality in Zig while exposing a clean, idiomatic JavaScript API to users.
|
||||
2
.github/pull_request_template.md
vendored
2
.github/pull_request_template.md
vendored
@@ -28,7 +28,7 @@ This adds a new flag --bail to bun test. When set, it will stop running tests af
|
||||
|
||||
- [ ] I checked the lifetime of memory allocated to verify it's (1) freed and (2) only freed when it should be
|
||||
- [ ] I included a test for the new code, or an existing test covers it
|
||||
- [ ] JSValue used outside outside of the stack is either wrapped in a JSC.Strong or is JSValueProtect'ed
|
||||
- [ ] JSValue used outside of the stack is either wrapped in a JSC.Strong or is JSValueProtect'ed
|
||||
- [ ] I wrote TypeScript/JavaScript tests and they pass locally (`bun-debug test test-file-name.test`)
|
||||
-->
|
||||
|
||||
|
||||
6
.vscode/launch.json
generated
vendored
6
.vscode/launch.json
generated
vendored
@@ -1118,7 +1118,11 @@
|
||||
"request": "attach",
|
||||
"name": "rr",
|
||||
"trace": "Off",
|
||||
"setupCommands": ["handle SIGPWR nostop noprint pass"],
|
||||
"setupCommands": [
|
||||
"handle SIGPWR nostop noprint pass",
|
||||
"source ${workspaceFolder}/misctools/gdb/std_gdb_pretty_printers.py",
|
||||
"source ${workspaceFolder}/misctools/gdb/zig_gdb_pretty_printers.py",
|
||||
],
|
||||
},
|
||||
],
|
||||
"inputs": [
|
||||
|
||||
2
.vscode/settings.json
vendored
2
.vscode/settings.json
vendored
@@ -35,8 +35,6 @@
|
||||
// "zig.zls.enableBuildOnSave": true,
|
||||
// "zig.buildOnSave": true,
|
||||
"zig.buildFilePath": "${workspaceFolder}/build.zig",
|
||||
"zig.path": "${workspaceFolder}/vendor/zig/zig.exe",
|
||||
"zig.zls.path": "${workspaceFolder}/vendor/zig/zls.exe",
|
||||
"zig.formattingProvider": "zls",
|
||||
"zig.zls.enableInlayHints": false,
|
||||
"[zig]": {
|
||||
|
||||
@@ -53,39 +53,39 @@ $ brew install bun
|
||||
|
||||
## Install LLVM
|
||||
|
||||
Bun requires LLVM 18 (`clang` is part of LLVM). This version requirement is to match WebKit (precompiled), as mismatching versions will cause memory allocation failures at runtime. In most cases, you can install LLVM through your system package manager:
|
||||
Bun requires LLVM 19 (`clang` is part of LLVM). This version requirement is to match WebKit (precompiled), as mismatching versions will cause memory allocation failures at runtime. In most cases, you can install LLVM through your system package manager:
|
||||
|
||||
{% codetabs group="os" %}
|
||||
|
||||
```bash#macOS (Homebrew)
|
||||
$ brew install llvm@18
|
||||
$ brew install llvm@19
|
||||
```
|
||||
|
||||
```bash#Ubuntu/Debian
|
||||
$ # LLVM has an automatic installation script that is compatible with all versions of Ubuntu
|
||||
$ wget https://apt.llvm.org/llvm.sh -O - | sudo bash -s -- 18 all
|
||||
$ wget https://apt.llvm.org/llvm.sh -O - | sudo bash -s -- 19 all
|
||||
```
|
||||
|
||||
```bash#Arch
|
||||
$ sudo pacman -S llvm clang18 lld
|
||||
$ sudo pacman -S llvm clang lld
|
||||
```
|
||||
|
||||
```bash#Fedora
|
||||
$ sudo dnf install llvm18 clang18 lld18-devel
|
||||
$ sudo dnf install llvm clang lld-devel
|
||||
```
|
||||
|
||||
```bash#openSUSE Tumbleweed
|
||||
$ sudo zypper install clang18 lld18 llvm18
|
||||
$ sudo zypper install clang19 lld19 llvm19
|
||||
```
|
||||
|
||||
{% /codetabs %}
|
||||
|
||||
If none of the above solutions apply, you will have to install it [manually](https://github.com/llvm/llvm-project/releases/tag/llvmorg-19.1.7).
|
||||
|
||||
Make sure Clang/LLVM 18 is in your path:
|
||||
Make sure Clang/LLVM 19 is in your path:
|
||||
|
||||
```bash
|
||||
$ which clang-18
|
||||
$ which clang-19
|
||||
```
|
||||
|
||||
If not, run this to manually add it:
|
||||
@@ -94,13 +94,13 @@ If not, run this to manually add it:
|
||||
|
||||
```bash#macOS (Homebrew)
|
||||
# use fish_add_path if you're using fish
|
||||
# use path+="$(brew --prefix llvm@18)/bin" if you are using zsh
|
||||
$ export PATH="$(brew --prefix llvm@18)/bin:$PATH"
|
||||
# use path+="$(brew --prefix llvm@19)/bin" if you are using zsh
|
||||
$ export PATH="$(brew --prefix llvm@19)/bin:$PATH"
|
||||
```
|
||||
|
||||
```bash#Arch
|
||||
# use fish_add_path if you're using fish
|
||||
$ export PATH="$PATH:/usr/lib/llvm18/bin"
|
||||
$ export PATH="$PATH:/usr/lib/llvm19/bin"
|
||||
```
|
||||
|
||||
{% /codetabs %}
|
||||
@@ -134,6 +134,16 @@ We recommend adding `./build/debug` to your `$PATH` so that you can run `bun-deb
|
||||
$ bun-debug
|
||||
```
|
||||
|
||||
## Running debug builds
|
||||
|
||||
The `bd` package.json script compiles and runs a debug build of Bun, only printing the output of the build process if it fails.
|
||||
|
||||
```sh
|
||||
$ bun bd <args>
|
||||
$ bun bd test foo.test.ts
|
||||
$ bun bd ./foo.ts
|
||||
```
|
||||
|
||||
## Code generation scripts
|
||||
|
||||
Several code generation scripts are used during Bun's build process. These are run automatically when changes are made to certain files.
|
||||
@@ -250,7 +260,7 @@ The issue may manifest when initially running `bun setup` as Clang being unable
|
||||
```
|
||||
The C++ compiler
|
||||
|
||||
"/usr/bin/clang++-18"
|
||||
"/usr/bin/clang++-19"
|
||||
|
||||
is not able to compile a simple test program.
|
||||
```
|
||||
|
||||
44
bench/crypto/aes-gcm-throughput.mjs
Normal file
44
bench/crypto/aes-gcm-throughput.mjs
Normal file
@@ -0,0 +1,44 @@
|
||||
import { bench, run } from "../runner.mjs";
|
||||
import crypto from "node:crypto";
|
||||
import { Buffer } from "node:buffer";
|
||||
|
||||
const keylen = { "aes-128-gcm": 16, "aes-192-gcm": 24, "aes-256-gcm": 32 };
|
||||
const sizes = [4 * 1024, 1024 * 1024];
|
||||
const ciphers = ["aes-128-gcm", "aes-192-gcm", "aes-256-gcm"];
|
||||
|
||||
const messages = {};
|
||||
sizes.forEach(size => {
|
||||
messages[size] = Buffer.alloc(size, "b");
|
||||
});
|
||||
|
||||
const keys = {};
|
||||
ciphers.forEach(cipher => {
|
||||
keys[cipher] = crypto.randomBytes(keylen[cipher]);
|
||||
});
|
||||
|
||||
// Fixed IV and AAD
|
||||
const iv = crypto.randomBytes(12);
|
||||
const associate_data = Buffer.alloc(16, "z");
|
||||
|
||||
for (const cipher of ciphers) {
|
||||
for (const size of sizes) {
|
||||
const message = messages[size];
|
||||
const key = keys[cipher];
|
||||
|
||||
bench(`${cipher} ${size / 1024}KB`, () => {
|
||||
const alice = crypto.createCipheriv(cipher, key, iv);
|
||||
alice.setAAD(associate_data);
|
||||
const enc = alice.update(message);
|
||||
alice.final();
|
||||
const tag = alice.getAuthTag();
|
||||
|
||||
const bob = crypto.createDecipheriv(cipher, key, iv);
|
||||
bob.setAuthTag(tag);
|
||||
bob.setAAD(associate_data);
|
||||
bob.update(enc);
|
||||
bob.final();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
await run();
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
// Enable latest features
|
||||
"lib": ["ESNext", "DOM"],
|
||||
"lib": ["ESNext"],
|
||||
"target": "ESNext",
|
||||
"module": "ESNext",
|
||||
"moduleDetection": "force",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
// Enable latest features
|
||||
"lib": ["ESNext", "DOM"],
|
||||
"lib": ["ESNext"],
|
||||
"target": "ESNext",
|
||||
"module": "ESNext",
|
||||
"moduleDetection": "force",
|
||||
|
||||
28
bench/snippets/redis-simple.mjs
Normal file
28
bench/snippets/redis-simple.mjs
Normal file
@@ -0,0 +1,28 @@
|
||||
import ioredis from "ioredis";
|
||||
|
||||
const redis = process.argv.includes("--redis=native")
|
||||
? Bun.redis
|
||||
: new ioredis("redis://localhost:6379", {
|
||||
enableAutoPipelining: true,
|
||||
});
|
||||
|
||||
const isBun = globalThis.Bun && redis === Bun.redis;
|
||||
for (let count of [100, 1000]) {
|
||||
function iterate() {
|
||||
const promises = new Array(count);
|
||||
for (let i = 0; i < count; i++) {
|
||||
promises[i] = redis.get("greeting");
|
||||
}
|
||||
|
||||
return Promise.all(promises);
|
||||
}
|
||||
|
||||
const label = isBun ? `Bun.redis` : `ioredis`;
|
||||
console.time(`GET 'greeting' batches of ${count} - ${label} (${count} iterations)`);
|
||||
for (let i = 0; i < 1000; i++) {
|
||||
await iterate();
|
||||
}
|
||||
console.timeEnd(`GET 'greeting' batches of ${count} - ${label} (${count} iterations)`);
|
||||
}
|
||||
|
||||
process.exit(0);
|
||||
42
build.zig
42
build.zig
@@ -285,6 +285,40 @@ pub fn build(b: *Build) !void {
|
||||
step.dependOn(addInstallObjectFile(b, bun_obj, "bun-zig", obj_format));
|
||||
}
|
||||
|
||||
// zig build test
|
||||
{
|
||||
var step = b.step("test", "Build Bun's unit test suite");
|
||||
var o = build_options;
|
||||
var unit_tests = b.addTest(.{
|
||||
.name = "bun-test",
|
||||
.optimize = build_options.optimize,
|
||||
.root_source_file = b.path("src/unit_test.zig"),
|
||||
.test_runner = .{ .path = b.path("src/main_test.zig"), .mode = .simple },
|
||||
.target = build_options.target,
|
||||
.use_llvm = !build_options.no_llvm,
|
||||
.use_lld = if (build_options.os == .mac) false else !build_options.no_llvm,
|
||||
.omit_frame_pointer = false,
|
||||
.strip = false,
|
||||
});
|
||||
configureObj(b, &o, unit_tests);
|
||||
// Setting `linker_allow_shlib_undefined` causes the linker to ignore
|
||||
// all undefined symbols. We want this because all we care about is the
|
||||
// object file Zig creates; we perform our own linking later. There is
|
||||
// currently no way to make a test build that only creates an object
|
||||
// file w/o creating an executable.
|
||||
//
|
||||
// See: https://github.com/ziglang/zig/issues/23374
|
||||
unit_tests.linker_allow_shlib_undefined = true;
|
||||
unit_tests.link_function_sections = true;
|
||||
unit_tests.link_data_sections = true;
|
||||
unit_tests.bundle_ubsan_rt = false;
|
||||
|
||||
const bin = unit_tests.getEmittedBin();
|
||||
const obj = bin.dirname().path(b, "bun-test.o");
|
||||
const cpy_obj = b.addInstallFile(obj, "bun-test.o");
|
||||
step.dependOn(&cpy_obj.step);
|
||||
}
|
||||
|
||||
// zig build windows-shim
|
||||
{
|
||||
var step = b.step("windows-shim", "Build the Windows shim (bun_shim_impl.exe + bun_shim_debug.exe)");
|
||||
@@ -456,6 +490,11 @@ pub fn addBunObject(b: *Build, opts: *BunBuildOptions) *Compile {
|
||||
.omit_frame_pointer = false,
|
||||
.strip = false, // stripped at the end
|
||||
});
|
||||
configureObj(b, opts, obj);
|
||||
return obj;
|
||||
}
|
||||
|
||||
fn configureObj(b: *Build, opts: *BunBuildOptions, obj: *Compile) void {
|
||||
if (opts.enable_asan) {
|
||||
if (@hasField(Build.Module, "sanitize_address")) {
|
||||
obj.root_module.sanitize_address = true;
|
||||
@@ -465,6 +504,7 @@ pub fn addBunObject(b: *Build, opts: *BunBuildOptions) *Compile {
|
||||
}
|
||||
}
|
||||
obj.bundle_compiler_rt = false;
|
||||
obj.bundle_ubsan_rt = false;
|
||||
obj.root_module.omit_frame_pointer = false;
|
||||
|
||||
// Link libc
|
||||
@@ -494,8 +534,6 @@ pub fn addBunObject(b: *Build, opts: *BunBuildOptions) *Compile {
|
||||
|
||||
const translate_c = getTranslateC(b, opts.target, opts.optimize);
|
||||
obj.root_module.addImport("translated-c-headers", translate_c.createModule());
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
const ObjectFormat = enum {
|
||||
|
||||
3
bun.lock
3
bun.lock
@@ -27,9 +27,10 @@
|
||||
},
|
||||
"packages/bun-types": {
|
||||
"name": "bun-types",
|
||||
"version": "1.2.5",
|
||||
"dependencies": {
|
||||
"@types/node": "*",
|
||||
"@types/ws": "~8.5.10",
|
||||
"@types/ws": "*",
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "^1.5.3",
|
||||
|
||||
@@ -142,6 +142,14 @@ if(UNIX)
|
||||
-fno-unwind-tables
|
||||
-fno-asynchronous-unwind-tables
|
||||
)
|
||||
|
||||
# needed for libuv stubs because they use
|
||||
# C23 feature which lets you define parameter without
|
||||
# name
|
||||
register_compiler_flags(
|
||||
DESCRIPTION "Allow C23 extensions"
|
||||
-Wno-c23-extensions
|
||||
)
|
||||
endif()
|
||||
|
||||
register_compiler_flags(
|
||||
|
||||
@@ -633,7 +633,7 @@ function(register_repository)
|
||||
set(GIT_PATH ${VENDOR_PATH}/${GIT_NAME})
|
||||
endif()
|
||||
|
||||
set(GIT_EFFECTIVE_OUTPUTS)
|
||||
set(GIT_EFFECTIVE_OUTPUTS ${GIT_PATH}/.ref)
|
||||
foreach(output ${GIT_OUTPUTS})
|
||||
list(APPEND GIT_EFFECTIVE_OUTPUTS ${GIT_PATH}/${output})
|
||||
endforeach()
|
||||
@@ -751,11 +751,17 @@ function(register_cmake_command)
|
||||
list(APPEND MAKE_EFFECTIVE_ARGS --fresh)
|
||||
endif()
|
||||
|
||||
set(MAKE_SOURCES)
|
||||
if(TARGET clone-${MAKE_TARGET})
|
||||
list(APPEND MAKE_SOURCES ${MAKE_CWD}/.ref)
|
||||
endif()
|
||||
|
||||
register_command(
|
||||
COMMENT "Configuring ${MAKE_TARGET}"
|
||||
TARGET configure-${MAKE_TARGET}
|
||||
COMMAND ${CMAKE_COMMAND} ${MAKE_EFFECTIVE_ARGS}
|
||||
CWD ${MAKE_CWD}
|
||||
SOURCES ${MAKE_SOURCES}
|
||||
OUTPUTS ${MAKE_BUILD_PATH}/CMakeCache.txt
|
||||
)
|
||||
|
||||
@@ -807,6 +813,7 @@ function(register_cmake_command)
|
||||
TARGETS configure-${MAKE_TARGET}
|
||||
COMMAND ${CMAKE_COMMAND} ${MAKE_BUILD_ARGS}
|
||||
CWD ${MAKE_CWD}
|
||||
SOURCES ${MAKE_SOURCES}
|
||||
ARTIFACTS ${MAKE_ARTIFACTS}
|
||||
)
|
||||
|
||||
|
||||
@@ -26,6 +26,15 @@ else()
|
||||
setx(DEBUG OFF)
|
||||
endif()
|
||||
|
||||
optionx(BUN_TEST BOOL "Build Bun's unit test suite instead of the normal build" DEFAULT OFF)
|
||||
|
||||
if (BUN_TEST)
|
||||
setx(TEST ON)
|
||||
else()
|
||||
setx(TEST OFF)
|
||||
endif()
|
||||
|
||||
|
||||
if(CMAKE_BUILD_TYPE MATCHES "MinSizeRel")
|
||||
setx(ENABLE_SMOL ON)
|
||||
endif()
|
||||
@@ -62,7 +71,14 @@ if(ARCH STREQUAL "x64")
|
||||
optionx(ENABLE_BASELINE BOOL "If baseline features should be used for older CPUs (e.g. disables AVX, AVX2)" DEFAULT OFF)
|
||||
endif()
|
||||
|
||||
optionx(ENABLE_LOGS BOOL "If debug logs should be enabled" DEFAULT ${DEBUG})
|
||||
# Disabling logs by default for tests yields faster builds
|
||||
if (DEBUG AND NOT TEST)
|
||||
set(DEFAULT_ENABLE_LOGS ON)
|
||||
else()
|
||||
set(DEFAULT_ENABLE_LOGS OFF)
|
||||
endif()
|
||||
|
||||
optionx(ENABLE_LOGS BOOL "If debug logs should be enabled" DEFAULT ${DEFAULT_ENABLE_LOGS})
|
||||
optionx(ENABLE_ASSERTIONS BOOL "If debug assertions should be enabled" DEFAULT ${DEBUG})
|
||||
|
||||
optionx(ENABLE_CANARY BOOL "If canary features should be enabled" DEFAULT ON)
|
||||
|
||||
@@ -29,6 +29,9 @@ else()
|
||||
endif()
|
||||
|
||||
set(ZIG_NAME bootstrap-${ZIG_ARCH}-${ZIG_OS_ABI})
|
||||
if(ZIG_COMPILER_SAFE)
|
||||
set(ZIG_NAME ${ZIG_NAME}-ReleaseSafe)
|
||||
endif()
|
||||
set(ZIG_FILENAME ${ZIG_NAME}.zip)
|
||||
|
||||
if(CMAKE_HOST_WIN32)
|
||||
|
||||
@@ -4,7 +4,7 @@ register_repository(
|
||||
REPOSITORY
|
||||
oven-sh/boringssl
|
||||
COMMIT
|
||||
914b005ef3ece44159dca0ffad74eb42a9f6679f
|
||||
7a5d984c69b0c34c4cbb56c6812eaa5b9bef485c
|
||||
)
|
||||
|
||||
register_cmake_command(
|
||||
|
||||
@@ -12,6 +12,10 @@ else()
|
||||
set(bunStrip bun)
|
||||
endif()
|
||||
|
||||
if(TEST)
|
||||
set(bun ${bun}-test)
|
||||
endif()
|
||||
|
||||
set(bunExe ${bun}${CMAKE_EXECUTABLE_SUFFIX})
|
||||
|
||||
if(bunStrip)
|
||||
@@ -528,7 +532,6 @@ file(GLOB_RECURSE BUN_ZIG_SOURCES ${CONFIGURE_DEPENDS}
|
||||
|
||||
list(APPEND BUN_ZIG_SOURCES
|
||||
${CWD}/build.zig
|
||||
${CWD}/src/main.zig
|
||||
${BUN_BINDGEN_ZIG_OUTPUTS}
|
||||
)
|
||||
|
||||
@@ -550,7 +553,13 @@ else()
|
||||
list(APPEND BUN_ZIG_GENERATED_SOURCES ${BUN_BAKE_RUNTIME_OUTPUTS})
|
||||
endif()
|
||||
|
||||
set(BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-zig.o)
|
||||
if (TEST)
|
||||
set(BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-test.o)
|
||||
set(ZIG_STEPS test)
|
||||
else()
|
||||
set(BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-zig.o)
|
||||
set(ZIG_STEPS obj)
|
||||
endif()
|
||||
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm|ARM|arm64|ARM64|aarch64|AARCH64")
|
||||
if(APPLE)
|
||||
@@ -579,10 +588,10 @@ register_command(
|
||||
GROUP
|
||||
console
|
||||
COMMENT
|
||||
"Building src/*.zig for ${ZIG_TARGET}"
|
||||
"Building src/*.zig into ${BUN_ZIG_OUTPUT} for ${ZIG_TARGET}"
|
||||
COMMAND
|
||||
${ZIG_EXECUTABLE}
|
||||
build obj
|
||||
build ${ZIG_STEPS}
|
||||
${CMAKE_ZIG_FLAGS}
|
||||
--prefix ${BUILD_PATH}
|
||||
-Dobj_format=${ZIG_OBJECT_FORMAT}
|
||||
@@ -596,6 +605,7 @@ register_command(
|
||||
-Dcodegen_path=${CODEGEN_PATH}
|
||||
-Dcodegen_embed=$<IF:$<BOOL:${CODEGEN_EMBED}>,true,false>
|
||||
--prominent-compile-errors
|
||||
--summary all
|
||||
${ZIG_FLAGS_BUN}
|
||||
ARTIFACTS
|
||||
${BUN_ZIG_OUTPUT}
|
||||
@@ -635,6 +645,8 @@ file(GLOB BUN_C_SOURCES ${CONFIGURE_DEPENDS}
|
||||
${BUN_USOCKETS_SOURCE}/src/eventing/*.c
|
||||
${BUN_USOCKETS_SOURCE}/src/internal/*.c
|
||||
${BUN_USOCKETS_SOURCE}/src/crypto/*.c
|
||||
${CWD}/src/bun.js/bindings/uv-posix-polyfills.c
|
||||
${CWD}/src/bun.js/bindings/uv-posix-stubs.c
|
||||
)
|
||||
|
||||
if(WIN32)
|
||||
@@ -785,6 +797,10 @@ target_include_directories(${bun} PRIVATE
|
||||
${NODEJS_HEADERS_PATH}/include
|
||||
)
|
||||
|
||||
if(NOT WIN32)
|
||||
target_include_directories(${bun} PRIVATE ${CWD}/src/bun.js/bindings/libuv)
|
||||
endif()
|
||||
|
||||
if(LINUX)
|
||||
include(CheckIncludeFiles)
|
||||
check_include_files("sys/queue.h" HAVE_SYS_QUEUE_H)
|
||||
|
||||
@@ -2,7 +2,7 @@ option(WEBKIT_VERSION "The version of WebKit to use")
|
||||
option(WEBKIT_LOCAL "If a local version of WebKit should be used instead of downloading")
|
||||
|
||||
if(NOT WEBKIT_VERSION)
|
||||
set(WEBKIT_VERSION 91bf2baced1b1309c7e05f19177c97fefec20976)
|
||||
set(WEBKIT_VERSION 06820714a7990ea77c78157f9eeaabaf56c2098a)
|
||||
endif()
|
||||
|
||||
string(SUBSTRING ${WEBKIT_VERSION} 0 16 WEBKIT_VERSION_PREFIX)
|
||||
|
||||
@@ -50,6 +50,7 @@ optionx(ZIG_OBJECT_FORMAT "obj|bc" "Output file format for Zig object files" DEF
|
||||
|
||||
optionx(ZIG_LOCAL_CACHE_DIR FILEPATH "The path to local the zig cache directory" DEFAULT ${CACHE_PATH}/zig/local)
|
||||
optionx(ZIG_GLOBAL_CACHE_DIR FILEPATH "The path to the global zig cache directory" DEFAULT ${CACHE_PATH}/zig/global)
|
||||
optionx(ZIG_COMPILER_SAFE BOOL "Download a ReleaseSafe build of the Zig compiler. Only availble on macos aarch64." DEFAULT OFF)
|
||||
|
||||
setenv(ZIG_LOCAL_CACHE_DIR ${ZIG_LOCAL_CACHE_DIR})
|
||||
setenv(ZIG_GLOBAL_CACHE_DIR ${ZIG_GLOBAL_CACHE_DIR})
|
||||
@@ -78,6 +79,7 @@ register_command(
|
||||
-DZIG_PATH=${ZIG_PATH}
|
||||
-DZIG_COMMIT=${ZIG_COMMIT}
|
||||
-DENABLE_ASAN=${ENABLE_ASAN}
|
||||
-DZIG_COMPILER_SAFE=${ZIG_COMPILER_SAFE}
|
||||
-P ${CWD}/cmake/scripts/DownloadZig.cmake
|
||||
SOURCES
|
||||
${CWD}/cmake/scripts/DownloadZig.cmake
|
||||
|
||||
449
docs/api/cookie.md
Normal file
449
docs/api/cookie.md
Normal file
@@ -0,0 +1,449 @@
|
||||
Bun provides native APIs for working with HTTP cookies through `Bun.Cookie` and `Bun.CookieMap`. These APIs offer fast, easy-to-use methods for parsing, generating, and manipulating cookies in HTTP requests and responses.
|
||||
|
||||
## CookieMap class
|
||||
|
||||
`Bun.CookieMap` provides a Map-like interface for working with collections of cookies. It implements the `Iterable` interface, allowing you to use it with `for...of` loops and other iteration methods.
|
||||
|
||||
```ts
|
||||
// Empty cookie map
|
||||
const cookies = new Bun.CookieMap();
|
||||
|
||||
// From a cookie string
|
||||
const cookies1 = new Bun.CookieMap("name=value; foo=bar");
|
||||
|
||||
// From an object
|
||||
const cookies2 = new Bun.CookieMap({
|
||||
session: "abc123",
|
||||
theme: "dark",
|
||||
});
|
||||
|
||||
// From an array of name/value pairs
|
||||
const cookies3 = new Bun.CookieMap([
|
||||
["session", "abc123"],
|
||||
["theme", "dark"],
|
||||
]);
|
||||
```
|
||||
|
||||
### In HTTP servers
|
||||
|
||||
In Bun's HTTP server, the `cookies` property on the request object (in `routes`) is an instance of `CookieMap`:
|
||||
|
||||
```ts
|
||||
const server = Bun.serve({
|
||||
routes: {
|
||||
"/": req => {
|
||||
// Access request cookies
|
||||
const cookies = req.cookies;
|
||||
|
||||
// Get a specific cookie
|
||||
const sessionCookie = cookies.get("session");
|
||||
if (sessionCookie != null) {
|
||||
console.log(sessionCookie);
|
||||
}
|
||||
|
||||
// Check if a cookie exists
|
||||
if (cookies.has("theme")) {
|
||||
// ...
|
||||
}
|
||||
|
||||
// Set a cookie, it will be automatically applied to the response
|
||||
cookies.set("visited", "true");
|
||||
|
||||
return new Response("Hello");
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
console.log("Server listening at: " + server.url);
|
||||
```
|
||||
|
||||
### Methods
|
||||
|
||||
#### `get(name: string): string | null`
|
||||
|
||||
Retrieves a cookie by name. Returns `null` if the cookie doesn't exist.
|
||||
|
||||
```ts
|
||||
// Get by name
|
||||
const cookie = cookies.get("session");
|
||||
|
||||
if (cookie != null) {
|
||||
console.log(cookie);
|
||||
}
|
||||
```
|
||||
|
||||
#### `has(name: string): boolean`
|
||||
|
||||
Checks if a cookie with the given name exists.
|
||||
|
||||
```ts
|
||||
// Check if cookie exists
|
||||
if (cookies.has("session")) {
|
||||
// Cookie exists
|
||||
}
|
||||
```
|
||||
|
||||
#### `set(name: string, value: string): void`
|
||||
|
||||
#### `set(options: CookieInit): void`
|
||||
|
||||
#### `set(cookie: Cookie): void`
|
||||
|
||||
Adds or updates a cookie in the map. Cookies default to `{ path: "/", sameSite: "lax" }`.
|
||||
|
||||
```ts
|
||||
// Set by name and value
|
||||
cookies.set("session", "abc123");
|
||||
|
||||
// Set using options object
|
||||
cookies.set({
|
||||
name: "theme",
|
||||
value: "dark",
|
||||
maxAge: 3600,
|
||||
secure: true,
|
||||
});
|
||||
|
||||
// Set using Cookie instance
|
||||
const cookie = new Bun.Cookie("visited", "true");
|
||||
cookies.set(cookie);
|
||||
```
|
||||
|
||||
#### `delete(name: string): void`
|
||||
|
||||
#### `delete(options: CookieStoreDeleteOptions): void`
|
||||
|
||||
Removes a cookie from the map. When applied to a Response, this adds a cookie with an empty string value and an expiry date in the past. A cookie will only delete successfully on the browser if the domain and path is the same as it was when the cookie was created.
|
||||
|
||||
```ts
|
||||
// Delete by name using default domain and path.
|
||||
cookies.delete("session");
|
||||
|
||||
// Delete with domain/path options.
|
||||
cookies.delete({
|
||||
name: "session",
|
||||
domain: "example.com",
|
||||
path: "/admin",
|
||||
});
|
||||
```
|
||||
|
||||
#### `toJSON(): Record<string, string>`
|
||||
|
||||
Converts the cookie map to a serializable format.
|
||||
|
||||
```ts
|
||||
const json = cookies.toJSON();
|
||||
```
|
||||
|
||||
#### `toSetCookieHeaders(): string[]`
|
||||
|
||||
Returns an array of values for Set-Cookie headers that can be used to apply all cookie changes.
|
||||
|
||||
When using `Bun.serve()`, you don't need to call this method explicitly. Any changes made to the `req.cookies` map are automatically applied to the response headers. This method is primarily useful when working with other HTTP server implementations.
|
||||
|
||||
```js
|
||||
import { createServer } from "node:http";
|
||||
import { CookieMap } from "bun";
|
||||
|
||||
const server = createServer((req, res) => {
|
||||
const cookieHeader = req.headers.cookie || "";
|
||||
const cookies = new CookieMap(cookieHeader);
|
||||
|
||||
cookies.set("view-count", Number(cookies.get("view-count") || "0") + 1);
|
||||
cookies.delete("session");
|
||||
|
||||
res.writeHead(200, {
|
||||
"Content-Type": "text/plain",
|
||||
"Set-Cookie": cookies.toSetCookieHeaders(),
|
||||
});
|
||||
res.end(`Found ${cookies.size} cookies`);
|
||||
});
|
||||
|
||||
server.listen(3000, () => {
|
||||
console.log("Server running at http://localhost:3000/");
|
||||
});
|
||||
```
|
||||
|
||||
### Iteration
|
||||
|
||||
`CookieMap` provides several methods for iteration:
|
||||
|
||||
```ts
|
||||
// Iterate over [name, cookie] entries
|
||||
for (const [name, value] of cookies) {
|
||||
console.log(`${name}: ${value}`);
|
||||
}
|
||||
|
||||
// Using entries()
|
||||
for (const [name, value] of cookies.entries()) {
|
||||
console.log(`${name}: ${value}`);
|
||||
}
|
||||
|
||||
// Using keys()
|
||||
for (const name of cookies.keys()) {
|
||||
console.log(name);
|
||||
}
|
||||
|
||||
// Using values()
|
||||
for (const value of cookies.values()) {
|
||||
console.log(value);
|
||||
}
|
||||
|
||||
// Using forEach
|
||||
cookies.forEach((value, name) => {
|
||||
console.log(`${name}: ${value}`);
|
||||
});
|
||||
```
|
||||
|
||||
### Properties
|
||||
|
||||
#### `size: number`
|
||||
|
||||
Returns the number of cookies in the map.
|
||||
|
||||
```ts
|
||||
console.log(cookies.size); // Number of cookies
|
||||
```
|
||||
|
||||
## Cookie class
|
||||
|
||||
`Bun.Cookie` represents an HTTP cookie with its name, value, and attributes.
|
||||
|
||||
```ts
|
||||
import { Cookie } from "bun";
|
||||
|
||||
// Create a basic cookie
|
||||
const cookie = new Bun.Cookie("name", "value");
|
||||
|
||||
// Create a cookie with options
|
||||
const secureSessionCookie = new Bun.Cookie("session", "abc123", {
|
||||
domain: "example.com",
|
||||
path: "/admin",
|
||||
expires: new Date(Date.now() + 86400000), // 1 day
|
||||
httpOnly: true,
|
||||
secure: true,
|
||||
sameSite: "strict",
|
||||
});
|
||||
|
||||
// Parse from a cookie string
|
||||
const parsedCookie = new Bun.Cookie("name=value; Path=/; HttpOnly");
|
||||
|
||||
// Create from an options object
|
||||
const objCookie = new Bun.Cookie({
|
||||
name: "theme",
|
||||
value: "dark",
|
||||
maxAge: 3600,
|
||||
secure: true,
|
||||
});
|
||||
```
|
||||
|
||||
### Constructors
|
||||
|
||||
```ts
|
||||
// Basic constructor with name/value
|
||||
new Bun.Cookie(name: string, value: string);
|
||||
|
||||
// Constructor with name, value, and options
|
||||
new Bun.Cookie(name: string, value: string, options: CookieInit);
|
||||
|
||||
// Constructor from cookie string
|
||||
new Bun.Cookie(cookieString: string);
|
||||
|
||||
// Constructor from cookie object
|
||||
new Bun.Cookie(options: CookieInit);
|
||||
```
|
||||
|
||||
### Properties
|
||||
|
||||
```ts
|
||||
cookie.name; // string - Cookie name
|
||||
cookie.value; // string - Cookie value
|
||||
cookie.domain; // string | null - Domain scope (null if not specified)
|
||||
cookie.path; // string - URL path scope (defaults to "/")
|
||||
cookie.expires; // number | undefined - Expiration timestamp (ms since epoch)
|
||||
cookie.secure; // boolean - Require HTTPS
|
||||
cookie.sameSite; // "strict" | "lax" | "none" - SameSite setting
|
||||
cookie.partitioned; // boolean - Whether the cookie is partitioned (CHIPS)
|
||||
cookie.maxAge; // number | undefined - Max age in seconds
|
||||
cookie.httpOnly; // boolean - Accessible only via HTTP (not JavaScript)
|
||||
```
|
||||
|
||||
### Methods
|
||||
|
||||
#### `isExpired(): boolean`
|
||||
|
||||
Checks if the cookie has expired.
|
||||
|
||||
```ts
|
||||
// Expired cookie (Date in the past)
|
||||
const expiredCookie = new Bun.Cookie("name", "value", {
|
||||
expires: new Date(Date.now() - 1000),
|
||||
});
|
||||
console.log(expiredCookie.isExpired()); // true
|
||||
|
||||
// Valid cookie (Using maxAge instead of expires)
|
||||
const validCookie = new Bun.Cookie("name", "value", {
|
||||
maxAge: 3600, // 1 hour in seconds
|
||||
});
|
||||
console.log(validCookie.isExpired()); // false
|
||||
|
||||
// Session cookie (no expiration)
|
||||
const sessionCookie = new Bun.Cookie("name", "value");
|
||||
console.log(sessionCookie.isExpired()); // false
|
||||
```
|
||||
|
||||
#### `serialize(): string`
|
||||
|
||||
#### `toString(): string`
|
||||
|
||||
Returns a string representation of the cookie suitable for a `Set-Cookie` header.
|
||||
|
||||
```ts
|
||||
const cookie = new Bun.Cookie("session", "abc123", {
|
||||
domain: "example.com",
|
||||
path: "/admin",
|
||||
expires: new Date(Date.now() + 86400000),
|
||||
secure: true,
|
||||
httpOnly: true,
|
||||
sameSite: "strict",
|
||||
});
|
||||
|
||||
console.log(cookie.serialize());
|
||||
// => "session=abc123; Domain=example.com; Path=/admin; Expires=Sun, 19 Mar 2025 15:03:26 GMT; Secure; HttpOnly; SameSite=strict"
|
||||
console.log(cookie.toString());
|
||||
// => "session=abc123; Domain=example.com; Path=/admin; Expires=Sun, 19 Mar 2025 15:03:26 GMT; Secure; HttpOnly; SameSite=strict"
|
||||
```
|
||||
|
||||
#### `toJSON(): CookieInit`
|
||||
|
||||
Converts the cookie to a plain object suitable for JSON serialization.
|
||||
|
||||
```ts
|
||||
const cookie = new Bun.Cookie("session", "abc123", {
|
||||
secure: true,
|
||||
httpOnly: true,
|
||||
});
|
||||
|
||||
const json = cookie.toJSON();
|
||||
// => {
|
||||
// name: "session",
|
||||
// value: "abc123",
|
||||
// path: "/",
|
||||
// secure: true,
|
||||
// httpOnly: true,
|
||||
// sameSite: "lax",
|
||||
// partitioned: false
|
||||
// }
|
||||
|
||||
// Works with JSON.stringify
|
||||
const jsonString = JSON.stringify(cookie);
|
||||
```
|
||||
|
||||
### Static methods
|
||||
|
||||
#### `Cookie.parse(cookieString: string): Cookie`
|
||||
|
||||
Parses a cookie string into a `Cookie` instance.
|
||||
|
||||
```ts
|
||||
const cookie = Bun.Cookie.parse("name=value; Path=/; Secure; SameSite=Lax");
|
||||
|
||||
console.log(cookie.name); // "name"
|
||||
console.log(cookie.value); // "value"
|
||||
console.log(cookie.path); // "/"
|
||||
console.log(cookie.secure); // true
|
||||
console.log(cookie.sameSite); // "lax"
|
||||
```
|
||||
|
||||
#### `Cookie.from(name: string, value: string, options?: CookieInit): Cookie`
|
||||
|
||||
Factory method to create a cookie.
|
||||
|
||||
```ts
|
||||
const cookie = Bun.Cookie.from("session", "abc123", {
|
||||
httpOnly: true,
|
||||
secure: true,
|
||||
maxAge: 3600,
|
||||
});
|
||||
```
|
||||
|
||||
## Types
|
||||
|
||||
```ts
|
||||
interface CookieInit {
|
||||
name?: string;
|
||||
value?: string;
|
||||
domain?: string;
|
||||
/** Defaults to '/'. To allow the browser to set the path, use an empty string. */
|
||||
path?: string;
|
||||
expires?: number | Date | string;
|
||||
secure?: boolean;
|
||||
/** Defaults to `lax`. */
|
||||
sameSite?: CookieSameSite;
|
||||
httpOnly?: boolean;
|
||||
partitioned?: boolean;
|
||||
maxAge?: number;
|
||||
}
|
||||
|
||||
interface CookieStoreDeleteOptions {
|
||||
name: string;
|
||||
domain?: string | null;
|
||||
path?: string;
|
||||
}
|
||||
|
||||
interface CookieStoreGetOptions {
|
||||
name?: string;
|
||||
url?: string;
|
||||
}
|
||||
|
||||
type CookieSameSite = "strict" | "lax" | "none";
|
||||
|
||||
class Cookie {
|
||||
constructor(name: string, value: string, options?: CookieInit);
|
||||
constructor(cookieString: string);
|
||||
constructor(cookieObject?: CookieInit);
|
||||
|
||||
readonly name: string;
|
||||
value: string;
|
||||
domain?: string;
|
||||
path: string;
|
||||
expires?: Date;
|
||||
secure: boolean;
|
||||
sameSite: CookieSameSite;
|
||||
partitioned: boolean;
|
||||
maxAge?: number;
|
||||
httpOnly: boolean;
|
||||
|
||||
isExpired(): boolean;
|
||||
|
||||
serialize(): string;
|
||||
toString(): string;
|
||||
toJSON(): CookieInit;
|
||||
|
||||
static parse(cookieString: string): Cookie;
|
||||
static from(name: string, value: string, options?: CookieInit): Cookie;
|
||||
}
|
||||
|
||||
class CookieMap implements Iterable<[string, string]> {
|
||||
constructor(init?: string[][] | Record<string, string> | string);
|
||||
|
||||
get(name: string): string | null;
|
||||
|
||||
toSetCookieHeaders(): string[];
|
||||
|
||||
has(name: string): boolean;
|
||||
set(name: string, value: string, options?: CookieInit): void;
|
||||
set(options: CookieInit): void;
|
||||
delete(name: string): void;
|
||||
delete(options: CookieStoreDeleteOptions): void;
|
||||
delete(name: string, options: Omit<CookieStoreDeleteOptions, "name">): void;
|
||||
toJSON(): Record<string, string>;
|
||||
|
||||
readonly size: number;
|
||||
|
||||
entries(): IterableIterator<[string, string]>;
|
||||
keys(): IterableIterator<string>;
|
||||
values(): IterableIterator<string>;
|
||||
forEach(callback: (value: string, key: string, map: CookieMap) => void): void;
|
||||
[Symbol.iterator](): IterableIterator<[string, string]>;
|
||||
}
|
||||
```
|
||||
@@ -61,6 +61,7 @@ Routes in `Bun.serve()` receive a `BunRequest` (which extends [`Request`](https:
|
||||
// Simplified for brevity
|
||||
interface BunRequest<T extends string> extends Request {
|
||||
params: Record<T, string>;
|
||||
readonly cookies: CookieMap;
|
||||
}
|
||||
```
|
||||
|
||||
@@ -934,6 +935,83 @@ const server = Bun.serve({
|
||||
|
||||
Returns `null` for closed requests or Unix domain sockets.
|
||||
|
||||
## Working with Cookies
|
||||
|
||||
Bun provides a built-in API for working with cookies in HTTP requests and responses. The `BunRequest` object includes a `cookies` property that provides a `CookieMap` for easily accessing and manipulating cookies. When using `routes`, `Bun.serve()` automatically tracks `request.cookies.set` and applies them to the response.
|
||||
|
||||
### Reading cookies
|
||||
|
||||
Read cookies from incoming requests using the `cookies` property on the `BunRequest` object:
|
||||
|
||||
```ts
|
||||
Bun.serve({
|
||||
routes: {
|
||||
"/profile": req => {
|
||||
// Access cookies from the request
|
||||
const userId = req.cookies.get("user_id");
|
||||
const theme = req.cookies.get("theme") || "light";
|
||||
|
||||
return Response.json({
|
||||
userId,
|
||||
theme,
|
||||
message: "Profile page",
|
||||
});
|
||||
},
|
||||
},
|
||||
});
|
||||
```
|
||||
|
||||
### Setting cookies
|
||||
|
||||
To set cookies, use the `set` method on the `CookieMap` from the `BunRequest` object.
|
||||
|
||||
```ts
|
||||
Bun.serve({
|
||||
routes: {
|
||||
"/login": req => {
|
||||
const cookies = req.cookies;
|
||||
|
||||
// Set a cookie with various options
|
||||
cookies.set("user_id", "12345", {
|
||||
maxAge: 60 * 60 * 24 * 7, // 1 week
|
||||
httpOnly: true,
|
||||
secure: true,
|
||||
path: "/",
|
||||
});
|
||||
|
||||
// Add a theme preference cookie
|
||||
cookies.set("theme", "dark");
|
||||
|
||||
// Modified cookies from the request are automatically applied to the response
|
||||
return new Response("Login successful");
|
||||
},
|
||||
},
|
||||
});
|
||||
```
|
||||
|
||||
`Bun.serve()` automatically tracks modified cookies from the request and applies them to the response.
|
||||
|
||||
### Deleting cookies
|
||||
|
||||
To delete a cookie, use the `delete` method on the `request.cookies` (`CookieMap`) object:
|
||||
|
||||
```ts
|
||||
Bun.serve({
|
||||
routes: {
|
||||
"/logout": req => {
|
||||
// Delete the user_id cookie
|
||||
req.cookies.delete("user_id", {
|
||||
path: "/",
|
||||
});
|
||||
|
||||
return new Response("Logged out successfully");
|
||||
},
|
||||
},
|
||||
});
|
||||
```
|
||||
|
||||
Deleted cookies become a `Set-Cookie` header on the response with the `maxAge` set to `0` and an empty `value`.
|
||||
|
||||
## Server Metrics
|
||||
|
||||
### server.pendingRequests and server.pendingWebSockets
|
||||
|
||||
514
docs/api/redis.md
Normal file
514
docs/api/redis.md
Normal file
@@ -0,0 +1,514 @@
|
||||
Bun provides native bindings for working with Redis databases with a modern, Promise-based API. The interface is designed to be simple and performant, with built-in connection management, fully typed responses, and TLS support. **New in Bun v1.2.9**
|
||||
|
||||
```ts
|
||||
import { redis } from "bun";
|
||||
|
||||
// Set a key
|
||||
await redis.set("greeting", "Hello from Bun!");
|
||||
|
||||
// Get a key
|
||||
const greeting = await redis.get("greeting");
|
||||
console.log(greeting); // "Hello from Bun!"
|
||||
|
||||
// Increment a counter
|
||||
await redis.set("counter", 0);
|
||||
await redis.incr("counter");
|
||||
|
||||
// Check if a key exists
|
||||
const exists = await redis.exists("greeting");
|
||||
|
||||
// Delete a key
|
||||
await redis.del("greeting");
|
||||
```
|
||||
|
||||
{% features title="Features" %}
|
||||
|
||||
{% icon size=20 name="Bolt" /%} Fast native implementation using Zig and JavaScriptCore
|
||||
|
||||
{% icon size=20 name="Link" /%} Automatic pipelining for better performance
|
||||
|
||||
{% icon size=20 name="EthernetPort" /%} Auto-reconnect with exponential backoff
|
||||
|
||||
{% icon size=20 name="Omega" /%} Support for RESP3 protocol
|
||||
|
||||
{% icon size=20 name="Lock" /%} TLS support
|
||||
|
||||
{% icon size=20 name="Clock" /%} Connection management with configurable timeouts
|
||||
|
||||
{% icon size=20 name="IndentDecrease" /%} Offline command queue
|
||||
|
||||
{% icon size=20 name="Settings" /%} Automatic configuration with environment variables
|
||||
|
||||
{% icon size=20 name="Hash" /%} Support for hash, set, and other Redis data structures
|
||||
|
||||
{% /features %}
|
||||
|
||||
## Getting Started
|
||||
|
||||
To use the Redis client, you first need to create a connection:
|
||||
|
||||
```ts
|
||||
import { redis, RedisClient } from "bun";
|
||||
|
||||
// Using the default client (reads connection info from environment)
|
||||
// process.env.REDIS_URL is used by default
|
||||
await redis.set("hello", "world");
|
||||
const result = await redis.get("hello");
|
||||
|
||||
// Creating a custom client
|
||||
const client = new RedisClient("redis://username:password@localhost:6379");
|
||||
await client.set("counter", "0");
|
||||
await client.incr("counter");
|
||||
```
|
||||
|
||||
By default, the client reads connection information from the following environment variables (in order of precedence):
|
||||
|
||||
- `REDIS_URL`
|
||||
- If not set, defaults to `"redis://localhost:6379"`
|
||||
|
||||
### Connection Lifecycle
|
||||
|
||||
The Redis client automatically handles connections in the background:
|
||||
|
||||
```ts
|
||||
// No connection is made until a command is executed
|
||||
const client = new RedisClient();
|
||||
|
||||
// First command initiates the connection
|
||||
await client.set("key", "value");
|
||||
|
||||
// Connection remains open for subsequent commands
|
||||
await client.get("key");
|
||||
|
||||
// Explicitly close the connection when done
|
||||
client.disconnect();
|
||||
```
|
||||
|
||||
You can also manually control the connection lifecycle:
|
||||
|
||||
```ts
|
||||
const client = new RedisClient();
|
||||
|
||||
// Explicitly connect
|
||||
await client.connect();
|
||||
|
||||
// Run commands
|
||||
await client.set("key", "value");
|
||||
|
||||
// Disconnect when done
|
||||
client.disconnect();
|
||||
```
|
||||
|
||||
## Basic Operations
|
||||
|
||||
### String Operations
|
||||
|
||||
```ts
|
||||
// Set a key
|
||||
await redis.set("user:1:name", "Alice");
|
||||
|
||||
// Get a key
|
||||
const name = await redis.get("user:1:name");
|
||||
|
||||
// Delete a key
|
||||
await redis.del("user:1:name");
|
||||
|
||||
// Check if a key exists
|
||||
const exists = await redis.exists("user:1:name");
|
||||
|
||||
// Set expiration (in seconds)
|
||||
await redis.set("session:123", "active");
|
||||
await redis.expire("session:123", 3600); // expires in 1 hour
|
||||
|
||||
// Get time to live (in seconds)
|
||||
const ttl = await redis.ttl("session:123");
|
||||
```
|
||||
|
||||
### Numeric Operations
|
||||
|
||||
```ts
|
||||
// Set initial value
|
||||
await redis.set("counter", "0");
|
||||
|
||||
// Increment by 1
|
||||
await redis.incr("counter");
|
||||
|
||||
// Decrement by 1
|
||||
await redis.decr("counter");
|
||||
```
|
||||
|
||||
### Hash Operations
|
||||
|
||||
```ts
|
||||
// Set multiple fields in a hash
|
||||
await redis.hmset("user:123", [
|
||||
"name",
|
||||
"Alice",
|
||||
"email",
|
||||
"alice@example.com",
|
||||
"active",
|
||||
"true",
|
||||
]);
|
||||
|
||||
// Get multiple fields from a hash
|
||||
const userFields = await redis.hmget("user:123", ["name", "email"]);
|
||||
console.log(userFields); // ["Alice", "alice@example.com"]
|
||||
|
||||
// Increment a numeric field in a hash
|
||||
await redis.hincrby("user:123", "visits", 1);
|
||||
|
||||
// Increment a float field in a hash
|
||||
await redis.hincrbyfloat("user:123", "score", 1.5);
|
||||
```
|
||||
|
||||
### Set Operations
|
||||
|
||||
```ts
|
||||
// Add member to set
|
||||
await redis.sadd("tags", "javascript");
|
||||
|
||||
// Remove member from set
|
||||
await redis.srem("tags", "javascript");
|
||||
|
||||
// Check if member exists in set
|
||||
const isMember = await redis.sismember("tags", "javascript");
|
||||
|
||||
// Get all members of a set
|
||||
const allTags = await redis.smembers("tags");
|
||||
|
||||
// Get a random member
|
||||
const randomTag = await redis.srandmember("tags");
|
||||
|
||||
// Pop (remove and return) a random member
|
||||
const poppedTag = await redis.spop("tags");
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Command Execution and Pipelining
|
||||
|
||||
The client automatically pipelines commands, improving performance by sending multiple commands in a batch and processing responses as they arrive.
|
||||
|
||||
```ts
|
||||
// Commands are automatically pipelined by default
|
||||
const [infoResult, listResult] = await Promise.all([
|
||||
redis.get("user:1:name"),
|
||||
redis.get("user:2:email"),
|
||||
]);
|
||||
```
|
||||
|
||||
To disable automatic pipelining, you can set the `enableAutoPipelining` option to `false`:
|
||||
|
||||
```ts
|
||||
const client = new RedisClient("redis://localhost:6379", {
|
||||
enableAutoPipelining: false,
|
||||
});
|
||||
```
|
||||
|
||||
### Raw Commands
|
||||
|
||||
When you need to use commands that don't have convenience methods, you can use the `send` method:
|
||||
|
||||
```ts
|
||||
// Run any Redis command
|
||||
const info = await redis.send("INFO", []);
|
||||
|
||||
// LPUSH to a list
|
||||
await redis.send("LPUSH", ["mylist", "value1", "value2"]);
|
||||
|
||||
// Get list range
|
||||
const list = await redis.send("LRANGE", ["mylist", "0", "-1"]);
|
||||
```
|
||||
|
||||
The `send` method allows you to use any Redis command, even ones that don't have dedicated methods in the client. The first argument is the command name, and the second argument is an array of string arguments.
|
||||
|
||||
### Connection Events
|
||||
|
||||
You can register handlers for connection events:
|
||||
|
||||
```ts
|
||||
const client = new RedisClient();
|
||||
|
||||
// Called when successfully connected to Redis server
|
||||
client.onconnect = () => {
|
||||
console.log("Connected to Redis server");
|
||||
};
|
||||
|
||||
// Called when disconnected from Redis server
|
||||
client.onclose = error => {
|
||||
console.error("Disconnected from Redis server:", error);
|
||||
};
|
||||
|
||||
// Manually connect/disconnect
|
||||
await client.connect();
|
||||
client.disconnect();
|
||||
```
|
||||
|
||||
### Connection Status and Monitoring
|
||||
|
||||
```ts
|
||||
// Check if connected
|
||||
console.log(client.connected); // boolean indicating connection status
|
||||
|
||||
// Check amount of data buffered (in bytes)
|
||||
console.log(client.bufferedAmount);
|
||||
```
|
||||
|
||||
### Type Conversion
|
||||
|
||||
The Redis client handles automatic type conversion for Redis responses:
|
||||
|
||||
- Integer responses are returned as JavaScript numbers
|
||||
- Bulk strings are returned as JavaScript strings
|
||||
- Simple strings are returned as JavaScript strings
|
||||
- Null bulk strings are returned as `null`
|
||||
- Array responses are returned as JavaScript arrays
|
||||
- Error responses throw JavaScript errors with appropriate error codes
|
||||
- Boolean responses (RESP3) are returned as JavaScript booleans
|
||||
- Map responses (RESP3) are returned as JavaScript objects
|
||||
- Set responses (RESP3) are returned as JavaScript arrays
|
||||
|
||||
Special handling for specific commands:
|
||||
|
||||
- `EXISTS` returns a boolean instead of a number (1 becomes true, 0 becomes false)
|
||||
- `SISMEMBER` returns a boolean (1 becomes true, 0 becomes false)
|
||||
|
||||
The following commands disable automatic pipelining:
|
||||
|
||||
- `AUTH`
|
||||
- `INFO`
|
||||
- `QUIT`
|
||||
- `EXEC`
|
||||
- `MULTI`
|
||||
- `WATCH`
|
||||
- `SCRIPT`
|
||||
- `SELECT`
|
||||
- `CLUSTER`
|
||||
- `DISCARD`
|
||||
- `UNWATCH`
|
||||
- `PIPELINE`
|
||||
- `SUBSCRIBE`
|
||||
- `UNSUBSCRIBE`
|
||||
- `PUNSUBSCRIBE`
|
||||
|
||||
## Connection Options
|
||||
|
||||
When creating a client, you can pass various options to configure the connection:
|
||||
|
||||
```ts
|
||||
const client = new RedisClient("redis://localhost:6379", {
|
||||
// Connection timeout in milliseconds (default: 10000)
|
||||
connectionTimeout: 5000,
|
||||
|
||||
// Idle timeout in milliseconds (default: 0 = no timeout)
|
||||
idleTimeout: 30000,
|
||||
|
||||
// Whether to automatically reconnect on disconnection (default: true)
|
||||
autoReconnect: true,
|
||||
|
||||
// Maximum number of reconnection attempts (default: 10)
|
||||
maxRetries: 10,
|
||||
|
||||
// Whether to queue commands when disconnected (default: true)
|
||||
enableOfflineQueue: true,
|
||||
|
||||
// Whether to automatically pipeline commands (default: true)
|
||||
enableAutoPipelining: true,
|
||||
|
||||
// TLS options (default: false)
|
||||
tls: true,
|
||||
// Alternatively, provide custom TLS config:
|
||||
// tls: {
|
||||
// rejectUnauthorized: true,
|
||||
// ca: "path/to/ca.pem",
|
||||
// cert: "path/to/cert.pem",
|
||||
// key: "path/to/key.pem",
|
||||
// }
|
||||
});
|
||||
```
|
||||
|
||||
### Reconnection Behavior
|
||||
|
||||
When a connection is lost, the client automatically attempts to reconnect with exponential backoff:
|
||||
|
||||
1. The client starts with a small delay (50ms) and doubles it with each attempt
|
||||
2. Reconnection delay is capped at 2000ms (2 seconds)
|
||||
3. The client attempts to reconnect up to `maxRetries` times (default: 10)
|
||||
4. Commands executed during disconnection are:
|
||||
- Queued if `enableOfflineQueue` is true (default)
|
||||
- Rejected immediately if `enableOfflineQueue` is false
|
||||
|
||||
## Supported URL Formats
|
||||
|
||||
The Redis client supports various URL formats:
|
||||
|
||||
```ts
|
||||
// Standard Redis URL
|
||||
new RedisClient("redis://localhost:6379");
|
||||
new RedisClient("redis://localhost:6379");
|
||||
|
||||
// With authentication
|
||||
new RedisClient("redis://username:password@localhost:6379");
|
||||
|
||||
// With database number
|
||||
new RedisClient("redis://localhost:6379/0");
|
||||
|
||||
// TLS connections
|
||||
new RedisClient("rediss://localhost:6379");
|
||||
new RedisClient("rediss://localhost:6379");
|
||||
new RedisClient("redis+tls://localhost:6379");
|
||||
new RedisClient("redis+tls://localhost:6379");
|
||||
|
||||
// Unix socket connections
|
||||
new RedisClient("redis+unix:///path/to/socket");
|
||||
new RedisClient("redis+unix:///path/to/socket");
|
||||
|
||||
// TLS over Unix socket
|
||||
new RedisClient("redis+tls+unix:///path/to/socket");
|
||||
new RedisClient("redis+tls+unix:///path/to/socket");
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The Redis client throws typed errors for different scenarios:
|
||||
|
||||
```ts
|
||||
try {
|
||||
await redis.get("non-existent-key");
|
||||
} catch (error) {
|
||||
if (error.code === "ERR_REDIS_CONNECTION_CLOSED") {
|
||||
console.error("Connection to Redis server was closed");
|
||||
} else if (error.code === "ERR_REDIS_AUTHENTICATION_FAILED") {
|
||||
console.error("Authentication failed");
|
||||
} else {
|
||||
console.error("Unexpected error:", error);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Common error codes:
|
||||
|
||||
- `ERR_REDIS_CONNECTION_CLOSED` - Connection to the server was closed
|
||||
- `ERR_REDIS_AUTHENTICATION_FAILED` - Failed to authenticate with the server
|
||||
- `ERR_REDIS_INVALID_RESPONSE` - Received an invalid response from the server
|
||||
|
||||
## Example Use Cases
|
||||
|
||||
### Caching
|
||||
|
||||
```ts
|
||||
async function getUserWithCache(userId) {
|
||||
const cacheKey = `user:${userId}`;
|
||||
|
||||
// Try to get from cache first
|
||||
const cachedUser = await redis.get(cacheKey);
|
||||
if (cachedUser) {
|
||||
return JSON.parse(cachedUser);
|
||||
}
|
||||
|
||||
// Not in cache, fetch from database
|
||||
const user = await database.getUser(userId);
|
||||
|
||||
// Store in cache for 1 hour
|
||||
await redis.set(cacheKey, JSON.stringify(user));
|
||||
await redis.expire(cacheKey, 3600);
|
||||
|
||||
return user;
|
||||
}
|
||||
```
|
||||
|
||||
### Rate Limiting
|
||||
|
||||
```ts
|
||||
async function rateLimit(ip, limit = 100, windowSecs = 3600) {
|
||||
const key = `ratelimit:${ip}`;
|
||||
|
||||
// Increment counter
|
||||
const count = await redis.incr(key);
|
||||
|
||||
// Set expiry if this is the first request in window
|
||||
if (count === 1) {
|
||||
await redis.expire(key, windowSecs);
|
||||
}
|
||||
|
||||
// Check if limit exceeded
|
||||
return {
|
||||
limited: count > limit,
|
||||
remaining: Math.max(0, limit - count),
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Session Storage
|
||||
|
||||
```ts
|
||||
async function createSession(userId, data) {
|
||||
const sessionId = crypto.randomUUID();
|
||||
const key = `session:${sessionId}`;
|
||||
|
||||
// Store session with expiration
|
||||
await redis.hmset(key, [
|
||||
"userId",
|
||||
userId.toString(),
|
||||
"created",
|
||||
Date.now().toString(),
|
||||
"data",
|
||||
JSON.stringify(data),
|
||||
]);
|
||||
await redis.expire(key, 86400); // 24 hours
|
||||
|
||||
return sessionId;
|
||||
}
|
||||
|
||||
async function getSession(sessionId) {
|
||||
const key = `session:${sessionId}`;
|
||||
|
||||
// Get session data
|
||||
const exists = await redis.exists(key);
|
||||
if (!exists) return null;
|
||||
|
||||
const [userId, created, data] = await redis.hmget(key, [
|
||||
"userId",
|
||||
"created",
|
||||
"data",
|
||||
]);
|
||||
|
||||
return {
|
||||
userId: Number(userId),
|
||||
created: Number(created),
|
||||
data: JSON.parse(data),
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
Bun's Redis client is implemented in Zig and uses the Redis Serialization Protocol (RESP3). It manages connections efficiently and provides automatic reconnection with exponential backoff.
|
||||
|
||||
The client supports pipelining commands, meaning multiple commands can be sent without waiting for the replies to previous commands. This significantly improves performance when sending multiple commands in succession.
|
||||
|
||||
### RESP3 Protocol Support
|
||||
|
||||
Bun's Redis client uses the newer RESP3 protocol by default, which provides more data types and features compared to RESP2:
|
||||
|
||||
- Better error handling with typed errors
|
||||
- Native Boolean responses
|
||||
- Map/Dictionary responses (key-value objects)
|
||||
- Set responses
|
||||
- Double (floating point) values
|
||||
- BigNumber support for large integer values
|
||||
|
||||
When connecting to Redis servers using older versions that don't support RESP3, the client automatically falls back to compatible modes.
|
||||
|
||||
## Limitations and Future Plans
|
||||
|
||||
Current limitations of the Redis client we are planning to address in future versions:
|
||||
|
||||
- [ ] No dedicated API for pub/sub functionality (though you can use the raw command API)
|
||||
- [ ] Transactions (MULTI/EXEC) must be done through raw commands for now
|
||||
- [ ] Streams are supported but without dedicated methods
|
||||
|
||||
Unsupported features:
|
||||
|
||||
- Redis Sentinel
|
||||
- Redis Cluster
|
||||
@@ -253,6 +253,19 @@ const proc = Bun.spawn({
|
||||
|
||||
The `killSignal` option also controls which signal is sent when an AbortSignal is aborted.
|
||||
|
||||
## Using maxBuffer
|
||||
|
||||
For spawnSync, you can limit the maximum number of bytes of output before the process is killed:
|
||||
|
||||
```ts
|
||||
// Kill 'yes' after it emits over 100 bytes of output
|
||||
const result = Bun.spawnSync({
|
||||
cmd: ["yes"], // or ["bun", "exec", "yes"] on windows
|
||||
maxBuffer: 100,
|
||||
});
|
||||
// process exits
|
||||
```
|
||||
|
||||
## Inter-process communication (IPC)
|
||||
|
||||
Bun supports direct inter-process communication channel between two `bun` processes. To receive messages from a spawned Bun subprocess, specify an `ipc` handler.
|
||||
@@ -423,6 +436,7 @@ namespace SpawnOptions {
|
||||
signal?: AbortSignal;
|
||||
timeout?: number;
|
||||
killSignal?: string | number;
|
||||
maxBuffer?: number;
|
||||
}
|
||||
|
||||
type Readable =
|
||||
|
||||
@@ -240,7 +240,7 @@ const result = await sql.unsafe(
|
||||
|
||||
### Execute and Cancelling Queries
|
||||
|
||||
Bun's SQL is lazy that means its will only start executing when awaited or executed with `.execute()`.
|
||||
Bun's SQL is lazy, which means it will only start executing when awaited or executed with `.execute()`.
|
||||
You can cancel a query that is currently executing by calling the `cancel()` method on the query object.
|
||||
|
||||
```ts
|
||||
|
||||
@@ -15,8 +15,8 @@ Below is the full set of recommended `compilerOptions` for a Bun project. With t
|
||||
```jsonc
|
||||
{
|
||||
"compilerOptions": {
|
||||
// Enable latest features
|
||||
"lib": ["ESNext", "DOM"],
|
||||
// Environment setup & latest features
|
||||
"lib": ["ESNext"],
|
||||
"target": "ESNext",
|
||||
"module": "ESNext",
|
||||
"moduleDetection": "force",
|
||||
@@ -33,11 +33,12 @@ Below is the full set of recommended `compilerOptions` for a Bun project. With t
|
||||
"strict": true,
|
||||
"skipLibCheck": true,
|
||||
"noFallthroughCasesInSwitch": true,
|
||||
"noUncheckedIndexedAccess": true,
|
||||
|
||||
// Some stricter flags
|
||||
"noUnusedLocals": true,
|
||||
"noUnusedParameters": true,
|
||||
"noPropertyAccessFromIndexSignature": true,
|
||||
// Some stricter flags (disabled by default)
|
||||
"noUnusedLocals": false,
|
||||
"noUnusedParameters": false,
|
||||
"noPropertyAccessFromIndexSignature": false,
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
37
docs/nav.ts
37
docs/nav.ts
@@ -265,12 +265,25 @@ export default {
|
||||
page("test/time", "Dates and times", {
|
||||
description: "Control the date & time in your tests for more reliable and deterministic tests",
|
||||
}),
|
||||
page("test/dom", "DOM testing", {
|
||||
description: "Write headless tests for UI and React/Vue/Svelte/Lit components with happy-dom",
|
||||
}),
|
||||
|
||||
page("test/coverage", "Code coverage", {
|
||||
description: "Generate code coverage reports with `bun test --coverage`",
|
||||
}),
|
||||
page("test/reporters", "Test reporters", {
|
||||
description: "Add a junit reporter to your test runs",
|
||||
}),
|
||||
page("test/configuration", "Test configuration", {
|
||||
description: "Configure the test runner with bunfig.toml",
|
||||
}),
|
||||
page("test/runtime-behavior", "Runtime behavior", {
|
||||
description: "Learn how the test runner affects Bun's runtime behavior",
|
||||
}),
|
||||
page("test/discovery", "Finding tests", {
|
||||
description: "Learn how the test runner discovers tests",
|
||||
}),
|
||||
page("test/dom", "DOM testing", {
|
||||
description: "Write headless tests for UI and React/Vue/Svelte/Lit components with happy-dom",
|
||||
}),
|
||||
|
||||
divider("Package runner"),
|
||||
page("cli/bunx", "`bunx`", {
|
||||
@@ -331,6 +344,9 @@ export default {
|
||||
page("api/file-io", "File I/O", {
|
||||
description: `Read and write files fast with Bun's heavily optimized file system API.`,
|
||||
}), // "`Bun.write`"),
|
||||
page("api/redis", "Redis client", {
|
||||
description: `Bun provides a fast, native Redis client with automatic command pipelining for better performance.`,
|
||||
}),
|
||||
page("api/import-meta", "import.meta", {
|
||||
description: `Module-scoped metadata and utilities`,
|
||||
}), // "`bun:sqlite`"),
|
||||
@@ -355,24 +371,24 @@ export default {
|
||||
page("api/spawn", "Child processes", {
|
||||
description: `Spawn sync and async child processes with easily configurable input and output streams.`,
|
||||
}), // "`Bun.spawn`"),
|
||||
page("api/transpiler", "Transpiler", {
|
||||
description: `Bun exposes its internal transpiler as a pluggable API.`,
|
||||
}), // "`Bun.Transpiler`"),
|
||||
page("api/html-rewriter", "HTMLRewriter", {
|
||||
description: `Parse and transform HTML with Bun's native HTMLRewriter API, inspired by Cloudflare Workers.`,
|
||||
}), // "`HTMLRewriter`"),
|
||||
page("api/hashing", "Hashing", {
|
||||
description: `Native support for a range of fast hashing algorithms.`,
|
||||
}), // "`Bun.serve`"),
|
||||
page("api/console", "Console", {
|
||||
description: `Bun implements a Node.js-compatible \`console\` object with colorized output and deep pretty-printing.`,
|
||||
}), // "`Node-API`"),
|
||||
page("api/cookie", "Cookie", {
|
||||
description: "Bun's native Cookie API simplifies working with HTTP cookies.",
|
||||
}), // "`Node-API`"),
|
||||
page("api/ffi", "FFI", {
|
||||
description: `Call native code from JavaScript with Bun's foreign function interface (FFI) API.`,
|
||||
}), // "`bun:ffi`"),
|
||||
page("api/cc", "C Compiler", {
|
||||
description: `Build & run native C from JavaScript with Bun's native C compiler API`,
|
||||
}), // "`bun:ffi`"),
|
||||
page("api/html-rewriter", "HTMLRewriter", {
|
||||
description: `Parse and transform HTML with Bun's native HTMLRewriter API, inspired by Cloudflare Workers.`,
|
||||
}), // "`HTMLRewriter`"),
|
||||
page("api/test", "Testing", {
|
||||
description: `Bun's built-in test runner is fast and uses Jest-compatible syntax.`,
|
||||
}), // "`bun:test`"),
|
||||
@@ -398,6 +414,9 @@ export default {
|
||||
page("api/color", "Color", {
|
||||
description: `Bun's color function leverages Bun's CSS parser for parsing, normalizing, and converting colors from user input to a variety of output formats.`,
|
||||
}), // "`Color`"),
|
||||
page("api/transpiler", "Transpiler", {
|
||||
description: `Bun exposes its internal transpiler as a pluggable API.`,
|
||||
}), // "`Bun.Transpiler`"),
|
||||
|
||||
// divider("Dev Server"),
|
||||
// page("bun-dev", "Vanilla"),
|
||||
|
||||
@@ -104,7 +104,7 @@ This page is updated regularly to reflect compatibility status of the latest ver
|
||||
|
||||
### [`node:crypto`](https://nodejs.org/api/crypto.html)
|
||||
|
||||
🟡 Missing `ECDH` `checkPrime` `checkPrimeSync` `generatePrime` `generatePrimeSync` `hkdf` `hkdfSync` `secureHeapUsed` `setEngine` `setFips`
|
||||
🟡 Missing `secureHeapUsed` `setEngine` `setFips`
|
||||
|
||||
Some methods are not optimized yet.
|
||||
|
||||
@@ -118,7 +118,7 @@ Some methods are not optimized yet.
|
||||
|
||||
### [`node:module`](https://nodejs.org/api/module.html)
|
||||
|
||||
🟡 Missing `runMain` `syncBuiltinESMExports`, `Module#load()`. Overriding `require.cache` is supported for ESM & CJS modules. `module._extensions`, `module._pathCache`, `module._cache` are no-ops. `module.register` is not implemented and we recommend using a [`Bun.plugin`](https://bun.sh/docs/runtime/plugins) in the meantime.
|
||||
🟡 Missing `syncBuiltinESMExports`, `Module#load()`. Overriding `require.cache` is supported for ESM & CJS modules. `module._extensions`, `module._pathCache`, `module._cache` are no-ops. `module.register` is not implemented and we recommend using a [`Bun.plugin`](https://bun.sh/docs/runtime/plugins) in the meantime.
|
||||
|
||||
### [`node:net`](https://nodejs.org/api/net.html)
|
||||
|
||||
@@ -378,8 +378,7 @@ The table below lists all globals implemented by Node.js and Bun's current compa
|
||||
|
||||
### [`require()`](https://nodejs.org/api/globals.html#require)
|
||||
|
||||
🟢 Fully implemented, including [`require.main`](https://nodejs.org/api/modules.html#requiremain), [`require.cache`](https://nodejs.org/api/modules.html#requirecache), [`require.resolve`](https://nodejs.org/api/modules.html#requireresolverequest-options). `require.extensions` is a stub.
|
||||
|
||||
🟢 Fully implemented, including [`require.main`](https://nodejs.org/api/modules.html#requiremain), [`require.cache`](https://nodejs.org/api/modules.html#requirecache), [`require.resolve`](https://nodejs.org/api/modules.html#requireresolverequest-options).
|
||||
### [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response)
|
||||
|
||||
🟢 Fully implemented.
|
||||
|
||||
87
docs/test/configuration.md
Normal file
87
docs/test/configuration.md
Normal file
@@ -0,0 +1,87 @@
|
||||
Configure `bun test` via the `bunfig.toml` file and command-line options. This page documents the available configuration options for `bun test`.
|
||||
|
||||
## bunfig.toml options
|
||||
|
||||
You can configure `bun test` behavior by adding a `[test]` section to your `bunfig.toml` file:
|
||||
|
||||
```toml
|
||||
[test]
|
||||
# Options go here
|
||||
```
|
||||
|
||||
### Test discovery
|
||||
|
||||
#### root
|
||||
|
||||
The `root` option specifies a root directory for test discovery, overriding the default behavior of scanning from the project root.
|
||||
|
||||
```toml
|
||||
[test]
|
||||
root = "src" # Only scan for tests in the src directory
|
||||
```
|
||||
|
||||
### Reporters
|
||||
|
||||
#### reporter.junit
|
||||
|
||||
Configure the JUnit reporter output file path directly in the config file:
|
||||
|
||||
```toml
|
||||
[test.reporter]
|
||||
junit = "path/to/junit.xml" # Output path for JUnit XML report
|
||||
```
|
||||
|
||||
This complements the `--reporter=junit` and `--reporter-outfile` CLI flags.
|
||||
|
||||
### Memory usage
|
||||
|
||||
#### smol
|
||||
|
||||
Enable the `--smol` memory-saving mode specifically for the test runner:
|
||||
|
||||
```toml
|
||||
[test]
|
||||
smol = true # Reduce memory usage during test runs
|
||||
```
|
||||
|
||||
This is equivalent to using the `--smol` flag on the command line.
|
||||
|
||||
### Coverage options
|
||||
|
||||
In addition to the options documented in the [coverage documentation](./coverage.md), the following options are available:
|
||||
|
||||
#### coverageSkipTestFiles
|
||||
|
||||
Exclude files matching test patterns (e.g., `*.test.ts`) from the coverage report:
|
||||
|
||||
```toml
|
||||
[test]
|
||||
coverageSkipTestFiles = true # Exclude test files from coverage reports
|
||||
```
|
||||
|
||||
#### coverageThreshold (Object form)
|
||||
|
||||
The coverage threshold can be specified either as a number (as shown in the coverage documentation) or as an object with specific thresholds:
|
||||
|
||||
```toml
|
||||
[test]
|
||||
# Set specific thresholds for different coverage metrics
|
||||
coverageThreshold = { lines = 0.9, functions = 0.8, statements = 0.85 }
|
||||
```
|
||||
|
||||
Setting any of these enables `fail_on_low_coverage`, causing the test run to fail if coverage is below the threshold.
|
||||
|
||||
#### coverageIgnoreSourcemaps
|
||||
|
||||
Internally, Bun transpiles every file. That means code coverage must also go through sourcemaps before they can be reported. We expose this as a flag to allow you to opt out of this behavior, but it will be confusing because during the transpilation process, Bun may move code around and change variable names. This option is mostly useful for debugging coverage issues.
|
||||
|
||||
```toml
|
||||
[test]
|
||||
coverageIgnoreSourcemaps = true # Don't use sourcemaps for coverage analysis
|
||||
```
|
||||
|
||||
When using this option, you probably want to stick a `// @bun` comment at the top of the source file to opt out of the transpilation process.
|
||||
|
||||
### Install settings inheritance
|
||||
|
||||
The `bun test` command inherits relevant network and installation configuration (registry, cafile, prefer, exact, etc.) from the `[install]` section of bunfig.toml. This is important if tests need to interact with private registries or require specific install behaviors triggered during the test run.
|
||||
@@ -52,9 +52,22 @@ It is possible to specify a coverage threshold in `bunfig.toml`. If your test su
|
||||
coverageThreshold = 0.9
|
||||
|
||||
# to set different thresholds for lines and functions
|
||||
coverageThreshold = { lines = 0.9, functions = 0.9 }
|
||||
coverageThreshold = { lines = 0.9, functions = 0.9, statements = 0.9 }
|
||||
```
|
||||
|
||||
Setting any of these thresholds enables `fail_on_low_coverage`, causing the test run to fail if coverage is below the threshold.
|
||||
|
||||
### Exclude test files from coverage
|
||||
|
||||
By default, test files themselves are included in coverage reports. You can exclude them with:
|
||||
|
||||
```toml
|
||||
[test]
|
||||
coverageSkipTestFiles = true # default false
|
||||
```
|
||||
|
||||
This will exclude files matching test patterns (e.g., `*.test.ts`, `*_spec.js`) from the coverage report.
|
||||
|
||||
### Sourcemaps
|
||||
|
||||
Internally, Bun transpiles all files by default, so Bun automatically generates an internal [source map](https://web.dev/source-maps/) that maps lines of your original source code onto Bun's internal representation. If for any reason you want to disable this, set `test.coverageIgnoreSourcemaps` to `true`; this will rarely be desirable outside of advanced use cases.
|
||||
@@ -64,6 +77,14 @@ Internally, Bun transpiles all files by default, so Bun automatically generates
|
||||
coverageIgnoreSourcemaps = true # default false
|
||||
```
|
||||
|
||||
### Coverage defaults
|
||||
|
||||
By default, coverage reports:
|
||||
|
||||
1. Exclude `node_modules` directories
|
||||
2. Exclude files loaded via non-JS/TS loaders (e.g., .css, .txt) unless a custom JS loader is specified
|
||||
3. Include test files themselves (can be disabled with `coverageSkipTestFiles = true` as shown above)
|
||||
|
||||
### Coverage reporters
|
||||
|
||||
By default, coverage reports will be printed to the console.
|
||||
|
||||
85
docs/test/discovery.md
Normal file
85
docs/test/discovery.md
Normal file
@@ -0,0 +1,85 @@
|
||||
bun test's file discovery mechanism determines which files to run as tests. Understanding how it works helps you structure your test files effectively.
|
||||
|
||||
## Default Discovery Logic
|
||||
|
||||
By default, `bun test` recursively searches the project directory for files that match specific patterns:
|
||||
|
||||
- `*.test.{js|jsx|ts|tsx}` - Files ending with `.test.js`, `.test.jsx`, `.test.ts`, or `.test.tsx`
|
||||
- `*_test.{js|jsx|ts|tsx}` - Files ending with `_test.js`, `_test.jsx`, `_test.ts`, or `_test.tsx`
|
||||
- `*.spec.{js|jsx|ts|tsx}` - Files ending with `.spec.js`, `.spec.jsx`, `.spec.ts`, or `.spec.tsx`
|
||||
- `*_spec.{js|jsx|ts|tsx}` - Files ending with `_spec.js`, `_spec.jsx`, `_spec.ts`, or `_spec.tsx`
|
||||
|
||||
## Exclusions
|
||||
|
||||
By default, Bun test ignores:
|
||||
|
||||
- `node_modules` directories
|
||||
- Hidden directories (those starting with a period `.`)
|
||||
- Files that don't have JavaScript-like extensions (based on available loaders)
|
||||
|
||||
## Customizing Test Discovery
|
||||
|
||||
### Positional Arguments as Filters
|
||||
|
||||
You can filter which test files run by passing additional positional arguments to `bun test`:
|
||||
|
||||
```bash
|
||||
$ bun test <filter> <filter> ...
|
||||
```
|
||||
|
||||
Any test file with a path that contains one of the filters will run. These filters are simple substring matches, not glob patterns.
|
||||
|
||||
For example, to run all tests in a `utils` directory:
|
||||
|
||||
```bash
|
||||
$ bun test utils
|
||||
```
|
||||
|
||||
This would match files like `src/utils/string.test.ts` and `lib/utils/array_test.js`.
|
||||
|
||||
### Specifying Exact File Paths
|
||||
|
||||
To run a specific file in the test runner, make sure the path starts with `./` or `/` to distinguish it from a filter name:
|
||||
|
||||
```bash
|
||||
$ bun test ./test/specific-file.test.ts
|
||||
```
|
||||
|
||||
### Filter by Test Name
|
||||
|
||||
To filter tests by name rather than file path, use the `-t`/`--test-name-pattern` flag with a regex pattern:
|
||||
|
||||
```sh
|
||||
# run all tests with "addition" in the name
|
||||
$ bun test --test-name-pattern addition
|
||||
```
|
||||
|
||||
The pattern is matched against a concatenated string of the test name prepended with the labels of all its parent describe blocks, separated by spaces. For example, a test defined as:
|
||||
|
||||
```js
|
||||
describe("Math", () => {
|
||||
describe("operations", () => {
|
||||
test("should add correctly", () => {
|
||||
// ...
|
||||
});
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
Would be matched against the string "Math operations should add correctly".
|
||||
|
||||
### Changing the Root Directory
|
||||
|
||||
By default, Bun looks for test files starting from the current working directory. You can change this with the `root` option in your `bunfig.toml`:
|
||||
|
||||
```toml
|
||||
[test]
|
||||
root = "src" # Only scan for tests in the src directory
|
||||
```
|
||||
|
||||
## Execution Order
|
||||
|
||||
Tests are run in the following order:
|
||||
|
||||
1. Test files are executed sequentially (not in parallel)
|
||||
2. Within each file, tests run sequentially based on their definition order
|
||||
@@ -56,9 +56,9 @@ The following properties and methods are implemented on mock functions.
|
||||
- [x] [mockFn.mock.instances](https://jestjs.io/docs/mock-function-api#mockfnmockinstances)
|
||||
- [x] [mockFn.mock.contexts](https://jestjs.io/docs/mock-function-api#mockfnmockcontexts)
|
||||
- [x] [mockFn.mock.lastCall](https://jestjs.io/docs/mock-function-api#mockfnmocklastcall)
|
||||
- [x] [mockFn.mockClear()](https://jestjs.io/docs/mock-function-api#mockfnmockclear)
|
||||
- [x] [mockFn.mockReset()](https://jestjs.io/docs/mock-function-api#mockfnmockreset)
|
||||
- [x] [mockFn.mockRestore()](https://jestjs.io/docs/mock-function-api#mockfnmockrestore)
|
||||
- [x] [mockFn.mockClear()](https://jestjs.io/docs/mock-function-api#mockfnmockclear) - Clears call history
|
||||
- [x] [mockFn.mockReset()](https://jestjs.io/docs/mock-function-api#mockfnmockreset) - Clears call history and removes implementation
|
||||
- [x] [mockFn.mockRestore()](https://jestjs.io/docs/mock-function-api#mockfnmockrestore) - Restores original implementation
|
||||
- [x] [mockFn.mockImplementation(fn)](https://jestjs.io/docs/mock-function-api#mockfnmockimplementationfn)
|
||||
- [x] [mockFn.mockImplementationOnce(fn)](https://jestjs.io/docs/mock-function-api#mockfnmockimplementationoncefn)
|
||||
- [x] [mockFn.mockName(name)](https://jestjs.io/docs/mock-function-api#mockfnmocknamename)
|
||||
@@ -197,7 +197,59 @@ After resolution, the mocked module is stored in the ES Module registry **and**
|
||||
|
||||
The callback function is called lazily, only if the module is imported or required. This means that you can use `mock.module()` to mock modules that don't exist yet, and it means that you can use `mock.module()` to mock modules that are imported by other modules.
|
||||
|
||||
## Restore all function mocks to their original values with `mock.restore()`
|
||||
### Module Mock Implementation Details
|
||||
|
||||
Understanding how `mock.module()` works helps you use it more effectively:
|
||||
|
||||
1. **Cache Interaction**: Module mocks interact with both ESM and CommonJS module caches.
|
||||
|
||||
2. **Lazy Evaluation**: The mock factory callback is only evaluated when the module is actually imported or required.
|
||||
|
||||
3. **Path Resolution**: Bun automatically resolves the module specifier as though you were doing an import, supporting:
|
||||
- Relative paths (`'./module'`)
|
||||
- Absolute paths (`'/path/to/module'`)
|
||||
- Package names (`'lodash'`)
|
||||
|
||||
4. **Import Timing Effects**:
|
||||
- When mocking before first import: No side effects from the original module occur
|
||||
- When mocking after import: The original module's side effects have already happened
|
||||
- For this reason, using `--preload` is recommended for mocks that need to prevent side effects
|
||||
|
||||
5. **Live Bindings**: Mocked ESM modules maintain live bindings, so changing the mock will update all existing imports
|
||||
|
||||
## Global Mock Functions
|
||||
|
||||
### Clear all mocks with `mock.clearAllMocks()`
|
||||
|
||||
Reset all mock function state (calls, results, etc.) without restoring their original implementation:
|
||||
|
||||
```ts
|
||||
import { expect, mock, test } from "bun:test";
|
||||
|
||||
const random1 = mock(() => Math.random());
|
||||
const random2 = mock(() => Math.random());
|
||||
|
||||
test("clearing all mocks", () => {
|
||||
random1();
|
||||
random2();
|
||||
|
||||
expect(random1).toHaveBeenCalledTimes(1);
|
||||
expect(random2).toHaveBeenCalledTimes(1);
|
||||
|
||||
mock.clearAllMocks();
|
||||
|
||||
expect(random1).toHaveBeenCalledTimes(0);
|
||||
expect(random2).toHaveBeenCalledTimes(0);
|
||||
|
||||
// Note: implementations are preserved
|
||||
expect(typeof random1()).toBe("number");
|
||||
expect(typeof random2()).toBe("number");
|
||||
});
|
||||
```
|
||||
|
||||
This resets the `.mock.calls`, `.mock.instances`, `.mock.contexts`, and `.mock.results` properties of all mocks, but unlike `mock.restore()`, it does not restore the original implementation.
|
||||
|
||||
### Restore all function mocks with `mock.restore()`
|
||||
|
||||
Instead of manually restoring each mock individually with `mockFn.mockRestore()`, restore all mocks with one command by calling `mock.restore()`. Doing so does not reset the value of modules overridden with `mock.module()`.
|
||||
|
||||
@@ -234,3 +286,28 @@ test('foo, bar, baz', () => {
|
||||
expect(bazSpy).toBe('baz');
|
||||
});
|
||||
```
|
||||
|
||||
## Vitest Compatibility
|
||||
|
||||
For added compatibility with tests written for [Vitest](https://vitest.dev/), Bun provides the `vi` global object as an alias for parts of the Jest mocking API:
|
||||
|
||||
```ts
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
// Using the 'vi' alias similar to Vitest
|
||||
test("vitest compatibility", () => {
|
||||
const mockFn = vi.fn(() => 42);
|
||||
|
||||
mockFn();
|
||||
expect(mockFn).toHaveBeenCalled();
|
||||
|
||||
// The following functions are available on the vi object:
|
||||
// vi.fn
|
||||
// vi.spyOn
|
||||
// vi.mock
|
||||
// vi.restoreAllMocks
|
||||
// vi.clearAllMocks
|
||||
});
|
||||
```
|
||||
|
||||
This makes it easier to port tests from Vitest to Bun without having to rewrite all your mocks.
|
||||
|
||||
108
docs/test/reporters.md
Normal file
108
docs/test/reporters.md
Normal file
@@ -0,0 +1,108 @@
|
||||
bun test supports different output formats through reporters. This document covers both built-in reporters and how to implement your own custom reporters.
|
||||
|
||||
## Built-in Reporters
|
||||
|
||||
### Default Console Reporter
|
||||
|
||||
By default, bun test outputs results to the console in a human-readable format:
|
||||
|
||||
```sh
|
||||
test/package-json-lint.test.ts:
|
||||
✓ test/package.json [0.88ms]
|
||||
✓ test/js/third_party/grpc-js/package.json [0.18ms]
|
||||
✓ test/js/third_party/svelte/package.json [0.21ms]
|
||||
✓ test/js/third_party/express/package.json [1.05ms]
|
||||
|
||||
4 pass
|
||||
0 fail
|
||||
4 expect() calls
|
||||
Ran 4 tests in 1.44ms
|
||||
```
|
||||
|
||||
When a terminal doesn't support colors, the output avoids non-ASCII characters:
|
||||
|
||||
```sh
|
||||
test/package-json-lint.test.ts:
|
||||
(pass) test/package.json [0.48ms]
|
||||
(pass) test/js/third_party/grpc-js/package.json [0.10ms]
|
||||
(pass) test/js/third_party/svelte/package.json [0.04ms]
|
||||
(pass) test/js/third_party/express/package.json [0.04ms]
|
||||
|
||||
4 pass
|
||||
0 fail
|
||||
4 expect() calls
|
||||
Ran 4 tests across 1 files. [0.66ms]
|
||||
```
|
||||
|
||||
### JUnit XML Reporter
|
||||
|
||||
For CI/CD environments, Bun supports generating JUnit XML reports. JUnit XML is a widely-adopted format for test results that can be parsed by many CI/CD systems, including GitLab, Jenkins, and others.
|
||||
|
||||
#### Using the JUnit Reporter
|
||||
|
||||
To generate a JUnit XML report, use the `--reporter=junit` flag along with `--reporter-outfile` to specify the output file:
|
||||
|
||||
```sh
|
||||
$ bun test --reporter=junit --reporter-outfile=./junit.xml
|
||||
```
|
||||
|
||||
This continues to output to the console as usual while also writing the JUnit XML report to the specified path at the end of the test run.
|
||||
|
||||
#### Configuring via bunfig.toml
|
||||
|
||||
You can also configure the JUnit reporter in your `bunfig.toml` file:
|
||||
|
||||
```toml
|
||||
[test.reporter]
|
||||
junit = "path/to/junit.xml" # Output path for JUnit XML report
|
||||
```
|
||||
|
||||
#### Environment Variables in JUnit Reports
|
||||
|
||||
The JUnit reporter automatically includes environment information as `<properties>` in the XML output. This can be helpful for tracking test runs in CI environments.
|
||||
|
||||
Specifically, it includes the following environment variables when available:
|
||||
|
||||
| Environment Variable | Property Name | Description |
|
||||
| ----------------------------------------------------------------------- | ------------- | ---------------------- |
|
||||
| `GITHUB_RUN_ID`, `GITHUB_SERVER_URL`, `GITHUB_REPOSITORY`, `CI_JOB_URL` | `ci` | CI build information |
|
||||
| `GITHUB_SHA`, `CI_COMMIT_SHA`, `GIT_SHA` | `commit` | Git commit identifiers |
|
||||
| System hostname | `hostname` | Machine hostname |
|
||||
|
||||
This makes it easier to track which environment and commit a particular test run was for.
|
||||
|
||||
#### Current Limitations
|
||||
|
||||
The JUnit reporter currently has a few limitations that will be addressed in future updates:
|
||||
|
||||
- `stdout` and `stderr` output from individual tests are not included in the report
|
||||
- Precise timestamp fields per test case are not included
|
||||
|
||||
### GitHub Actions reporter
|
||||
|
||||
Bun test automatically detects when it's running inside GitHub Actions and emits GitHub Actions annotations to the console directly. No special configuration is needed beyond installing Bun and running `bun test`.
|
||||
|
||||
For a GitHub Actions workflow configuration example, see the [CI/CD integration](../cli/test.md#cicd-integration) section of the CLI documentation.
|
||||
|
||||
## Custom Reporters
|
||||
|
||||
Bun allows developers to implement custom test reporters by extending the WebKit Inspector Protocol with additional testing-specific domains.
|
||||
|
||||
### Inspector Protocol for Testing
|
||||
|
||||
To support test reporting, Bun extends the standard WebKit Inspector Protocol with two custom domains:
|
||||
|
||||
1. **TestReporter**: Reports test discovery, execution start, and completion events
|
||||
2. **LifecycleReporter**: Reports errors and exceptions during test execution
|
||||
|
||||
These extensions allow you to build custom reporting tools that can receive detailed information about test execution in real-time.
|
||||
|
||||
### Key Events
|
||||
|
||||
Custom reporters can listen for these key events:
|
||||
|
||||
- `TestReporter.found`: Emitted when a test is discovered
|
||||
- `TestReporter.start`: Emitted when a test starts running
|
||||
- `TestReporter.end`: Emitted when a test completes
|
||||
- `Console.messageAdded`: Emitted when console output occurs during a test
|
||||
- `LifecycleReporter.error`: Emitted when an error or exception occurs
|
||||
93
docs/test/runtime-behavior.md
Normal file
93
docs/test/runtime-behavior.md
Normal file
@@ -0,0 +1,93 @@
|
||||
`bun test` is deeply integrated with Bun's runtime. This is part of what makes `bun test` fast and simple to use.
|
||||
|
||||
#### `$NODE_ENV` environment variable
|
||||
|
||||
`bun test` automatically sets `$NODE_ENV` to `"test"` unless it's already set in the environment or via .env files. This is standard behavior for most test runners and helps ensure consistent test behavior.
|
||||
|
||||
```ts
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("NODE_ENV is set to test", () => {
|
||||
expect(process.env.NODE_ENV).toBe("test");
|
||||
});
|
||||
```
|
||||
|
||||
#### `$TZ` environment variable
|
||||
|
||||
By default, all `bun test` runs use UTC (`Etc/UTC`) as the time zone unless overridden by the `TZ` environment variable. This ensures consistent date and time behavior across different development environments.
|
||||
|
||||
#### Test Timeouts
|
||||
|
||||
Each test has a default timeout of 5000ms (5 seconds) if not explicitly overridden. Tests that exceed this timeout will fail. This can be changed globally with the `--timeout` flag or per-test as the third parameter to the test function.
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Unhandled Errors
|
||||
|
||||
`bun test` tracks unhandled promise rejections and errors that occur between tests. If such errors occur, the final exit code will be non-zero (specifically, the count of such errors), even if all tests pass.
|
||||
|
||||
This helps catch errors in asynchronous code that might otherwise go unnoticed:
|
||||
|
||||
```ts
|
||||
import { test } from "bun:test";
|
||||
|
||||
test("test 1", () => {
|
||||
// This test passes
|
||||
});
|
||||
|
||||
// This error happens outside any test
|
||||
setTimeout(() => {
|
||||
throw new Error("Unhandled error");
|
||||
}, 0);
|
||||
|
||||
test("test 2", () => {
|
||||
// This test also passes
|
||||
});
|
||||
|
||||
// The test run will still fail with a non-zero exit code
|
||||
// because of the unhandled error
|
||||
```
|
||||
|
||||
Internally, this occurs with a higher precedence than `process.on("unhandledRejection")` or `process.on("uncaughtException")`, which makes it simpler to integrate with existing code.
|
||||
|
||||
## Using General CLI Flags with Tests
|
||||
|
||||
Several Bun CLI flags can be used with `bun test` to modify its behavior:
|
||||
|
||||
### Memory Usage
|
||||
|
||||
- `--smol`: Reduces memory usage for the test runner VM
|
||||
|
||||
### Debugging
|
||||
|
||||
- `--inspect`, `--inspect-brk`: Attaches the debugger to the test runner process
|
||||
|
||||
### Module Loading
|
||||
|
||||
- `--preload`: Runs scripts before test files (useful for global setup/mocks)
|
||||
- `--define`: Sets compile-time constants
|
||||
- `--loader`: Configures custom loaders
|
||||
- `--tsconfig-override`: Uses a different tsconfig
|
||||
- `--conditions`: Sets package.json conditions for module resolution
|
||||
- `--env-file`: Loads environment variables for tests
|
||||
|
||||
### Installation-related Flags
|
||||
|
||||
- `--prefer-offline`, `--frozen-lockfile`, etc.: Affect any network requests or auto-installs during test execution
|
||||
|
||||
## Watch and Hot Reloading
|
||||
|
||||
When running `bun test` with the `--watch` flag, the test runner will watch for file changes and re-run affected tests.
|
||||
|
||||
The `--hot` flag provides similar functionality but is more aggressive about trying to preserve state between runs. For most test scenarios, `--watch` is the recommended option.
|
||||
|
||||
## Global Variables
|
||||
|
||||
The following globals are automatically available in test files without importing (though they can be imported from `bun:test` if preferred):
|
||||
|
||||
- `test`, `it`: Define tests
|
||||
- `describe`: Group tests
|
||||
- `expect`: Make assertions
|
||||
- `beforeAll`, `beforeEach`, `afterAll`, `afterEach`: Lifecycle hooks
|
||||
- `jest`: Jest global object
|
||||
- `vi`: Vitest compatibility alias for common jest methods
|
||||
@@ -1,3 +1,7 @@
|
||||
Snapshot testing saves the output of a value and compares it against future test runs. This is particularly useful for UI components, complex objects, or any output that needs to remain consistent.
|
||||
|
||||
## Basic snapshots
|
||||
|
||||
Snapshot tests are written using the `.toMatchSnapshot()` matcher:
|
||||
|
||||
```ts
|
||||
@@ -13,3 +17,52 @@ The first time this test is run, the argument to `expect` will be serialized and
|
||||
```bash
|
||||
$ bun test --update-snapshots
|
||||
```
|
||||
|
||||
## Inline snapshots
|
||||
|
||||
For smaller values, you can use inline snapshots with `.toMatchInlineSnapshot()`. These snapshots are stored directly in your test file:
|
||||
|
||||
```ts
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("inline snapshot", () => {
|
||||
// First run: snapshot will be inserted automatically
|
||||
expect({ hello: "world" }).toMatchInlineSnapshot();
|
||||
|
||||
// After first run, the test file will be updated to:
|
||||
// expect({ hello: "world" }).toMatchInlineSnapshot(`
|
||||
// {
|
||||
// "hello": "world",
|
||||
// }
|
||||
// `);
|
||||
});
|
||||
```
|
||||
|
||||
When you run the test, Bun automatically updates the test file itself with the generated snapshot string. This makes the tests more portable and easier to understand, since the expected output is right next to the test.
|
||||
|
||||
### Using inline snapshots
|
||||
|
||||
1. Write your test with `.toMatchInlineSnapshot()`
|
||||
2. Run the test once
|
||||
3. Bun automatically updates your test file with the snapshot
|
||||
4. On subsequent runs, the value will be compared against the inline snapshot
|
||||
|
||||
Inline snapshots are particularly useful for small, simple values where it's helpful to see the expected output right in the test file.
|
||||
|
||||
## Error snapshots
|
||||
|
||||
You can also snapshot error messages using `.toThrowErrorMatchingSnapshot()` and `.toThrowErrorMatchingInlineSnapshot()`:
|
||||
|
||||
```ts
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("error snapshot", () => {
|
||||
expect(() => {
|
||||
throw new Error("Something went wrong");
|
||||
}).toThrowErrorMatchingSnapshot();
|
||||
|
||||
expect(() => {
|
||||
throw new Error("Another error");
|
||||
}).toThrowErrorMatchingInlineSnapshot();
|
||||
});
|
||||
```
|
||||
|
||||
@@ -74,9 +74,29 @@ test("it was 2020, for a moment.", () => {
|
||||
});
|
||||
```
|
||||
|
||||
## Get mocked time with `jest.now()`
|
||||
|
||||
When you're using mocked time (with `setSystemTime` or `useFakeTimers`), you can use `jest.now()` to get the current mocked timestamp:
|
||||
|
||||
```ts
|
||||
import { test, expect, jest } from "bun:test";
|
||||
|
||||
test("get the current mocked time", () => {
|
||||
jest.useFakeTimers();
|
||||
jest.setSystemTime(new Date("2020-01-01T00:00:00.000Z"));
|
||||
|
||||
expect(Date.now()).toBe(1577836800000); // Jan 1, 2020 timestamp
|
||||
expect(jest.now()).toBe(1577836800000); // Same value
|
||||
|
||||
jest.useRealTimers();
|
||||
});
|
||||
```
|
||||
|
||||
This is useful when you need to access the mocked time directly without creating a new Date object.
|
||||
|
||||
## Set the time zone
|
||||
|
||||
To change the time zone, either pass the `$TZ` environment variable to `bun test`.
|
||||
By default, the time zone for all `bun test` runs is set to UTC (`Etc/UTC`) unless overridden. To change the time zone, either pass the `$TZ` environment variable to `bun test`.
|
||||
|
||||
```sh
|
||||
TZ=America/Los_Angeles bun test
|
||||
|
||||
@@ -78,9 +78,11 @@ test("wat", async () => {
|
||||
|
||||
In `bun:test`, test timeouts throw an uncatchable exception to force the test to stop running and fail. We also kill any child processes that were spawned in the test to avoid leaving behind zombie processes lurking in the background.
|
||||
|
||||
The default timeout for each test is 5000ms (5 seconds) if not overridden by this timeout option or `jest.setDefaultTimeout()`.
|
||||
|
||||
### 🧟 Zombie process killer
|
||||
|
||||
When a test times out and processes spawned in the test via `Bun.spawn`, `Bun.spawnSync`, or `node:child_process` are not killed, they will be automatically killed and a message will be logged to the console.
|
||||
When a test times out and processes spawned in the test via `Bun.spawn`, `Bun.spawnSync`, or `node:child_process` are not killed, they will be automatically killed and a message will be logged to the console. This prevents zombie processes from lingering in the background after timed-out tests.
|
||||
|
||||
## `test.skip`
|
||||
|
||||
@@ -125,7 +127,7 @@ fix the test.
|
||||
|
||||
## `test.only`
|
||||
|
||||
To run a particular test or suite of tests use `test.only()` or `describe.only()`. Once declared, running `bun test --only` will only execute tests/suites that have been marked with `.only()`. Running `bun test` without the `--only` option with `test.only()` declared will result in all tests in the given suite being executed _up to_ the test with `.only()`. `describe.only()` functions the same in both execution scenarios.
|
||||
To run a particular test or suite of tests use `test.only()` or `describe.only()`.
|
||||
|
||||
```ts
|
||||
import { test, describe } from "bun:test";
|
||||
@@ -197,22 +199,121 @@ test.todoIf(macOS)("runs on posix", () => {
|
||||
});
|
||||
```
|
||||
|
||||
## `test.each`
|
||||
## `test.failing`
|
||||
|
||||
To return a function for multiple cases in a table of tests, use `test.each`.
|
||||
Use `test.failing()` when you know a test is currently failing but you want to track it and be notified when it starts passing. This inverts the test result:
|
||||
|
||||
- A failing test marked with `.failing()` will pass
|
||||
- A passing test marked with `.failing()` will fail (with a message indicating it's now passing and should be fixed)
|
||||
|
||||
```ts
|
||||
// This will pass because the test is failing as expected
|
||||
test.failing("math is broken", () => {
|
||||
expect(0.1 + 0.2).toBe(0.3); // fails due to floating point precision
|
||||
});
|
||||
|
||||
// This will fail with a message that the test is now passing
|
||||
test.failing("fixed bug", () => {
|
||||
expect(1 + 1).toBe(2); // passes, but we expected it to fail
|
||||
});
|
||||
```
|
||||
|
||||
This is useful for tracking known bugs that you plan to fix later, or for implementing test-driven development.
|
||||
|
||||
## Conditional Tests for Describe Blocks
|
||||
|
||||
The conditional modifiers `.if()`, `.skipIf()`, and `.todoIf()` can also be applied to `describe` blocks, affecting all tests within the suite:
|
||||
|
||||
```ts
|
||||
const isMacOS = process.platform === "darwin";
|
||||
|
||||
// Only runs the entire suite on macOS
|
||||
describe.if(isMacOS)("macOS-specific features", () => {
|
||||
test("feature A", () => {
|
||||
// only runs on macOS
|
||||
});
|
||||
|
||||
test("feature B", () => {
|
||||
// only runs on macOS
|
||||
});
|
||||
});
|
||||
|
||||
// Skips the entire suite on Windows
|
||||
describe.skipIf(process.platform === "win32")("Unix features", () => {
|
||||
test("feature C", () => {
|
||||
// skipped on Windows
|
||||
});
|
||||
});
|
||||
|
||||
// Marks the entire suite as TODO on Linux
|
||||
describe.todoIf(process.platform === "linux")("Upcoming Linux support", () => {
|
||||
test("feature D", () => {
|
||||
// marked as TODO on Linux
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## `test.each` and `describe.each`
|
||||
|
||||
To run the same test with multiple sets of data, use `test.each`. This creates a parametrized test that runs once for each test case provided.
|
||||
|
||||
```ts
|
||||
const cases = [
|
||||
[1, 2, 3],
|
||||
[3, 4, 5],
|
||||
[3, 4, 7],
|
||||
];
|
||||
|
||||
test.each(cases)("%p + %p should be %p", (a, b, expected) => {
|
||||
// runs once for each test case provided
|
||||
expect(a + b).toBe(expected);
|
||||
});
|
||||
```
|
||||
|
||||
There are a number of options available for formatting the case label depending on its type.
|
||||
You can also use `describe.each` to create a parametrized suite that runs once for each test case:
|
||||
|
||||
```ts
|
||||
describe.each([
|
||||
[1, 2, 3],
|
||||
[3, 4, 7],
|
||||
])("add(%i, %i)", (a, b, expected) => {
|
||||
test(`returns ${expected}`, () => {
|
||||
expect(a + b).toBe(expected);
|
||||
});
|
||||
|
||||
test(`sum is greater than each value`, () => {
|
||||
expect(a + b).toBeGreaterThan(a);
|
||||
expect(a + b).toBeGreaterThan(b);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Argument Passing
|
||||
|
||||
How arguments are passed to your test function depends on the structure of your test cases:
|
||||
|
||||
- If a table row is an array (like `[1, 2, 3]`), each element is passed as an individual argument
|
||||
- If a row is not an array (like an object), it's passed as a single argument
|
||||
|
||||
```ts
|
||||
// Array items passed as individual arguments
|
||||
test.each([
|
||||
[1, 2, 3],
|
||||
[4, 5, 9],
|
||||
])("add(%i, %i) = %i", (a, b, expected) => {
|
||||
expect(a + b).toBe(expected);
|
||||
});
|
||||
|
||||
// Object items passed as a single argument
|
||||
test.each([
|
||||
{ a: 1, b: 2, expected: 3 },
|
||||
{ a: 4, b: 5, expected: 9 },
|
||||
])("add($a, $b) = $expected", data => {
|
||||
expect(data.a + data.b).toBe(data.expected);
|
||||
});
|
||||
```
|
||||
|
||||
### Format Specifiers
|
||||
|
||||
There are a number of options available for formatting the test title:
|
||||
|
||||
{% table %}
|
||||
|
||||
@@ -263,6 +364,68 @@ There are a number of options available for formatting the case label depending
|
||||
|
||||
{% /table %}
|
||||
|
||||
#### Examples
|
||||
|
||||
```ts
|
||||
// Basic specifiers
|
||||
test.each([
|
||||
["hello", 123],
|
||||
["world", 456],
|
||||
])("string: %s, number: %i", (str, num) => {
|
||||
// "string: hello, number: 123"
|
||||
// "string: world, number: 456"
|
||||
});
|
||||
|
||||
// %p for pretty-format output
|
||||
test.each([
|
||||
[{ name: "Alice" }, { a: 1, b: 2 }],
|
||||
[{ name: "Bob" }, { x: 5, y: 10 }],
|
||||
])("user %p with data %p", (user, data) => {
|
||||
// "user { name: 'Alice' } with data { a: 1, b: 2 }"
|
||||
// "user { name: 'Bob' } with data { x: 5, y: 10 }"
|
||||
});
|
||||
|
||||
// %# for index
|
||||
test.each(["apple", "banana"])("fruit #%# is %s", fruit => {
|
||||
// "fruit #0 is apple"
|
||||
// "fruit #1 is banana"
|
||||
});
|
||||
```
|
||||
|
||||
## Assertion Counting
|
||||
|
||||
Bun supports verifying that a specific number of assertions were called during a test:
|
||||
|
||||
### expect.hasAssertions()
|
||||
|
||||
Use `expect.hasAssertions()` to verify that at least one assertion is called during a test:
|
||||
|
||||
```ts
|
||||
test("async work calls assertions", async () => {
|
||||
expect.hasAssertions(); // Will fail if no assertions are called
|
||||
|
||||
const data = await fetchData();
|
||||
expect(data).toBeDefined();
|
||||
});
|
||||
```
|
||||
|
||||
This is especially useful for async tests to ensure your assertions actually run.
|
||||
|
||||
### expect.assertions(count)
|
||||
|
||||
Use `expect.assertions(count)` to verify that a specific number of assertions are called during a test:
|
||||
|
||||
```ts
|
||||
test("exactly two assertions", () => {
|
||||
expect.assertions(2); // Will fail if not exactly 2 assertions are called
|
||||
|
||||
expect(1 + 1).toBe(2);
|
||||
expect("hello").toContain("ell");
|
||||
});
|
||||
```
|
||||
|
||||
This helps ensure all your assertions run, especially in complex async code with multiple code paths.
|
||||
|
||||
## Matchers
|
||||
|
||||
Bun implements the following matchers. Full Jest compatibility is on the roadmap; track progress [here](https://github.com/oven-sh/bun/issues/1825).
|
||||
|
||||
@@ -17,7 +17,7 @@ Bun supports things like top-level await, JSX, and extensioned `.ts` imports, wh
|
||||
```jsonc
|
||||
{
|
||||
"compilerOptions": {
|
||||
// Enable latest features
|
||||
// Environment setup & latest features
|
||||
"lib": ["ESNext"],
|
||||
"target": "ESNext",
|
||||
"module": "ESNext",
|
||||
@@ -35,12 +35,13 @@ Bun supports things like top-level await, JSX, and extensioned `.ts` imports, wh
|
||||
"strict": true,
|
||||
"skipLibCheck": true,
|
||||
"noFallthroughCasesInSwitch": true,
|
||||
"noUncheckedIndexedAccess": true,
|
||||
|
||||
// Some stricter flags
|
||||
"noUnusedLocals": true,
|
||||
"noUnusedParameters": true,
|
||||
"noPropertyAccessFromIndexSignature": true
|
||||
}
|
||||
// Some stricter flags (disabled by default)
|
||||
"noUnusedLocals": false,
|
||||
"noUnusedParameters": false,
|
||||
"noPropertyAccessFromIndexSignature": false,
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
142
misctools/gdb/std_gdb_pretty_printers.py
Normal file
142
misctools/gdb/std_gdb_pretty_printers.py
Normal file
@@ -0,0 +1,142 @@
|
||||
# pretty printing for the standard library.
|
||||
# put "source /path/to/std_gdb_pretty_printers.py" in ~/.gdbinit to load it automatically.
|
||||
import re
|
||||
import gdb.printing
|
||||
|
||||
# Handles both ArrayList and ArrayListUnmanaged.
class ArrayListPrinter:
    """GDB pretty-printer for Zig's std.ArrayList family of types."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        # Strip the "std.array_list." namespace prefix, then collapse the
        # ",null" alignment argument back into the familiar ArrayList name.
        display_name = self.val.type.name[len('std.array_list.'):]
        display_name = re.sub(
            r'^ArrayListAligned(Unmanaged)?\((.*),null\)$',
            r'ArrayList\1(\2)',
            display_name,
        )
        return '%s of length %s, capacity %s' % (
            display_name, self.val['items']['len'], self.val['capacity'])

    def children(self):
        # Walk the backing slice element by element.
        items = self.val['items']
        for index in range(items['len']):
            element = (items['ptr'] + index).dereference()
            yield ('[%d]' % index, element)

    def display_hint(self):
        return 'array'
|
||||
|
||||
class MultiArrayListPrinter:
    """GDB pretty-printer for Zig's std.MultiArrayList (struct-of-arrays)."""

    def __init__(self, val):
        self.val = val

    def child_type(self):
        # The compiler emits a "<type>.dbHelper" symbol whose second field
        # points at the element struct type; recover the child type from it.
        (helper_fn, _) = gdb.lookup_symbol('%s.dbHelper' % self.val.type.name)
        return helper_fn.type.fields()[1].type.target()

    def to_string(self):
        display_name = self.val.type.name[len('std.multi_array_list.'):]
        return '%s of length %s, capacity %s' % (
            display_name, self.val['len'], self.val['capacity'])

    def slice(self):
        """Yield (field_name, array_of_len_values) per element field."""
        length = self.val['len']
        if length == 0:
            return

        capacity = self.val['capacity']
        cursor = self.val['bytes']

        # Field segments are laid out back-to-back in the byte buffer,
        # ordered by decreasing alignment; each segment is `capacity` wide.
        ordered_fields = sorted(
            self.child_type().fields(),
            key=lambda field: field.type.alignof,
            reverse=True,
        )

        for field in ordered_fields:
            segment = cursor.cast(field.type.pointer()).dereference() \
                            .cast(field.type.array(length - 1))
            cursor += field.type.sizeof * capacity
            yield (field.name, segment)

    def children(self):
        # Emit field-name / segment pairs so gdb renders them as a map.
        for index, (name, segment) in enumerate(self.slice()):
            yield ('[%d]' % index, name)
            yield ('[%d]' % index, segment)

    def display_hint(self):
        return 'map'
|
||||
|
||||
# Handles both HashMap and HashMapUnmanaged.
class HashMapPrinter:
    """GDB pretty-printer for Zig's std.HashMap family of types."""

    def __init__(self, val):
        self.type = val.type
        # Managed maps wrap the unmanaged implementation in an `unmanaged`
        # field; unwrap so the rest of the printer sees one layout.
        if re.search(r'^std\.hash_map\.HashMap\(', self.type.name):
            self.val = val['unmanaged']
        else:
            self.val = val

    def header_ptr_type(self):
        # The "<type>.dbHelper" symbol's second field is a pointer to the
        # map's header struct type.
        (helper_fn, _) = gdb.lookup_symbol('%s.dbHelper' % self.val.type.name)
        return helper_fn.type.fields()[1].type

    def header(self):
        # A null metadata pointer means the map never allocated.
        if self.val['metadata'] == 0:
            return None
        # The header struct is stored immediately before the metadata array.
        header_ptr = self.val['metadata'].cast(self.header_ptr_type()) - 1
        return header_ptr.dereference()

    def to_string(self):
        display_name = self.type.name[len('std.hash_map.'):]
        display_name = re.sub(
            r'^HashMap(Unmanaged)?\((.*),std.hash_map.AutoContext\(.*$',
            r'AutoHashMap\1(\2)',
            display_name,
        )
        hdr = self.header()
        if hdr is None:
            capacity = 0
        else:
            capacity = hdr['capacity']
        return '%s of length %s, capacity %s' % (
            display_name, self.val['size'], capacity)

    def children(self):
        hdr = self.header()
        if hdr is None:
            return
        emit_values = self.display_hint() == 'map'
        for slot in range(hdr['capacity']):
            # Only slots whose metadata `used` bit is set hold live entries.
            slot_metadata = (self.val['metadata'] + slot).dereference()
            if slot_metadata['used'] == 1:
                yield ('[%d]' % slot, (hdr['keys'] + slot).dereference())
                if emit_values:
                    yield ('[%d]' % slot, (hdr['values'] + slot).dereference())

    def display_hint(self):
        # A `values` field in the header distinguishes maps from sets.
        for field in self.header_ptr_type().target().fields():
            if field.name == 'values':
                return 'map'
        return 'array'
|
||||
|
||||
# Handles both ArrayHashMap and ArrayHashMapUnmanaged.
class ArrayHashMapPrinter:
    """GDB pretty-printer for Zig's std.ArrayHashMap family of types."""

    def __init__(self, val):
        self.type = val.type
        # Managed maps wrap the unmanaged implementation; unwrap it.
        if re.search(r'^std\.array_hash_map\.ArrayHashMap\(', self.type.name):
            self.val = val['unmanaged']
        else:
            self.val = val

    def to_string(self):
        display_name = self.type.name[len('std.array_hash_map.'):]
        display_name = re.sub(
            r'^ArrayHashMap(Unmanaged)?\((.*),std.array_hash_map.AutoContext\(.*$',
            r'AutoArrayHashMap\1(\2)',
            display_name,
        )
        return '%s of length %s' % (display_name, self.val['entries']['len'])

    def children(self):
        # Entries live in a MultiArrayList; gather its per-field segments
        # so keys/values can be indexed positionally.
        segments = {}
        for name, segment in MultiArrayListPrinter(self.val['entries']).slice():
            segments[str(name)] = segment

        entry_count = self.val['entries']['len']
        for index in range(entry_count):
            if 'key' in segments:
                yield ('[%d]' % index, segments['key'][index])
            else:
                yield ('[%d]' % index, '{}')
            if 'value' in segments:
                yield ('[%d]' % index, segments['value'][index])

    def display_hint(self):
        # A `value` segment distinguishes maps from sets.
        for name, _segment in MultiArrayListPrinter(self.val['entries']).slice():
            if name == 'value':
                return 'map'
        return 'array'
|
||||
|
||||
pp = gdb.printing.RegexpCollectionPrettyPrinter('Zig standard library')
|
||||
pp.add_printer('ArrayList', r'^std\.array_list\.ArrayListAligned(Unmanaged)?\(.*\)$', ArrayListPrinter)
|
||||
pp.add_printer('MultiArrayList', r'^std\.multi_array_list\.MultiArrayList\(.*\)$', MultiArrayListPrinter)
|
||||
pp.add_printer('HashMap', r'^std\.hash_map\.HashMap(Unmanaged)?\(.*\)$', HashMapPrinter)
|
||||
pp.add_printer('ArrayHashMap', r'^std\.array_hash_map\.ArrayHashMap(Unmanaged)?\(.*\)$', ArrayHashMapPrinter)
|
||||
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
|
||||
63
misctools/gdb/zig_gdb_pretty_printers.py
Normal file
63
misctools/gdb/zig_gdb_pretty_printers.py
Normal file
@@ -0,0 +1,63 @@
|
||||
# pretty printing for the language.
|
||||
# put "source /path/to/zig_gdb_pretty_printers.py" in ~/.gdbinit to load it automatically.
|
||||
import gdb.printing
|
||||
|
||||
|
||||
class ZigPrettyPrinter(gdb.printing.PrettyPrinter):
|
||||
def __init__(self):
|
||||
super().__init__('Zig')
|
||||
|
||||
def __call__(self, val):
|
||||
tag = val.type.tag
|
||||
if tag is None:
|
||||
return None
|
||||
if tag == '[]u8':
|
||||
return StringPrinter(val)
|
||||
if tag.startswith('[]'):
|
||||
return SlicePrinter(val)
|
||||
if tag.startswith('?'):
|
||||
return OptionalPrinter(val)
|
||||
return None
|
||||
|
||||
|
||||
class SlicePrinter:
|
||||
def __init__(self, val):
|
||||
self.val = val
|
||||
|
||||
def to_string(self):
|
||||
return f"{self.val['len']} items at {self.val['ptr']}"
|
||||
|
||||
def children(self):
|
||||
def it(val):
|
||||
for i in range(int(val['len'])):
|
||||
item = val['ptr'] + i
|
||||
yield (f'[{i}]', item.dereference())
|
||||
return it(self.val)
|
||||
|
||||
def display_hint(self):
|
||||
return 'array'
|
||||
|
||||
|
||||
class StringPrinter:
|
||||
def __init__(self, val):
|
||||
self.val = val
|
||||
|
||||
def to_string(self):
|
||||
return self.val['ptr'].string(length=int(self.val['len']))
|
||||
|
||||
def display_hint(self):
|
||||
return 'string'
|
||||
|
||||
|
||||
class OptionalPrinter:
|
||||
def __init__(self, val):
|
||||
self.val = val
|
||||
|
||||
def to_string(self):
|
||||
if self.val['some']:
|
||||
return self.val['data']
|
||||
else:
|
||||
return 'null'
|
||||
|
||||
|
||||
gdb.printing.register_pretty_printer(gdb.current_objfile(), ZigPrettyPrinter())
|
||||
@@ -29,6 +29,9 @@
|
||||
"test/js/**/*bad.js",
|
||||
"test/bundler/transpiler/decorators.test.ts", // uses `arguments` as decorator
|
||||
"test/bundler/native-plugin.test.ts", // parser doesn't handle import metadata
|
||||
"test/bundler/transpiler/with-statement-works.js", // parser doesn't allow `with` statement
|
||||
"test/js/node/module/extensions-fixture", // these files are not meant to be linted
|
||||
"test/cli/run/module-type-fixture",
|
||||
"test/bundler/transpiler/with-statement-works.js" // parser doesn't allow `with` statement
|
||||
],
|
||||
"overrides": [
|
||||
|
||||
10
package.json
10
package.json
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "bun",
|
||||
"version": "1.2.6",
|
||||
"version": "1.2.10",
|
||||
"workspaces": [
|
||||
"./packages/bun-types"
|
||||
],
|
||||
@@ -31,6 +31,7 @@
|
||||
},
|
||||
"scripts": {
|
||||
"build": "bun run build:debug",
|
||||
"bd": "(bun run --silent build:debug &> /tmp/bun.debug.build.log || (cat /tmp/bun.debug.build.log && rm -rf /tmp/bun.debug.build.log && exit 1)) && rm -f /tmp/bun.debug.build.log && ./build/debug/bun-debug",
|
||||
"build:debug": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Debug -B build/debug",
|
||||
"build:valgrind": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Debug -DENABLE_BASELINE=ON -ENABLE_VALGRIND=ON -B build/debug-valgrind",
|
||||
"build:release": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Release -B build/release",
|
||||
@@ -44,6 +45,7 @@
|
||||
"build:release:with_logs": "cmake . -DCMAKE_BUILD_TYPE=Release -DENABLE_LOGS=true -GNinja -Bbuild-release && ninja -Cbuild-release",
|
||||
"build:debug-zig-release": "cmake . -DCMAKE_BUILD_TYPE=Release -DZIG_OPTIMIZE=Debug -GNinja -Bbuild-debug-zig-release && ninja -Cbuild-debug-zig-release",
|
||||
"css-properties": "bun run src/css/properties/generate_properties.ts",
|
||||
"uv-posix-stubs": "bun run src/bun.js/bindings/libuv/generate_uv_posix_stubs.ts",
|
||||
"bump": "bun ./scripts/bump.ts",
|
||||
"typecheck": "tsc --noEmit && cd test && bun run typecheck",
|
||||
"fmt": "bun run prettier",
|
||||
@@ -55,6 +57,9 @@
|
||||
"test:release": "node scripts/runner.node.mjs --exec-path ./build/release/bun",
|
||||
"banned": "bun test test/internal/ban-words.test.ts",
|
||||
"zig": "vendor/zig/zig.exe",
|
||||
"zig:test": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Debug -DBUN_TEST=ON -B build/debug",
|
||||
"zig:test:release": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Release -DBUNTEST=ON -B build/release",
|
||||
"zig:test:ci": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Release -DBUN_TEST=ON -DZIG_OPTIMIZE=ReleaseSafe -DCMAKE_VERBOSE_MAKEFILE=ON -DCI=true -B build/release-ci --verbose --fresh",
|
||||
"zig:fmt": "bun run zig-format",
|
||||
"zig:check": "bun run zig build check --summary new",
|
||||
"zig:check-all": "bun run zig build check-all --summary new",
|
||||
@@ -74,6 +79,7 @@
|
||||
"prettier:check": "bun run analysis:no-llvm --target prettier-check",
|
||||
"prettier:extra": "bun run analysis:no-llvm --target prettier-extra",
|
||||
"prettier:diff": "bun run analysis:no-llvm --target prettier-diff",
|
||||
"node:test": "node ./scripts/runner.node.mjs --quiet --exec-path=$npm_execpath --node-tests "
|
||||
"node:test": "node ./scripts/runner.node.mjs --quiet --exec-path=$npm_execpath --node-tests ",
|
||||
"clean:zig": "rm -rf build/debug/cache/zig build/debug/CMakeCache.txt 'build/debug/*.o' .zig-cache zig-out || true"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,12 +4,12 @@
|
||||
"": {
|
||||
"name": "bun-plugin-svelte",
|
||||
"devDependencies": {
|
||||
"@threlte/core": "8.0.1",
|
||||
"bun-types": "canary",
|
||||
"svelte": "^5.20.4",
|
||||
},
|
||||
"peerDependencies": {
|
||||
"svelte": "^5",
|
||||
"typescript": "^5",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -26,6 +26,8 @@
|
||||
|
||||
"@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.25", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ=="],
|
||||
|
||||
"@threlte/core": ["@threlte/core@8.0.1", "", { "dependencies": { "mitt": "^3.0.1" }, "peerDependencies": { "svelte": ">=5", "three": ">=0.155" } }, "sha512-vy1xRQppJFNmfPTeiRQue+KmYFsbPgVhwuYXRTvVrwPeD2oYz43gxUeOpe1FACeGKxrxZykeKJF5ebVvl7gBxw=="],
|
||||
|
||||
"@types/estree": ["@types/estree@1.0.6", "", {}, "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw=="],
|
||||
|
||||
"@types/node": ["@types/node@22.13.5", "", { "dependencies": { "undici-types": "~6.20.0" } }, "sha512-+lTU0PxZXn0Dr1NBtC7Y8cR21AJr87dLLU953CWA6pMxxv/UDc7jYAY90upcrie1nRcD6XNG5HOYEDtgW5TxAg=="],
|
||||
@@ -54,9 +56,11 @@
|
||||
|
||||
"magic-string": ["magic-string@0.30.17", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0" } }, "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA=="],
|
||||
|
||||
"mitt": ["mitt@3.0.1", "", {}, "sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw=="],
|
||||
|
||||
"svelte": ["svelte@5.20.4", "", { "dependencies": { "@ampproject/remapping": "^2.3.0", "@jridgewell/sourcemap-codec": "^1.5.0", "@types/estree": "^1.0.5", "acorn": "^8.12.1", "acorn-typescript": "^1.4.13", "aria-query": "^5.3.1", "axobject-query": "^4.1.0", "clsx": "^2.1.1", "esm-env": "^1.2.1", "esrap": "^1.4.3", "is-reference": "^3.0.3", "locate-character": "^3.0.0", "magic-string": "^0.30.11", "zimmerframe": "^1.1.2" } }, "sha512-2Mo/AfObaw9zuD0u1JJ7sOVzRCGcpETEyDkLbtkcctWpCMCIyT0iz83xD8JT29SR7O4SgswuPRIDYReYF/607A=="],
|
||||
|
||||
"typescript": ["typescript@5.7.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw=="],
|
||||
"three": ["three@0.174.0", "", {}, "sha512-p+WG3W6Ov74alh3geCMkGK9NWuT62ee21cV3jEnun201zodVF4tCE5aZa2U122/mkLRmhJJUQmLLW1BH00uQJQ=="],
|
||||
|
||||
"undici-types": ["undici-types@6.20.0", "", {}, "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg=="],
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "bun-plugin-svelte",
|
||||
"version": "0.0.5",
|
||||
"version": "0.0.6",
|
||||
"description": "Official Svelte plugin for Bun",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
|
||||
@@ -11,7 +11,11 @@ describe("SveltePlugin", () => {
|
||||
expect(() => SveltePlugin(undefined)).not.toThrow();
|
||||
});
|
||||
|
||||
it.each([null, 1, "hi", {}, "Client"])("throws if forceSide is not 'client' or 'server' (%p)", (forceSide: any) => {
|
||||
it.each([1, "hi", {}, "Client"])("throws if forceSide is not 'client' or 'server' (%p)", (forceSide: any) => {
|
||||
expect(() => SveltePlugin({ forceSide })).toThrow(TypeError);
|
||||
});
|
||||
|
||||
it.each([null, undefined])("forceSide may be nullish", (forceSide: any) => {
|
||||
expect(() => SveltePlugin({ forceSide })).not.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -2,7 +2,7 @@ import { describe, beforeAll, it, expect } from "bun:test";
|
||||
import type { BuildConfig } from "bun";
|
||||
import type { CompileOptions } from "svelte/compiler";
|
||||
|
||||
import { getBaseCompileOptions, type SvelteOptions } from "./options";
|
||||
import { getBaseCompileOptions, validateOptions, type SvelteOptions } from "./options";
|
||||
|
||||
describe("getBaseCompileOptions", () => {
|
||||
describe("when no options are provided", () => {
|
||||
@@ -42,4 +42,13 @@ describe("getBaseCompileOptions", () => {
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
}); // getBaseCompileOptions
|
||||
|
||||
describe("validateOptions(options)", () => {
|
||||
it.each(["", 1, null, undefined, true, false, Symbol("hi")])(
|
||||
"throws if options is not an object (%p)",
|
||||
(badOptions: any) => {
|
||||
expect(() => validateOptions(badOptions)).toThrow();
|
||||
},
|
||||
);
|
||||
}); // validateOptions
|
||||
|
||||
@@ -2,7 +2,8 @@ import { strict as assert } from "node:assert";
|
||||
import { type BuildConfig } from "bun";
|
||||
import type { CompileOptions, ModuleCompileOptions } from "svelte/compiler";
|
||||
|
||||
export interface SvelteOptions {
|
||||
type OverrideCompileOptions = Pick<CompileOptions, "customElement" | "runes" | "modernAst" | "namespace">;
|
||||
export interface SvelteOptions extends Pick<CompileOptions, "runes"> {
|
||||
/**
|
||||
* Force client-side or server-side generation.
|
||||
*
|
||||
@@ -20,6 +21,11 @@ export interface SvelteOptions {
|
||||
* Defaults to `true` when run via Bun's dev server, `false` otherwise.
|
||||
*/
|
||||
development?: boolean;
|
||||
|
||||
/**
|
||||
* Options to forward to the Svelte compiler.
|
||||
*/
|
||||
compilerOptions?: OverrideCompileOptions;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -27,15 +33,24 @@ export interface SvelteOptions {
|
||||
*/
|
||||
export function validateOptions(options: unknown): asserts options is SvelteOptions {
|
||||
assert(options && typeof options === "object", new TypeError("bun-svelte-plugin: options must be an object"));
|
||||
if ("forceSide" in options) {
|
||||
switch (options.forceSide) {
|
||||
const opts = options as Record<keyof SvelteOptions, unknown>;
|
||||
|
||||
if (opts.forceSide != null) {
|
||||
if (typeof opts.forceSide !== "string") {
|
||||
throw new TypeError("bun-svelte-plugin: forceSide must be a string, got " + typeof opts.forceSide);
|
||||
}
|
||||
switch (opts.forceSide) {
|
||||
case "client":
|
||||
case "server":
|
||||
break;
|
||||
default:
|
||||
throw new TypeError(
|
||||
`bun-svelte-plugin: forceSide must be either 'client' or 'server', got ${options.forceSide}`,
|
||||
);
|
||||
throw new TypeError(`bun-svelte-plugin: forceSide must be either 'client' or 'server', got ${opts.forceSide}`);
|
||||
}
|
||||
}
|
||||
|
||||
if (opts.compilerOptions) {
|
||||
if (typeof opts.compilerOptions !== "object") {
|
||||
throw new TypeError("bun-svelte-plugin: compilerOptions must be an object");
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -44,7 +59,10 @@ export function validateOptions(options: unknown): asserts options is SvelteOpti
|
||||
* @internal
|
||||
*/
|
||||
export function getBaseCompileOptions(pluginOptions: SvelteOptions, config: Partial<BuildConfig>): CompileOptions {
|
||||
let { development = false } = pluginOptions;
|
||||
let {
|
||||
development = false,
|
||||
compilerOptions: { customElement, runes, modernAst, namespace } = kEmptyObject as OverrideCompileOptions,
|
||||
} = pluginOptions;
|
||||
const { minify = false } = config;
|
||||
|
||||
const shouldMinify = Boolean(minify);
|
||||
@@ -68,6 +86,10 @@ export function getBaseCompileOptions(pluginOptions: SvelteOptions, config: Part
|
||||
preserveWhitespace: !minifyWhitespace,
|
||||
preserveComments: !shouldMinify,
|
||||
dev: development,
|
||||
customElement,
|
||||
runes,
|
||||
modernAst,
|
||||
namespace,
|
||||
cssHash({ css }) {
|
||||
// same prime number seed used by svelte/compiler.
|
||||
// TODO: ensure this provides enough entropy
|
||||
@@ -109,3 +131,4 @@ function generateSide(pluginOptions: SvelteOptions, config: Partial<BuildConfig>
|
||||
}
|
||||
|
||||
export const hash = (content: string): string => Bun.hash(content, 5381).toString(36);
|
||||
const kEmptyObject = Object.create(null);
|
||||
|
||||
@@ -24,13 +24,32 @@ afterAll(() => {
|
||||
}
|
||||
});
|
||||
|
||||
it("hello world component", async () => {
|
||||
const res = await Bun.build({
|
||||
entrypoints: [fixturePath("foo.svelte")],
|
||||
outdir,
|
||||
plugins: [SveltePlugin()],
|
||||
describe("given a hello world component", () => {
|
||||
const entrypoints = [fixturePath("foo.svelte")];
|
||||
it("when no options are provided, builds successfully", async () => {
|
||||
const res = await Bun.build({
|
||||
entrypoints,
|
||||
outdir,
|
||||
plugins: [SveltePlugin()],
|
||||
});
|
||||
expect(res.success).toBeTrue();
|
||||
});
|
||||
|
||||
describe("when a custom element is provided", () => {
|
||||
let res: BuildOutput;
|
||||
|
||||
beforeAll(async () => {
|
||||
res = await Bun.build({
|
||||
entrypoints,
|
||||
outdir,
|
||||
plugins: [SveltePlugin({ compilerOptions: { customElement: true } })],
|
||||
});
|
||||
});
|
||||
|
||||
it("builds successfully", () => {
|
||||
expect(res.success).toBeTrue();
|
||||
});
|
||||
});
|
||||
expect(res.success).toBeTrue();
|
||||
});
|
||||
|
||||
describe("when importing `.svelte.ts` files with ESM", () => {
|
||||
|
||||
@@ -20,7 +20,7 @@ That's it! VS Code and TypeScript automatically load `@types/*` packages into yo
|
||||
|
||||
# Contributing
|
||||
|
||||
The `@types/bun` package is a shim that loads `bun-types`. The `bun-types` package lives in the Bun repo under `packages/bun-types`. It is generated via [./scripts/bundle.ts](./scripts/bundle.ts).
|
||||
The `@types/bun` package is a shim that loads `bun-types`. The `bun-types` package lives in the Bun repo under `packages/bun-types`.
|
||||
|
||||
To add a new file, add it under `packages/bun-types`. Then add a [triple-slash directive](https://www.typescriptlang.org/docs/handbook/triple-slash-directives.html) pointing to it inside [./index.d.ts](./index.d.ts).
|
||||
|
||||
@@ -28,8 +28,6 @@ To add a new file, add it under `packages/bun-types`. Then add a [triple-slash d
|
||||
+ /// <reference path="./newfile.d.ts" />
|
||||
```
|
||||
|
||||
[`./bundle.ts`](./bundle.ts) merges the types in this folder into a single file. To run it:
|
||||
|
||||
```bash
|
||||
bun build
|
||||
```
|
||||
|
||||
114
packages/bun-types/authoring.md
Normal file
114
packages/bun-types/authoring.md
Normal file
@@ -0,0 +1,114 @@
|
||||
# Authoring @types/bun
|
||||
|
||||
These declarations define the `'bun'` module, the `Bun` global variable, and lots of other global declarations like extending the `fetch` interface.
|
||||
|
||||
## The `'bun'` Module
|
||||
|
||||
The `Bun` global variable and the `'bun'` module types are defined with one syntax. It supports declaring both types/interfaces and runtime values:
|
||||
|
||||
```typescript
|
||||
declare module "bun" {
|
||||
// Your types go here
|
||||
interface MyInterface {
|
||||
// ...
|
||||
}
|
||||
|
||||
type MyType = string | number;
|
||||
|
||||
function myFunction(): void;
|
||||
}
|
||||
```
|
||||
|
||||
You can write as many `declare module "bun"` declarations as you need. Symbols will be accessible from other files inside of the declaration, and they will all be merged when the types are evaluated.
|
||||
|
||||
You can consume these declarations in two ways:
|
||||
|
||||
1. Importing it from `'bun'`:
|
||||
|
||||
```typescript
|
||||
import { type MyInterface, type MyType, myFunction } from "bun";
|
||||
|
||||
const myInterface: MyInterface = {};
|
||||
const myType: MyType = "cool";
|
||||
myFunction();
|
||||
```
|
||||
|
||||
2. Using the global `Bun` object:
|
||||
|
||||
```typescript
|
||||
const myInterface: Bun.MyInterface = {};
|
||||
const myType: Bun.MyType = "cool";
|
||||
Bun.myFunction();
|
||||
```
|
||||
|
||||
Consuming them inside the ambient declarations is also easy:
|
||||
|
||||
```ts
|
||||
// These are equivalent
|
||||
type A = import("bun").MyType;
|
||||
type A = Bun.MyType;
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
Types are organized across multiple `.d.ts` files in the `packages/bun-types` directory:
|
||||
|
||||
- `index.d.ts` - The main entry point that references all other type files
|
||||
- `bun.d.ts` - Core Bun APIs and types
|
||||
- `globals.d.ts` - Global type declarations
|
||||
- `test.d.ts` - Testing-related types
|
||||
- `sqlite.d.ts` - SQLite-related types
|
||||
- ...etc. You can make more files
|
||||
|
||||
Note: The order of references in `index.d.ts` is important - `bun.ns.d.ts` must be referenced last to ensure the `Bun` global gets defined properly.
|
||||
|
||||
### Best Practices
|
||||
|
||||
1. **Type Safety**
|
||||
|
||||
- Please use strict types instead of `any` where possible
|
||||
- Leverage TypeScript's type system features (generics, unions, etc.)
|
||||
- Document complex types with JSDoc comments
|
||||
|
||||
2. **Compatibility**
|
||||
|
||||
- Use `Bun.__internal.UseLibDomIfAvailable<LibDomName extends string, OurType>` for types that might conflict with lib.dom.d.ts (see [`./fetch.d.ts`](./fetch.d.ts) for a real example)
|
||||
- `@types/node` often expects variables to always be defined (this was the biggest cause of most of the conflicts in the past!), so we use the `UseLibDomIfAvailable` type to make sure we don't overwrite `lib.dom.d.ts` but still provide Bun types while simultaneously declaring the variable exists (for Node to work) in the cases that we can.
|
||||
|
||||
3. **Documentation**
|
||||
- Add JSDoc comments for public APIs
|
||||
- Include examples in documentation when helpful
|
||||
- Document default values and important behaviors
|
||||
|
||||
### Internal Types
|
||||
|
||||
For internal types that shouldn't be exposed to users, use the `__internal` namespace:
|
||||
|
||||
```typescript
|
||||
declare module "bun" {
|
||||
namespace __internal {
|
||||
interface MyInternalType {
|
||||
// ...
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The internal namespace is mostly used for declaring things that shouldn't be globally accessible on the `bun` namespace, but are still used in public apis. You can see a pretty good example of that in the [`./fetch.d.ts`](./fetch.d.ts) file.
|
||||
|
||||
## Testing Types
|
||||
|
||||
We test our type definitions using a special test file at `fixture/index.ts`. This file contains TypeScript code that exercises our type definitions, but is never actually executed - it's only used to verify that the types work correctly.
|
||||
|
||||
The test file is type-checked in two different environments:
|
||||
|
||||
1. With `lib.dom.d.ts` included - This simulates usage in a browser environment where DOM types are available
|
||||
2. Without `lib.dom.d.ts` - This simulates usage in a server-like environment without DOM types
|
||||
|
||||
Your type definitions must work properly in both environments. This ensures that Bun's types are compatible regardless of whether DOM types are present or not.
|
||||
|
||||
For example, if you're adding types for a new API, you should just add code to `fixture/index.ts` that uses your new API. Doesn't need to work at runtime (e.g. you can fake api keys for example), it's just checking that the types are correct.
|
||||
|
||||
## Questions
|
||||
|
||||
Feel free to open an issue or speak to any of the more TypeScript-focused team members if you need help authoring types or fixing type tests.
|
||||
2810
packages/bun-types/bun.d.ts
vendored
2810
packages/bun-types/bun.d.ts
vendored
File diff suppressed because it is too large
Load Diff
7
packages/bun-types/bun.ns.d.ts
vendored
Normal file
7
packages/bun-types/bun.ns.d.ts
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
import * as BunModule from "bun";
|
||||
|
||||
declare global {
|
||||
export import Bun = BunModule;
|
||||
}
|
||||
|
||||
export {};
|
||||
30
packages/bun-types/deprecated.d.ts
vendored
30
packages/bun-types/deprecated.d.ts
vendored
@@ -1,4 +1,19 @@
|
||||
declare module "bun" {
|
||||
interface BunMessageEvent<T> {
|
||||
/**
|
||||
* @deprecated
|
||||
*/
|
||||
initMessageEvent(
|
||||
type: string,
|
||||
bubbles?: boolean,
|
||||
cancelable?: boolean,
|
||||
data?: any,
|
||||
origin?: string,
|
||||
lastEventId?: string,
|
||||
source?: null,
|
||||
): void;
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated Renamed to `ErrorLike`
|
||||
*/
|
||||
@@ -38,21 +53,6 @@ declare namespace NodeJS {
|
||||
}
|
||||
}
|
||||
|
||||
declare namespace Bun {
|
||||
interface MessageEvent {
|
||||
/** @deprecated */
|
||||
initMessageEvent(
|
||||
type: string,
|
||||
bubbles?: boolean,
|
||||
cancelable?: boolean,
|
||||
data?: any,
|
||||
origin?: string,
|
||||
lastEventId?: string,
|
||||
source?: null,
|
||||
): void;
|
||||
}
|
||||
}
|
||||
|
||||
interface CustomEvent<T = any> {
|
||||
/** @deprecated */
|
||||
initCustomEvent(type: string, bubbles?: boolean, cancelable?: boolean, detail?: T): void;
|
||||
|
||||
343
packages/bun-types/devserver.d.ts
vendored
343
packages/bun-types/devserver.d.ts
vendored
@@ -1,192 +1,189 @@
|
||||
export {};
|
||||
declare module "bun" {
|
||||
type HMREventNames =
|
||||
| "beforeUpdate"
|
||||
| "afterUpdate"
|
||||
| "beforeFullReload"
|
||||
| "beforePrune"
|
||||
| "invalidate"
|
||||
| "error"
|
||||
| "ws:disconnect"
|
||||
| "ws:connect";
|
||||
|
||||
declare global {
|
||||
namespace Bun {
|
||||
type HMREventNames =
|
||||
| "bun:ready"
|
||||
| "bun:beforeUpdate"
|
||||
| "bun:afterUpdate"
|
||||
| "bun:beforeFullReload"
|
||||
| "bun:beforePrune"
|
||||
| "bun:invalidate"
|
||||
| "bun:error"
|
||||
| "bun:ws:disconnect"
|
||||
| "bun:ws:connect";
|
||||
/**
|
||||
* The event names for the dev server
|
||||
*/
|
||||
type HMREvent = `bun:${HMREventNames}` | (string & {});
|
||||
}
|
||||
|
||||
interface ImportMeta {
|
||||
/**
|
||||
* Hot module replacement APIs. This value is `undefined` in production and
|
||||
* can be used in an `if` statement to check if HMR APIs are available
|
||||
*
|
||||
* ```ts
|
||||
* if (import.meta.hot) {
|
||||
* // HMR APIs are available
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* However, this check is usually not needed as Bun will dead-code-eliminate
|
||||
* calls to all of the HMR APIs in production builds.
|
||||
*
|
||||
* https://bun.sh/docs/bundler/hmr
|
||||
*/
|
||||
hot: {
|
||||
/**
|
||||
* The event names for the dev server
|
||||
*/
|
||||
type HMREvent = `bun:${HMREventNames}` | (string & {});
|
||||
}
|
||||
|
||||
interface ImportMeta {
|
||||
/**
|
||||
* Hot module replacement APIs. This value is `undefined` in production and
|
||||
* can be used in an `if` statement to check if HMR APIs are available
|
||||
* `import.meta.hot.data` maintains state between module instances during
|
||||
* hot replacement, enabling data transfer from previous to new versions.
|
||||
* When `import.meta.hot.data` is written to, Bun will mark this module as
|
||||
* capable of self-accepting (equivalent of calling `accept()`).
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* if (import.meta.hot) {
|
||||
* // HMR APIs are available
|
||||
* }
|
||||
* const root = import.meta.hot.data.root ??= createRoot(elem);
|
||||
* root.render(<App />); // re-use an existing root
|
||||
* ```
|
||||
*
|
||||
* However, this check is usually not needed as Bun will dead-code-eliminate
|
||||
* calls to all of the HMR APIs in production builds.
|
||||
* In production, `data` is inlined to be `{}`. This is handy because Bun
|
||||
* knows it can minify `{}.prop ??= value` into `value` in production.
|
||||
*
|
||||
*
|
||||
* https://bun.sh/docs/bundler/hmr
|
||||
*/
|
||||
hot: {
|
||||
/**
|
||||
* `import.meta.hot.data` maintains state between module instances during
|
||||
* hot replacement, enabling data transfer from previous to new versions.
|
||||
* When `import.meta.hot.data` is written to, Bun will mark this module as
|
||||
* capable of self-accepting (equivalent of calling `accept()`).
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const root = import.meta.hot.data.root ??= createRoot(elem);
|
||||
* root.render(<App />); // re-use an existing root
|
||||
* ```
|
||||
*
|
||||
* In production, `data` is inlined to be `{}`. This is handy because Bun
|
||||
* knows it can minify `{}.prop ??= value` into `value` in production.
|
||||
*/
|
||||
data: any;
|
||||
data: any;
|
||||
|
||||
/**
|
||||
* Indicate that this module can be replaced simply by re-evaluating the
|
||||
* file. After a hot update, importers of this module will be
|
||||
* automatically patched.
|
||||
*
|
||||
* When `import.meta.hot.accept` is not used, the page will reload when
|
||||
* the file updates, and a console message shows which files were checked.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* import { getCount } from "./foo";
|
||||
*
|
||||
* console.log("count is ", getCount());
|
||||
*
|
||||
* import.meta.hot.accept();
|
||||
* ```
|
||||
*/
|
||||
accept(): void;
|
||||
/**
|
||||
* Indicate that this module can be replaced simply by re-evaluating the
|
||||
* file. After a hot update, importers of this module will be
|
||||
* automatically patched.
|
||||
*
|
||||
* When `import.meta.hot.accept` is not used, the page will reload when
|
||||
* the file updates, and a console message shows which files were checked.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* import { getCount } from "./foo";
|
||||
*
|
||||
* console.log("count is ", getCount());
|
||||
*
|
||||
* import.meta.hot.accept();
|
||||
* ```
|
||||
*/
|
||||
accept(): void;
|
||||
|
||||
/**
|
||||
* Indicate that this module can be replaced by evaluating the new module,
|
||||
* and then calling the callback with the new module. In this mode, the
|
||||
* importers do not get patched. This is to match Vite, which is unable
|
||||
* to patch their import statements. Prefer using `import.meta.hot.accept()`
|
||||
* without an argument as it usually makes your code easier to understand.
|
||||
*
|
||||
* When `import.meta.hot.accept` is not used, the page will reload when
|
||||
* the file updates, and a console message shows which files were checked.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* export const count = 0;
|
||||
*
|
||||
* import.meta.hot.accept((newModule) => {
|
||||
* if (newModule) {
|
||||
* // newModule is undefined when SyntaxError happened
|
||||
* console.log('updated: count is now ', newModule.count)
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* In production, calls to this are dead-code-eliminated.
|
||||
*/
|
||||
accept(cb: (newModule: any | undefined) => void): void;
|
||||
/**
|
||||
* Indicate that this module can be replaced by evaluating the new module,
|
||||
* and then calling the callback with the new module. In this mode, the
|
||||
* importers do not get patched. This is to match Vite, which is unable
|
||||
* to patch their import statements. Prefer using `import.meta.hot.accept()`
|
||||
* without an argument as it usually makes your code easier to understand.
|
||||
*
|
||||
* When `import.meta.hot.accept` is not used, the page will reload when
|
||||
* the file updates, and a console message shows which files were checked.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* export const count = 0;
|
||||
*
|
||||
* import.meta.hot.accept((newModule) => {
|
||||
* if (newModule) {
|
||||
* // newModule is undefined when SyntaxError happened
|
||||
* console.log('updated: count is now ', newModule.count)
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* In production, calls to this are dead-code-eliminated.
|
||||
*/
|
||||
accept(cb: (newModule: any | undefined) => void): void;
|
||||
|
||||
/**
|
||||
* Indicate that a dependency's module can be accepted. When the dependency
|
||||
* is updated, the callback will be called with the new module.
|
||||
*
|
||||
* When `import.meta.hot.accept` is not used, the page will reload when
|
||||
* the file updates, and a console message shows which files were checked.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* import.meta.hot.accept('./foo', (newModule) => {
|
||||
* if (newModule) {
|
||||
* // newModule is undefined when SyntaxError happened
|
||||
* console.log('updated: count is now ', newModule.count)
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
accept(specifier: string, callback: (newModule: any) => void): void;
|
||||
/**
|
||||
* Indicate that a dependency's module can be accepted. When the dependency
|
||||
* is updated, the callback will be called with the new module.
|
||||
*
|
||||
* When `import.meta.hot.accept` is not used, the page will reload when
|
||||
* the file updates, and a console message shows which files were checked.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* import.meta.hot.accept('./foo', (newModule) => {
|
||||
* if (newModule) {
|
||||
* // newModule is undefined when SyntaxError happened
|
||||
* console.log('updated: count is now ', newModule.count)
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
accept(specifier: string, callback: (newModule: any) => void): void;
|
||||
|
||||
/**
|
||||
* Indicate that a dependency's module can be accepted. This variant
|
||||
* accepts an array of dependencies, where the callback will receive
|
||||
* the one updated module, and `undefined` for the rest.
|
||||
*
|
||||
* When `import.meta.hot.accept` is not used, the page will reload when
|
||||
* the file updates, and a console message shows which files were checked.
|
||||
*/
|
||||
accept(specifiers: string[], callback: (newModules: (any | undefined)[]) => void): void;
|
||||
/**
|
||||
* Indicate that a dependency's module can be accepted. This variant
|
||||
* accepts an array of dependencies, where the callback will receive
|
||||
* the one updated module, and `undefined` for the rest.
|
||||
*
|
||||
* When `import.meta.hot.accept` is not used, the page will reload when
|
||||
* the file updates, and a console message shows which files were checked.
|
||||
*/
|
||||
accept(specifiers: string[], callback: (newModules: (any | undefined)[]) => void): void;
|
||||
|
||||
/**
|
||||
* Attach an on-dispose callback. This is called:
|
||||
* - Just before the module is replaced with another copy (before the next is loaded)
|
||||
* - After the module is detached (removing all imports to this module)
|
||||
*
|
||||
* This callback is not called on route navigation or when the browser tab closes.
|
||||
*
|
||||
* Returning a promise will delay module replacement until the module is
|
||||
* disposed. All dispose callbacks are called in parallel.
|
||||
*/
|
||||
dispose(cb: (data: any) => void | Promise<void>): void;
|
||||
/**
|
||||
* Attach an on-dispose callback. This is called:
|
||||
* - Just before the module is replaced with another copy (before the next is loaded)
|
||||
* - After the module is detached (removing all imports to this module)
|
||||
*
|
||||
* This callback is not called on route navigation or when the browser tab closes.
|
||||
*
|
||||
* Returning a promise will delay module replacement until the module is
|
||||
* disposed. All dispose callbacks are called in parallel.
|
||||
*/
|
||||
dispose(cb: (data: any) => void | Promise<void>): void;
|
||||
|
||||
/**
|
||||
* No-op
|
||||
* @deprecated
|
||||
*/
|
||||
decline(): void;
|
||||
/**
|
||||
* No-op
|
||||
* @deprecated
|
||||
*/
|
||||
decline(): void;
|
||||
|
||||
// NOTE TO CONTRIBUTORS ////////////////////////////////////////
|
||||
// Callback is currently never called for `.prune()` //
|
||||
// so the types are commented out until we support it. //
|
||||
////////////////////////////////////////////////////////////////
|
||||
// /**
|
||||
// * Attach a callback that is called when the module is removed from the module graph.
|
||||
// *
|
||||
// * This can be used to clean up resources that were created when the module was loaded.
|
||||
// * Unlike `import.meta.hot.dispose()`, this pairs much better with `accept` and `data` to manage stateful resources.
|
||||
// *
|
||||
// * @example
|
||||
// * ```ts
|
||||
// * export const ws = (import.meta.hot.data.ws ??= new WebSocket(location.origin));
|
||||
// *
|
||||
// * import.meta.hot.prune(() => {
|
||||
// * ws.close();
|
||||
// * });
|
||||
// * ```
|
||||
// */
|
||||
// prune(callback: () => void): void;
|
||||
// NOTE TO CONTRIBUTORS ////////////////////////////////////////
|
||||
// Callback is currently never called for `.prune()` //
|
||||
// so the types are commented out until we support it. //
|
||||
////////////////////////////////////////////////////////////////
|
||||
// /**
|
||||
// * Attach a callback that is called when the module is removed from the module graph.
|
||||
// *
|
||||
// * This can be used to clean up resources that were created when the module was loaded.
|
||||
// * Unlike `import.meta.hot.dispose()`, this pairs much better with `accept` and `data` to manage stateful resources.
|
||||
// *
|
||||
// * @example
|
||||
// * ```ts
|
||||
// * export const ws = (import.meta.hot.data.ws ??= new WebSocket(location.origin));
|
||||
// *
|
||||
// * import.meta.hot.prune(() => {
|
||||
// * ws.close();
|
||||
// * });
|
||||
// * ```
|
||||
// */
|
||||
// prune(callback: () => void): void;
|
||||
|
||||
/**
|
||||
* Listen for an event from the dev server
|
||||
*
|
||||
* For compatibility with Vite, event names are also available via vite:* prefix instead of bun:*.
|
||||
*
|
||||
* https://bun.sh/docs/bundler/hmr#import-meta-hot-on-and-off
|
||||
* @param event The event to listen to
|
||||
* @param callback The callback to call when the event is emitted
|
||||
*/
|
||||
on(event: Bun.HMREvent, callback: () => void): void;
|
||||
/**
|
||||
* Listen for an event from the dev server
|
||||
*
|
||||
* For compatibility with Vite, event names are also available via vite:* prefix instead of bun:*.
|
||||
*
|
||||
* https://bun.sh/docs/bundler/hmr#import-meta-hot-on-and-off
|
||||
* @param event The event to listen to
|
||||
* @param callback The callback to call when the event is emitted
|
||||
*/
|
||||
on(event: Bun.HMREvent, callback: () => void): void;
|
||||
|
||||
/**
|
||||
* Stop listening for an event from the dev server
|
||||
*
|
||||
* For compatibility with Vite, event names are also available via vite:* prefix instead of bun:*.
|
||||
*
|
||||
* https://bun.sh/docs/bundler/hmr#import-meta-hot-on-and-off
|
||||
* @param event The event to stop listening to
|
||||
* @param callback The callback to stop listening to
|
||||
*/
|
||||
off(event: Bun.HMREvent, callback: () => void): void;
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Stop listening for an event from the dev server
|
||||
*
|
||||
* For compatibility with Vite, event names are also available via vite:* prefix instead of bun:*.
|
||||
*
|
||||
* https://bun.sh/docs/bundler/hmr#import-meta-hot-on-and-off
|
||||
* @param event The event to stop listening to
|
||||
* @param callback The callback to stop listening to
|
||||
*/
|
||||
off(event: Bun.HMREvent, callback: () => void): void;
|
||||
};
|
||||
}
|
||||
|
||||
@@ -19,13 +19,7 @@ declare module "*/bun.lock" {
|
||||
}
|
||||
|
||||
declare module "*.html" {
|
||||
// In Bun v1.2, we might change this to Bun.HTMLBundle
|
||||
// In Bun v1.2, this might change to Bun.HTMLBundle
|
||||
var contents: any;
|
||||
export = contents;
|
||||
}
|
||||
|
||||
declare module "*.svg" {
|
||||
// Bun 1.2.3 added support for frontend dev server
|
||||
var contents: `${string}.svg`;
|
||||
export = contents;
|
||||
}
|
||||
223
packages/bun-types/fetch.d.ts
vendored
223
packages/bun-types/fetch.d.ts
vendored
@@ -1,161 +1,72 @@
|
||||
interface Headers {
|
||||
/**
|
||||
* Convert {@link Headers} to a plain JavaScript object.
|
||||
*
|
||||
* About 10x faster than `Object.fromEntries(headers.entries())`
|
||||
*
|
||||
* Called when you run `JSON.stringify(headers)`
|
||||
*
|
||||
* Does not preserve insertion order. Well-known header names are lowercased. Other header names are left as-is.
|
||||
*/
|
||||
toJSON(): Record<string, string>;
|
||||
/**
|
||||
* Get the total number of headers
|
||||
*/
|
||||
readonly count: number;
|
||||
/**
|
||||
* Get all headers matching the name
|
||||
*
|
||||
* Only supports `"Set-Cookie"`. All other headers are empty arrays.
|
||||
*
|
||||
* @param name - The header name to get
|
||||
*
|
||||
* @returns An array of header values
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const headers = new Headers();
|
||||
* headers.append("Set-Cookie", "foo=bar");
|
||||
* headers.append("Set-Cookie", "baz=qux");
|
||||
* headers.getAll("Set-Cookie"); // ["foo=bar", "baz=qux"]
|
||||
* ```
|
||||
*/
|
||||
getAll(name: "set-cookie" | "Set-Cookie"): string[];
|
||||
}
|
||||
/*
|
||||
|
||||
var Headers: {
|
||||
prototype: Headers;
|
||||
new (init?: Bun.HeadersInit): Headers;
|
||||
};
|
||||
This file does not declare any global types.
|
||||
|
||||
interface Request {
|
||||
headers: Headers;
|
||||
}
|
||||
That should only happen in [./globals.d.ts](./globals.d.ts)
|
||||
so that our documentation generator can pick it up, as it
|
||||
expects all globals to be declared in one file.
|
||||
|
||||
var Request: {
|
||||
prototype: Request;
|
||||
new (requestInfo: string, requestInit?: RequestInit): Request;
|
||||
new (requestInfo: RequestInit & { url: string }): Request;
|
||||
new (requestInfo: Request, requestInit?: RequestInit): Request;
|
||||
};
|
||||
|
||||
var Response: {
|
||||
new (body?: Bun.BodyInit | null | undefined, init?: Bun.ResponseInit | undefined): Response;
|
||||
/**
|
||||
* Create a new {@link Response} with a JSON body
|
||||
*
|
||||
* @param body - The body of the response
|
||||
* @param options - options to pass to the response
|
||||
*
|
||||
* @example
|
||||
*
|
||||
* ```ts
|
||||
* const response = Response.json({hi: "there"});
|
||||
* console.assert(
|
||||
* await response.text(),
|
||||
* `{"hi":"there"}`
|
||||
* );
|
||||
* ```
|
||||
* -------
|
||||
*
|
||||
* This is syntactic sugar for:
|
||||
* ```js
|
||||
* new Response(JSON.stringify(body), {headers: { "Content-Type": "application/json" }})
|
||||
* ```
|
||||
* @link https://github.com/whatwg/fetch/issues/1389
|
||||
*/
|
||||
json(body?: any, options?: Bun.ResponseInit | number): Response;
|
||||
|
||||
/**
|
||||
* Create a new {@link Response} that redirects to url
|
||||
*
|
||||
* @param url - the URL to redirect to
|
||||
* @param status - the HTTP status code to use for the redirect
|
||||
*/
|
||||
// tslint:disable-next-line:unified-signatures
|
||||
redirect(url: string, status?: number): Response;
|
||||
|
||||
/**
|
||||
* Create a new {@link Response} that redirects to url
|
||||
*
|
||||
* @param url - the URL to redirect to
|
||||
* @param options - options to pass to the response
|
||||
*/
|
||||
// tslint:disable-next-line:unified-signatures
|
||||
redirect(url: string, options?: Bun.ResponseInit): Response;
|
||||
|
||||
/**
|
||||
* Create a new {@link Response} that has a network error
|
||||
*/
|
||||
error(): Response;
|
||||
};
|
||||
|
||||
type _BunTLSOptions = import("bun").TLSOptions;
|
||||
interface BunFetchRequestInitTLS extends _BunTLSOptions {
|
||||
/**
|
||||
* Custom function to check the server identity
|
||||
* @param hostname - The hostname of the server
|
||||
* @param cert - The certificate of the server
|
||||
* @returns An error if the server is unauthorized, otherwise undefined
|
||||
*/
|
||||
checkServerIdentity?: NonNullable<import("node:tls").ConnectionOptions["checkServerIdentity"]>;
|
||||
}
|
||||
|
||||
/**
|
||||
* BunFetchRequestInit represents additional options that Bun supports in `fetch()` only.
|
||||
*
|
||||
* Bun extends the `fetch` API with some additional options, except
|
||||
* this interface is not quite a `RequestInit`, because they won't work
|
||||
* if passed to `new Request()`. This is why it's a separate type.
|
||||
*/
|
||||
interface BunFetchRequestInit extends RequestInit {
|
||||
/**
|
||||
* Override the default TLS options
|
||||
*/
|
||||
tls?: BunFetchRequestInitTLS;
|
||||
|
||||
declare module "bun" {
|
||||
type HeadersInit = string[][] | Record<string, string | ReadonlyArray<string>> | Headers;
|
||||
type BodyInit = ReadableStream | Bun.XMLHttpRequestBodyInit | URLSearchParams | AsyncGenerator<Uint8Array>;
|
||||
|
||||
namespace __internal {
|
||||
type LibOrFallbackHeaders = LibDomIsLoaded extends true ? {} : import("undici-types").Headers;
|
||||
type LibOrFallbackRequest = LibDomIsLoaded extends true ? {} : import("undici-types").Request;
|
||||
type LibOrFallbackResponse = LibDomIsLoaded extends true ? {} : import("undici-types").Response;
|
||||
type LibOrFallbackResponseInit = LibDomIsLoaded extends true ? {} : import("undici-types").ResponseInit;
|
||||
type LibOrFallbackRequestInit = LibDomIsLoaded extends true
|
||||
? {}
|
||||
: Omit<import("undici-types").RequestInit, "body" | "headers"> & {
|
||||
body?: Bun.BodyInit | null | undefined;
|
||||
headers?: Bun.HeadersInit;
|
||||
};
|
||||
|
||||
interface BunHeadersOverride extends LibOrFallbackHeaders {
|
||||
/**
|
||||
* Convert {@link Headers} to a plain JavaScript object.
|
||||
*
|
||||
* About 10x faster than `Object.fromEntries(headers.entries())`
|
||||
*
|
||||
* Called when you run `JSON.stringify(headers)`
|
||||
*
|
||||
* Does not preserve insertion order. Well-known header names are lowercased. Other header names are left as-is.
|
||||
*/
|
||||
toJSON(): Record<string, string>;
|
||||
|
||||
/**
|
||||
* Get the total number of headers
|
||||
*/
|
||||
readonly count: number;
|
||||
|
||||
/**
|
||||
* Get all headers matching the name
|
||||
*
|
||||
* Only supports `"Set-Cookie"`. All other headers are empty arrays.
|
||||
*
|
||||
* @param name - The header name to get
|
||||
*
|
||||
* @returns An array of header values
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const headers = new Headers();
|
||||
* headers.append("Set-Cookie", "foo=bar");
|
||||
* headers.append("Set-Cookie", "baz=qux");
|
||||
* headers.getAll("Set-Cookie"); // ["foo=bar", "baz=qux"]
|
||||
* ```
|
||||
*/
|
||||
getAll(name: "set-cookie" | "Set-Cookie"): string[];
|
||||
}
|
||||
|
||||
interface BunRequestOverride extends LibOrFallbackRequest {
|
||||
headers: BunHeadersOverride;
|
||||
}
|
||||
|
||||
interface BunResponseOverride extends LibOrFallbackResponse {
|
||||
headers: BunHeadersOverride;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var fetch: {
|
||||
/**
|
||||
* Send a HTTP(s) request
|
||||
*
|
||||
* @param request Request object
|
||||
* @param init A structured value that contains settings for the fetch() request.
|
||||
*
|
||||
* @returns A promise that resolves to {@link Response} object.
|
||||
*/
|
||||
(request: Request, init?: BunFetchRequestInit): Promise<Response>;
|
||||
|
||||
/**
|
||||
* Send a HTTP(s) request
|
||||
*
|
||||
* @param url URL string
|
||||
* @param init A structured value that contains settings for the fetch() request.
|
||||
*
|
||||
* @returns A promise that resolves to {@link Response} object.
|
||||
*/
|
||||
(url: string | URL | Request, init?: BunFetchRequestInit): Promise<Response>;
|
||||
|
||||
(input: string | URL | globalThis.Request, init?: BunFetchRequestInit): Promise<Response>;
|
||||
|
||||
/**
|
||||
* Start the DNS resolution, TCP connection, and TLS handshake for a request
|
||||
* before the request is actually sent.
|
||||
*
|
||||
* This can reduce the latency of a request when you know there's some
|
||||
* long-running task that will delay the request starting.
|
||||
*
|
||||
* This is a bun-specific API and is not part of the Fetch API specification.
|
||||
*/
|
||||
preconnect(url: string | URL): void;
|
||||
};
|
||||
|
||||
27
packages/bun-types/ffi.d.ts
vendored
27
packages/bun-types/ffi.d.ts
vendored
@@ -13,6 +13,8 @@
|
||||
* that convert JavaScript types to C types and back. Internally,
|
||||
* bun uses [tinycc](https://github.com/TinyCC/tinycc), so a big thanks
|
||||
* goes to Fabrice Bellard and TinyCC maintainers for making this possible.
|
||||
*
|
||||
* @category FFI
|
||||
*/
|
||||
declare module "bun:ffi" {
|
||||
enum FFIType {
|
||||
@@ -543,14 +545,6 @@ declare module "bun:ffi" {
|
||||
|
||||
type Symbols = Readonly<Record<string, FFIFunction>>;
|
||||
|
||||
// /**
|
||||
// * Compile a callback function
|
||||
// *
|
||||
// * Returns a function pointer
|
||||
// *
|
||||
// */
|
||||
// export function callback(ffi: FFIFunction, cb: Function): number;
|
||||
|
||||
interface Library<Fns extends Symbols> {
|
||||
symbols: ConvertFns<Fns>;
|
||||
|
||||
@@ -608,6 +602,8 @@ declare module "bun:ffi" {
|
||||
* that convert JavaScript types to C types and back. Internally,
|
||||
* bun uses [tinycc](https://github.com/TinyCC/tinycc), so a big thanks
|
||||
* goes to Fabrice Bellard and TinyCC maintainers for making this possible.
|
||||
*
|
||||
* @category FFI
|
||||
*/
|
||||
function dlopen<Fns extends Record<string, FFIFunction>>(
|
||||
name: string | import("bun").BunFile | URL,
|
||||
@@ -626,9 +622,9 @@ declare module "bun:ffi" {
|
||||
* JavaScript:
|
||||
* ```js
|
||||
* import { cc } from "bun:ffi";
|
||||
* import hello from "./hello.c" with {type: "file"};
|
||||
* import source from "./hello.c" with {type: "file"};
|
||||
* const {symbols: {hello}} = cc({
|
||||
* source: hello,
|
||||
* source,
|
||||
* symbols: {
|
||||
* hello: {
|
||||
* returns: "cstring",
|
||||
@@ -681,8 +677,9 @@ declare module "bun:ffi" {
|
||||
* @example
|
||||
* ```js
|
||||
* import { cc } from "bun:ffi";
|
||||
* import source from "./hello.c" with {type: "file"};
|
||||
* const {symbols: {hello}} = cc({
|
||||
* source: hello,
|
||||
* source,
|
||||
* define: {
|
||||
* "NDEBUG": "1",
|
||||
* },
|
||||
@@ -707,8 +704,9 @@ declare module "bun:ffi" {
|
||||
* @example
|
||||
* ```js
|
||||
* import { cc } from "bun:ffi";
|
||||
* import source from "./hello.c" with {type: "file"};
|
||||
* const {symbols: {hello}} = cc({
|
||||
* source: hello,
|
||||
* source,
|
||||
* flags: ["-framework CoreFoundation", "-framework Security"],
|
||||
* symbols: {
|
||||
* hello: {
|
||||
@@ -1024,6 +1022,8 @@ declare module "bun:ffi" {
|
||||
* // Do something with rawPtr
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* @category FFI
|
||||
*/
|
||||
function ptr(view: NodeJS.TypedArray | ArrayBufferLike | DataView, byteOffset?: number): Pointer;
|
||||
|
||||
@@ -1048,8 +1048,9 @@ declare module "bun:ffi" {
|
||||
* thing to do safely. Passing an invalid pointer can crash the program and
|
||||
* reading beyond the bounds of the pointer will crash the program or cause
|
||||
* undefined behavior. Use with care!
|
||||
*
|
||||
* @category FFI
|
||||
*/
|
||||
|
||||
class CString extends String {
|
||||
/**
|
||||
* Get a string from a UTF-8 encoded C string
|
||||
|
||||
3454
packages/bun-types/globals.d.ts
vendored
3454
packages/bun-types/globals.d.ts
vendored
File diff suppressed because it is too large
Load Diff
2
packages/bun-types/html-rewriter.d.ts
vendored
2
packages/bun-types/html-rewriter.d.ts
vendored
@@ -147,6 +147,8 @@ declare namespace HTMLRewriterTypes {
|
||||
* });
|
||||
* rewriter.transform(await fetch("https://remix.run"));
|
||||
* ```
|
||||
*
|
||||
* @category HTML Manipulation
|
||||
*/
|
||||
declare class HTMLRewriter {
|
||||
constructor();
|
||||
|
||||
24
packages/bun-types/index.d.ts
vendored
24
packages/bun-types/index.d.ts
vendored
@@ -1,24 +1,26 @@
|
||||
// Project: https://github.com/oven-sh/bun
|
||||
// Definitions by: Jarred Sumner <https://github.com/Jarred-Sumner>
|
||||
// Definitions by: Bun Contributors <https://github.com/oven-sh/bun/graphs/contributors>
|
||||
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
|
||||
|
||||
/// <reference lib="esnext" />
|
||||
/// <reference types="ws" />
|
||||
/// <reference types="node" />
|
||||
|
||||
// contributors: uncomment this to detect conflicts with lib.dom.d.ts
|
||||
// /// <reference lib="dom" />
|
||||
|
||||
/// <reference path="./bun.d.ts" />
|
||||
/// <reference path="./globals.d.ts" />
|
||||
/// <reference path="./s3.d.ts" />
|
||||
/// <reference path="./fetch.d.ts" />
|
||||
/// <reference path="./overrides.d.ts" />
|
||||
/// <reference path="./bun.d.ts" />
|
||||
/// <reference path="./extensions.d.ts" />
|
||||
/// <reference path="./devserver.d.ts" />
|
||||
/// <reference path="./ffi.d.ts" />
|
||||
/// <reference path="./test.d.ts" />
|
||||
/// <reference path="./html-rewriter.d.ts" />
|
||||
/// <reference path="./jsc.d.ts" />
|
||||
/// <reference path="./sqlite.d.ts" />
|
||||
/// <reference path="./test.d.ts" />
|
||||
/// <reference path="./wasm.d.ts" />
|
||||
/// <reference path="./overrides.d.ts" />
|
||||
/// <reference path="./deprecated.d.ts" />
|
||||
/// <reference path="./ambient.d.ts" />
|
||||
/// <reference path="./devserver.d.ts" />
|
||||
/// <reference path="./redis.d.ts" />
|
||||
/// <reference path="./bun.ns.d.ts" />
|
||||
|
||||
// @ts-ignore Must disable this so it doesn't conflict with the DOM onmessage type, but still
|
||||
// allows us to declare our own globals that Node's types can "see" and not conflict with
|
||||
declare var onmessage: never;
|
||||
|
||||
160
packages/bun-types/overrides.d.ts
vendored
160
packages/bun-types/overrides.d.ts
vendored
@@ -1,18 +1,160 @@
|
||||
export {};
|
||||
|
||||
import type { BunFile, Env, PathLike } from "bun";
|
||||
|
||||
declare global {
|
||||
namespace NodeJS {
|
||||
interface ProcessEnv extends Bun.Env, ImportMetaEnv {}
|
||||
|
||||
interface Process {
|
||||
readonly version: string;
|
||||
browser: boolean;
|
||||
|
||||
/**
|
||||
* Whether you are using Bun
|
||||
*/
|
||||
isBun: true;
|
||||
|
||||
/**
|
||||
* The current git sha of Bun
|
||||
*/
|
||||
revision: string;
|
||||
|
||||
reallyExit(code?: number): never;
|
||||
dlopen(module: { exports: any }, filename: string, flags?: number): void;
|
||||
_exiting: boolean;
|
||||
noDeprecation: boolean;
|
||||
|
||||
binding(m: "constants"): {
|
||||
os: typeof import("node:os").constants;
|
||||
fs: typeof import("node:fs").constants;
|
||||
crypto: typeof import("node:crypto").constants;
|
||||
zlib: typeof import("node:zlib").constants;
|
||||
trace: {
|
||||
TRACE_EVENT_PHASE_BEGIN: number;
|
||||
TRACE_EVENT_PHASE_END: number;
|
||||
TRACE_EVENT_PHASE_COMPLETE: number;
|
||||
TRACE_EVENT_PHASE_INSTANT: number;
|
||||
TRACE_EVENT_PHASE_ASYNC_BEGIN: number;
|
||||
TRACE_EVENT_PHASE_ASYNC_STEP_INTO: number;
|
||||
TRACE_EVENT_PHASE_ASYNC_STEP_PAST: number;
|
||||
TRACE_EVENT_PHASE_ASYNC_END: number;
|
||||
TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN: number;
|
||||
TRACE_EVENT_PHASE_NESTABLE_ASYNC_END: number;
|
||||
TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT: number;
|
||||
TRACE_EVENT_PHASE_FLOW_BEGIN: number;
|
||||
TRACE_EVENT_PHASE_FLOW_STEP: number;
|
||||
TRACE_EVENT_PHASE_FLOW_END: number;
|
||||
TRACE_EVENT_PHASE_METADATA: number;
|
||||
TRACE_EVENT_PHASE_COUNTER: number;
|
||||
TRACE_EVENT_PHASE_SAMPLE: number;
|
||||
TRACE_EVENT_PHASE_CREATE_OBJECT: number;
|
||||
TRACE_EVENT_PHASE_SNAPSHOT_OBJECT: number;
|
||||
TRACE_EVENT_PHASE_DELETE_OBJECT: number;
|
||||
TRACE_EVENT_PHASE_MEMORY_DUMP: number;
|
||||
TRACE_EVENT_PHASE_MARK: number;
|
||||
TRACE_EVENT_PHASE_CLOCK_SYNC: number;
|
||||
TRACE_EVENT_PHASE_ENTER_CONTEXT: number;
|
||||
TRACE_EVENT_PHASE_LEAVE_CONTEXT: number;
|
||||
TRACE_EVENT_PHASE_LINK_IDS: number;
|
||||
};
|
||||
};
|
||||
binding(m: "uv"): {
|
||||
errname(code: number): string;
|
||||
UV_E2BIG: number;
|
||||
UV_EACCES: number;
|
||||
UV_EADDRINUSE: number;
|
||||
UV_EADDRNOTAVAIL: number;
|
||||
UV_EAFNOSUPPORT: number;
|
||||
UV_EAGAIN: number;
|
||||
UV_EAI_ADDRFAMILY: number;
|
||||
UV_EAI_AGAIN: number;
|
||||
UV_EAI_BADFLAGS: number;
|
||||
UV_EAI_BADHINTS: number;
|
||||
UV_EAI_CANCELED: number;
|
||||
UV_EAI_FAIL: number;
|
||||
UV_EAI_FAMILY: number;
|
||||
UV_EAI_MEMORY: number;
|
||||
UV_EAI_NODATA: number;
|
||||
UV_EAI_NONAME: number;
|
||||
UV_EAI_OVERFLOW: number;
|
||||
UV_EAI_PROTOCOL: number;
|
||||
UV_EAI_SERVICE: number;
|
||||
UV_EAI_SOCKTYPE: number;
|
||||
UV_EALREADY: number;
|
||||
UV_EBADF: number;
|
||||
UV_EBUSY: number;
|
||||
UV_ECANCELED: number;
|
||||
UV_ECHARSET: number;
|
||||
UV_ECONNABORTED: number;
|
||||
UV_ECONNREFUSED: number;
|
||||
UV_ECONNRESET: number;
|
||||
UV_EDESTADDRREQ: number;
|
||||
UV_EEXIST: number;
|
||||
UV_EFAULT: number;
|
||||
UV_EFBIG: number;
|
||||
UV_EHOSTUNREACH: number;
|
||||
UV_EINTR: number;
|
||||
UV_EINVAL: number;
|
||||
UV_EIO: number;
|
||||
UV_EISCONN: number;
|
||||
UV_EISDIR: number;
|
||||
UV_ELOOP: number;
|
||||
UV_EMFILE: number;
|
||||
UV_EMSGSIZE: number;
|
||||
UV_ENAMETOOLONG: number;
|
||||
UV_ENETDOWN: number;
|
||||
UV_ENETUNREACH: number;
|
||||
UV_ENFILE: number;
|
||||
UV_ENOBUFS: number;
|
||||
UV_ENODEV: number;
|
||||
UV_ENOENT: number;
|
||||
UV_ENOMEM: number;
|
||||
UV_ENONET: number;
|
||||
UV_ENOPROTOOPT: number;
|
||||
UV_ENOSPC: number;
|
||||
UV_ENOSYS: number;
|
||||
UV_ENOTCONN: number;
|
||||
UV_ENOTDIR: number;
|
||||
UV_ENOTEMPTY: number;
|
||||
UV_ENOTSOCK: number;
|
||||
UV_ENOTSUP: number;
|
||||
UV_EOVERFLOW: number;
|
||||
UV_EPERM: number;
|
||||
UV_EPIPE: number;
|
||||
UV_EPROTO: number;
|
||||
UV_EPROTONOSUPPORT: number;
|
||||
UV_EPROTOTYPE: number;
|
||||
UV_ERANGE: number;
|
||||
UV_EROFS: number;
|
||||
UV_ESHUTDOWN: number;
|
||||
UV_ESPIPE: number;
|
||||
UV_ESRCH: number;
|
||||
UV_ETIMEDOUT: number;
|
||||
UV_ETXTBSY: number;
|
||||
UV_EXDEV: number;
|
||||
UV_UNKNOWN: number;
|
||||
UV_EOF: number;
|
||||
UV_ENXIO: number;
|
||||
UV_EMLINK: number;
|
||||
UV_EHOSTDOWN: number;
|
||||
UV_EREMOTEIO: number;
|
||||
UV_ENOTTY: number;
|
||||
UV_EFTYPE: number;
|
||||
UV_EILSEQ: number;
|
||||
UV_ESOCKTNOSUPPORT: number;
|
||||
UV_ENODATA: number;
|
||||
UV_EUNATCH: number;
|
||||
};
|
||||
binding(m: string): object;
|
||||
}
|
||||
|
||||
interface ProcessVersions extends Dict<string> {
|
||||
bun: string;
|
||||
}
|
||||
interface ProcessEnv extends Env {}
|
||||
}
|
||||
}
|
||||
|
||||
declare module "fs/promises" {
|
||||
function exists(path: PathLike): Promise<boolean>;
|
||||
function exists(path: Bun.PathLike): Promise<boolean>;
|
||||
}
|
||||
|
||||
declare module "tls" {
|
||||
@@ -22,7 +164,7 @@ declare module "tls" {
|
||||
* the well-known CAs curated by Mozilla. Mozilla's CAs are completely
|
||||
* replaced when CAs are explicitly specified using this option.
|
||||
*/
|
||||
ca?: string | Buffer | NodeJS.TypedArray | BunFile | Array<string | Buffer | BunFile> | undefined;
|
||||
ca?: string | Buffer | NodeJS.TypedArray | Bun.BunFile | Array<string | Buffer | Bun.BunFile> | undefined;
|
||||
/**
|
||||
* Cert chains in PEM format. One cert chain should be provided per
|
||||
* private key. Each cert chain should consist of the PEM formatted
|
||||
@@ -38,8 +180,8 @@ declare module "tls" {
|
||||
| string
|
||||
| Buffer
|
||||
| NodeJS.TypedArray
|
||||
| BunFile
|
||||
| Array<string | Buffer | NodeJS.TypedArray | BunFile>
|
||||
| Bun.BunFile
|
||||
| Array<string | Buffer | NodeJS.TypedArray | Bun.BunFile>
|
||||
| undefined;
|
||||
/**
|
||||
* Private keys in PEM format. PEM allows the option of private keys
|
||||
@@ -54,9 +196,9 @@ declare module "tls" {
|
||||
key?:
|
||||
| string
|
||||
| Buffer
|
||||
| BunFile
|
||||
| Bun.BunFile
|
||||
| NodeJS.TypedArray
|
||||
| Array<string | Buffer | BunFile | NodeJS.TypedArray | KeyObject>
|
||||
| Array<string | Buffer | Bun.BunFile | NodeJS.TypedArray | KeyObject>
|
||||
| undefined;
|
||||
}
|
||||
|
||||
|
||||
@@ -9,14 +9,14 @@
|
||||
"directory": "packages/bun-types"
|
||||
},
|
||||
"files": [
|
||||
"*.d.ts",
|
||||
"./*.d.ts",
|
||||
"docs/**/*.md",
|
||||
"docs/*.md"
|
||||
],
|
||||
"homepage": "https://bun.sh",
|
||||
"dependencies": {
|
||||
"@types/node": "*",
|
||||
"@types/ws": "~8.5.10"
|
||||
"@types/ws": "*"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "^1.5.3",
|
||||
@@ -27,7 +27,7 @@
|
||||
"scripts": {
|
||||
"prebuild": "echo $(pwd)",
|
||||
"copy-docs": "rm -rf docs && cp -rL ../../docs/ ./docs && find ./docs -type f -name '*.md' -exec sed -i 's/\\$BUN_LATEST_VERSION/'\"${BUN_VERSION#bun-v}\"'/g' {} +",
|
||||
"build": "bun run copy-docs && bun scripts/build.ts && bun run fmt",
|
||||
"build": "bun run copy-docs && bun scripts/build.ts",
|
||||
"test": "tsc",
|
||||
"fmt": "echo $(which biome) && biome format --write ."
|
||||
},
|
||||
|
||||
610
packages/bun-types/redis.d.ts
vendored
Normal file
610
packages/bun-types/redis.d.ts
vendored
Normal file
@@ -0,0 +1,610 @@
|
||||
declare module "bun" {
|
||||
export interface RedisOptions {
|
||||
/**
|
||||
* URL to connect to, defaults to "redis://localhost:6379"
|
||||
* Supported protocols: redis://, rediss://, redis+unix://, redis+tls://
|
||||
*/
|
||||
url?: string;
|
||||
|
||||
/**
|
||||
* Connection timeout in milliseconds
|
||||
* @default 10000
|
||||
*/
|
||||
connectionTimeout?: number;
|
||||
|
||||
/**
|
||||
* Idle timeout in milliseconds
|
||||
* @default 0 (no timeout)
|
||||
*/
|
||||
idleTimeout?: number;
|
||||
|
||||
/**
|
||||
* Whether to automatically reconnect
|
||||
* @default true
|
||||
*/
|
||||
autoReconnect?: boolean;
|
||||
|
||||
/**
|
||||
* Maximum number of reconnection attempts
|
||||
* @default 10
|
||||
*/
|
||||
maxRetries?: number;
|
||||
|
||||
/**
|
||||
* Whether to queue commands when disconnected
|
||||
* @default true
|
||||
*/
|
||||
enableOfflineQueue?: boolean;
|
||||
|
||||
/**
|
||||
* TLS options
|
||||
* Can be a boolean or an object with TLS options
|
||||
*/
|
||||
tls?:
|
||||
| boolean
|
||||
| {
|
||||
key?: string | Buffer;
|
||||
cert?: string | Buffer;
|
||||
ca?: string | Buffer | Array<string | Buffer>;
|
||||
rejectUnauthorized?: boolean;
|
||||
};
|
||||
|
||||
/**
|
||||
* Whether to enable auto-pipelining
|
||||
* @default true
|
||||
*/
|
||||
enableAutoPipelining?: boolean;
|
||||
}
|
||||
|
||||
export class RedisClient {
|
||||
/**
|
||||
* Creates a new Redis client
|
||||
* @param url URL to connect to, defaults to process.env.VALKEY_URL, process.env.REDIS_URL, or "valkey://localhost:6379"
|
||||
* @param options Additional options
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const valkey = new RedisClient();
|
||||
*
|
||||
* await valkey.set("hello", "world");
|
||||
*
|
||||
* console.log(await valkey.get("hello"));
|
||||
* ```
|
||||
*/
|
||||
constructor(url?: string, options?: RedisOptions);
|
||||
|
||||
/**
|
||||
* Whether the client is connected to the Redis server
|
||||
*/
|
||||
readonly connected: boolean;
|
||||
|
||||
/**
|
||||
* Amount of data buffered in bytes
|
||||
*/
|
||||
readonly bufferedAmount: number;
|
||||
|
||||
/**
|
||||
* Callback fired when the client connects to the Redis server
|
||||
*/
|
||||
onconnect: ((this: RedisClient) => void) | null;
|
||||
|
||||
/**
|
||||
* Callback fired when the client disconnects from the Redis server
|
||||
* @param error The error that caused the disconnection
|
||||
*/
|
||||
onclose: ((this: RedisClient, error: Error) => void) | null;
|
||||
|
||||
/**
|
||||
* Connect to the Redis server
|
||||
* @returns A promise that resolves when connected
|
||||
*/
|
||||
connect(): Promise<void>;
|
||||
|
||||
/**
|
||||
* Disconnect from the Redis server
|
||||
*/
|
||||
close(): void;
|
||||
|
||||
/**
|
||||
* Send a raw command to the Redis server
|
||||
* @param command The command to send
|
||||
* @param args The arguments to the command
|
||||
* @returns A promise that resolves with the command result
|
||||
*/
|
||||
send(command: string, args: string[]): Promise<any>;
|
||||
|
||||
/**
|
||||
* Get the value of a key
|
||||
* @param key The key to get
|
||||
* @returns Promise that resolves with the key's value, or null if the key doesn't exist
|
||||
*/
|
||||
get(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Set key to hold the string value
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @returns Promise that resolves with "OK" on success
|
||||
*/
|
||||
set(key: string | ArrayBufferView | Blob, value: string | ArrayBufferView | Blob): Promise<"OK">;
|
||||
|
||||
/**
|
||||
* Set key to hold the string value with expiration
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @param ex Set the specified expire time, in seconds
|
||||
* @returns Promise that resolves with "OK" on success
|
||||
*/
|
||||
set(
|
||||
key: string | ArrayBufferView | Blob,
|
||||
value: string | ArrayBufferView | Blob,
|
||||
ex: "EX",
|
||||
seconds: number,
|
||||
): Promise<"OK">;
|
||||
|
||||
/**
|
||||
* Set key to hold the string value with expiration
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @param px Set the specified expire time, in milliseconds
|
||||
* @returns Promise that resolves with "OK" on success
|
||||
*/
|
||||
set(
|
||||
key: string | ArrayBufferView | Blob,
|
||||
value: string | ArrayBufferView | Blob,
|
||||
px: "PX",
|
||||
milliseconds: number,
|
||||
): Promise<"OK">;
|
||||
|
||||
/**
|
||||
* Set key to hold the string value with expiration at a specific Unix timestamp
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @param exat Set the specified Unix time at which the key will expire, in seconds
|
||||
* @returns Promise that resolves with "OK" on success
|
||||
*/
|
||||
set(
|
||||
key: string | ArrayBufferView | Blob,
|
||||
value: string | ArrayBufferView | Blob,
|
||||
exat: "EXAT",
|
||||
timestampSeconds: number,
|
||||
): Promise<"OK">;
|
||||
|
||||
/**
|
||||
* Set key to hold the string value with expiration at a specific Unix timestamp
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @param pxat Set the specified Unix time at which the key will expire, in milliseconds
|
||||
* @returns Promise that resolves with "OK" on success
|
||||
*/
|
||||
set(
|
||||
key: string | ArrayBufferView | Blob,
|
||||
value: string | ArrayBufferView | Blob,
|
||||
pxat: "PXAT",
|
||||
timestampMilliseconds: number,
|
||||
): Promise<"OK">;
|
||||
|
||||
/**
|
||||
* Set key to hold the string value only if key does not exist
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @param nx Only set the key if it does not already exist
|
||||
* @returns Promise that resolves with "OK" on success, or null if the key already exists
|
||||
*/
|
||||
set(key: string | ArrayBufferView | Blob, value: string | ArrayBufferView | Blob, nx: "NX"): Promise<"OK" | null>;
|
||||
|
||||
/**
|
||||
* Set key to hold the string value only if key already exists
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @param xx Only set the key if it already exists
|
||||
* @returns Promise that resolves with "OK" on success, or null if the key does not exist
|
||||
*/
|
||||
set(key: string | ArrayBufferView | Blob, value: string | ArrayBufferView | Blob, xx: "XX"): Promise<"OK" | null>;
|
||||
|
||||
/**
|
||||
* Set key to hold the string value and return the old value
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @param get Return the old string stored at key, or null if key did not exist
|
||||
* @returns Promise that resolves with the old value, or null if key did not exist
|
||||
*/
|
||||
set(
|
||||
key: string | ArrayBufferView | Blob,
|
||||
value: string | ArrayBufferView | Blob,
|
||||
get: "GET",
|
||||
): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Set key to hold the string value and retain the time to live
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @param keepttl Retain the time to live associated with the key
|
||||
* @returns Promise that resolves with "OK" on success
|
||||
*/
|
||||
set(
|
||||
key: string | ArrayBufferView | Blob,
|
||||
value: string | ArrayBufferView | Blob,
|
||||
keepttl: "KEEPTTL",
|
||||
): Promise<"OK">;
|
||||
|
||||
/**
|
||||
* Set key to hold the string value with various options
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @param options Array of options (EX, PX, EXAT, PXAT, NX, XX, KEEPTTL, GET)
|
||||
* @returns Promise that resolves with "OK" on success, null if NX/XX condition not met, or the old value if GET is specified
|
||||
*/
|
||||
set(
|
||||
key: string | ArrayBufferView | Blob,
|
||||
value: string | ArrayBufferView | Blob,
|
||||
...options: string[]
|
||||
): Promise<"OK" | string | null>;
|
||||
|
||||
/**
|
||||
* Delete a key
|
||||
* @param key The key to delete
|
||||
* @returns Promise that resolves with the number of keys removed
|
||||
*/
|
||||
del(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Increment the integer value of a key by one
|
||||
* @param key The key to increment
|
||||
* @returns Promise that resolves with the new value
|
||||
*/
|
||||
incr(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Decrement the integer value of a key by one
|
||||
* @param key The key to decrement
|
||||
* @returns Promise that resolves with the new value
|
||||
*/
|
||||
decr(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Determine if a key exists
|
||||
* @param key The key to check
|
||||
* @returns Promise that resolves with true if the key exists, false otherwise
|
||||
*/
|
||||
exists(key: string | ArrayBufferView | Blob): Promise<boolean>;
|
||||
|
||||
/**
|
||||
* Set a key's time to live in seconds
|
||||
* @param key The key to set the expiration for
|
||||
* @param seconds The number of seconds until expiration
|
||||
* @returns Promise that resolves with 1 if the timeout was set, 0 if not
|
||||
*/
|
||||
expire(key: string | ArrayBufferView | Blob, seconds: number): Promise<number>;
|
||||
|
||||
/**
|
||||
* Get the time to live for a key in seconds
|
||||
* @param key The key to get the TTL for
|
||||
* @returns Promise that resolves with the TTL, -1 if no expiry, or -2 if key doesn't exist
|
||||
*/
|
||||
ttl(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Set multiple hash fields to multiple values
|
||||
* @param key The hash key
|
||||
* @param fieldValues An array of alternating field names and values
|
||||
* @returns Promise that resolves with "OK" on success
|
||||
*/
|
||||
hmset(key: string | ArrayBufferView | Blob, fieldValues: string[]): Promise<string>;
|
||||
|
||||
/**
|
||||
* Get the values of all the given hash fields
|
||||
* @param key The hash key
|
||||
* @param fields The fields to get
|
||||
* @returns Promise that resolves with an array of values
|
||||
*/
|
||||
hmget(key: string | ArrayBufferView | Blob, fields: string[]): Promise<Array<string | null>>;
|
||||
|
||||
/**
|
||||
* Check if a value is a member of a set
|
||||
* @param key The set key
|
||||
* @param member The member to check
|
||||
* @returns Promise that resolves with true if the member exists, false otherwise
|
||||
*/
|
||||
sismember(key: string | ArrayBufferView | Blob, member: string): Promise<boolean>;
|
||||
|
||||
/**
|
||||
* Add a member to a set
|
||||
* @param key The set key
|
||||
* @param member The member to add
|
||||
* @returns Promise that resolves with 1 if the member was added, 0 if it already existed
|
||||
*/
|
||||
sadd(key: string | ArrayBufferView | Blob, member: string): Promise<number>;
|
||||
|
||||
/**
|
||||
* Remove a member from a set
|
||||
* @param key The set key
|
||||
* @param member The member to remove
|
||||
* @returns Promise that resolves with 1 if the member was removed, 0 if it didn't exist
|
||||
*/
|
||||
srem(key: string | ArrayBufferView | Blob, member: string): Promise<number>;
|
||||
|
||||
/**
|
||||
* Get all the members in a set
|
||||
* @param key The set key
|
||||
* @returns Promise that resolves with an array of all members
|
||||
*/
|
||||
smembers(key: string | ArrayBufferView | Blob): Promise<string[]>;
|
||||
|
||||
/**
|
||||
* Get a random member from a set
|
||||
* @param key The set key
|
||||
* @returns Promise that resolves with a random member, or null if the set is empty
|
||||
*/
|
||||
srandmember(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Remove and return a random member from a set
|
||||
* @param key The set key
|
||||
* @returns Promise that resolves with the removed member, or null if the set is empty
|
||||
*/
|
||||
spop(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Increment the integer value of a hash field by the given number
|
||||
* @param key The hash key
|
||||
* @param field The field to increment
|
||||
* @param increment The amount to increment by
|
||||
* @returns Promise that resolves with the new value
|
||||
*/
|
||||
hincrby(key: string | ArrayBufferView | Blob, field: string, increment: string | number): Promise<number>;
|
||||
|
||||
/**
|
||||
* Increment the float value of a hash field by the given amount
|
||||
* @param key The hash key
|
||||
* @param field The field to increment
|
||||
* @param increment The amount to increment by
|
||||
* @returns Promise that resolves with the new value as a string
|
||||
*/
|
||||
hincrbyfloat(key: string | ArrayBufferView | Blob, field: string, increment: string | number): Promise<string>;
|
||||
|
||||
/**
|
||||
* Get all the fields and values in a hash
|
||||
* @param key The hash key
|
||||
* @returns Promise that resolves with an object containing all fields and values
|
||||
*/
|
||||
hgetall(key: string | ArrayBufferView | Blob): Promise<Record<string, string> | null>;
|
||||
|
||||
/**
|
||||
* Get all field names in a hash
|
||||
* @param key The hash key
|
||||
* @returns Promise that resolves with an array of field names
|
||||
*/
|
||||
hkeys(key: string | ArrayBufferView | Blob): Promise<string[]>;
|
||||
|
||||
/**
|
||||
* Get the number of fields in a hash
|
||||
* @param key The hash key
|
||||
* @returns Promise that resolves with the number of fields
|
||||
*/
|
||||
hlen(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Get all values in a hash
|
||||
* @param key The hash key
|
||||
* @returns Promise that resolves with an array of values
|
||||
*/
|
||||
hvals(key: string | ArrayBufferView | Blob): Promise<string[]>;
|
||||
|
||||
/**
|
||||
* Find all keys matching the given pattern
|
||||
* @param pattern The pattern to match
|
||||
* @returns Promise that resolves with an array of matching keys
|
||||
*/
|
||||
keys(pattern: string): Promise<string[]>;
|
||||
|
||||
/**
|
||||
* Get the length of a list
|
||||
* @param key The list key
|
||||
* @returns Promise that resolves with the length of the list
|
||||
*/
|
||||
llen(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Remove and get the first element in a list
|
||||
* @param key The list key
|
||||
* @returns Promise that resolves with the first element, or null if the list is empty
|
||||
*/
|
||||
lpop(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Remove the expiration from a key
|
||||
* @param key The key to persist
|
||||
* @returns Promise that resolves with 1 if the timeout was removed, 0 if the key doesn't exist or has no timeout
|
||||
*/
|
||||
persist(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Get the expiration time of a key as a UNIX timestamp in milliseconds
|
||||
* @param key The key to check
|
||||
* @returns Promise that resolves with the timestamp, or -1 if the key has no expiration, or -2 if the key doesn't exist
|
||||
*/
|
||||
pexpiretime(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Get the time to live for a key in milliseconds
|
||||
* @param key The key to check
|
||||
* @returns Promise that resolves with the TTL in milliseconds, or -1 if the key has no expiration, or -2 if the key doesn't exist
|
||||
*/
|
||||
pttl(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Remove and get the last element in a list
|
||||
* @param key The list key
|
||||
* @returns Promise that resolves with the last element, or null if the list is empty
|
||||
*/
|
||||
rpop(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Get the number of members in a set
|
||||
* @param key The set key
|
||||
* @returns Promise that resolves with the cardinality (number of elements) of the set
|
||||
*/
|
||||
scard(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Get the length of the value stored in a key
|
||||
* @param key The key to check
|
||||
* @returns Promise that resolves with the length of the string value, or 0 if the key doesn't exist
|
||||
*/
|
||||
strlen(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Get the number of members in a sorted set
|
||||
* @param key The sorted set key
|
||||
* @returns Promise that resolves with the cardinality (number of elements) of the sorted set
|
||||
*/
|
||||
zcard(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Remove and return members with the highest scores in a sorted set
|
||||
* @param key The sorted set key
|
||||
* @returns Promise that resolves with the removed member and its score, or null if the set is empty
|
||||
*/
|
||||
zpopmax(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Remove and return members with the lowest scores in a sorted set
|
||||
* @param key The sorted set key
|
||||
* @returns Promise that resolves with the removed member and its score, or null if the set is empty
|
||||
*/
|
||||
zpopmin(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Get one or multiple random members from a sorted set
|
||||
* @param key The sorted set key
|
||||
* @returns Promise that resolves with a random member, or null if the set is empty
|
||||
*/
|
||||
zrandmember(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Append a value to a key
|
||||
* @param key The key to append to
|
||||
* @param value The value to append
|
||||
* @returns Promise that resolves with the length of the string after the append operation
|
||||
*/
|
||||
append(key: string | ArrayBufferView | Blob, value: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Set the value of a key and return its old value
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @returns Promise that resolves with the old value, or null if the key didn't exist
|
||||
*/
|
||||
getset(key: string | ArrayBufferView | Blob, value: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Prepend one or multiple values to a list
|
||||
* @param key The list key
|
||||
* @param value The value to prepend
|
||||
* @returns Promise that resolves with the length of the list after the push operation
|
||||
*/
|
||||
lpush(key: string | ArrayBufferView | Blob, value: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Prepend a value to a list, only if the list exists
|
||||
* @param key The list key
|
||||
* @param value The value to prepend
|
||||
* @returns Promise that resolves with the length of the list after the push operation, or 0 if the list doesn't exist
|
||||
*/
|
||||
lpushx(key: string | ArrayBufferView | Blob, value: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Add one or more members to a HyperLogLog
|
||||
* @param key The HyperLogLog key
|
||||
* @param element The element to add
|
||||
* @returns Promise that resolves with 1 if the HyperLogLog was altered, 0 otherwise
|
||||
*/
|
||||
pfadd(key: string | ArrayBufferView | Blob, element: string): Promise<number>;
|
||||
|
||||
/**
|
||||
* Append one or multiple values to a list
|
||||
* @param key The list key
|
||||
* @param value The value to append
|
||||
* @returns Promise that resolves with the length of the list after the push operation
|
||||
*/
|
||||
rpush(key: string | ArrayBufferView | Blob, value: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Append a value to a list, only if the list exists
|
||||
* @param key The list key
|
||||
* @param value The value to append
|
||||
* @returns Promise that resolves with the length of the list after the push operation, or 0 if the list doesn't exist
|
||||
*/
|
||||
rpushx(key: string | ArrayBufferView | Blob, value: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Set the value of a key, only if the key does not exist
|
||||
* @param key The key to set
|
||||
* @param value The value to set
|
||||
* @returns Promise that resolves with 1 if the key was set, 0 if the key was not set
|
||||
*/
|
||||
setnx(key: string | ArrayBufferView | Blob, value: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Get the score associated with the given member in a sorted set
|
||||
* @param key The sorted set key
|
||||
* @param member The member to get the score for
|
||||
* @returns Promise that resolves with the score of the member as a string, or null if the member or key doesn't exist
|
||||
*/
|
||||
zscore(key: string | ArrayBufferView | Blob, member: string): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Get the values of all specified keys
|
||||
* @param keys The keys to get
|
||||
* @returns Promise that resolves with an array of values, with null for keys that don't exist
|
||||
*/
|
||||
mget(...keys: (string | ArrayBufferView | Blob)[]): Promise<(string | null)[]>;
|
||||
|
||||
/**
|
||||
* Count the number of set bits (population counting) in a string
|
||||
* @param key The key to count bits in
|
||||
* @returns Promise that resolves with the number of bits set to 1
|
||||
*/
|
||||
bitcount(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Return a serialized version of the value stored at the specified key
|
||||
* @param key The key to dump
|
||||
* @returns Promise that resolves with the serialized value, or null if the key doesn't exist
|
||||
*/
|
||||
dump(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Get the expiration time of a key as a UNIX timestamp in seconds
|
||||
* @param key The key to check
|
||||
* @returns Promise that resolves with the timestamp, or -1 if the key has no expiration, or -2 if the key doesn't exist
|
||||
*/
|
||||
expiretime(key: string | ArrayBufferView | Blob): Promise<number>;
|
||||
|
||||
/**
|
||||
* Get the value of a key and delete the key
|
||||
* @param key The key to get and delete
|
||||
* @returns Promise that resolves with the value of the key, or null if the key doesn't exist
|
||||
*/
|
||||
getdel(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
|
||||
/**
|
||||
* Get the value of a key and optionally set its expiration
|
||||
* @param key The key to get
|
||||
* @returns Promise that resolves with the value of the key, or null if the key doesn't exist
|
||||
*/
|
||||
getex(key: string | ArrayBufferView | Blob): Promise<string | null>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Default Redis client
|
||||
*
|
||||
* Connection information populated from one of, in order of preference:
|
||||
* - `process.env.VALKEY_URL`
|
||||
* - `process.env.REDIS_URL`
|
||||
* - `"valkey://localhost:6379"`
|
||||
*
|
||||
*/
|
||||
export const redis: RedisClient;
|
||||
}
|
||||
945
packages/bun-types/s3.d.ts
vendored
Normal file
945
packages/bun-types/s3.d.ts
vendored
Normal file
@@ -0,0 +1,945 @@
|
||||
declare module "bun" {
|
||||
/**
|
||||
* Fast incremental writer for files and pipes.
|
||||
*
|
||||
* This uses the same interface as {@link ArrayBufferSink}, but writes to a file or pipe.
|
||||
*/
|
||||
interface FileSink {
|
||||
/**
|
||||
* Write a chunk of data to the file.
|
||||
*
|
||||
* If the file descriptor is not writable yet, the data is buffered.
|
||||
*/
|
||||
write(chunk: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer): number;
|
||||
/**
|
||||
* Flush the internal buffer, committing the data to disk or the pipe.
|
||||
*/
|
||||
flush(): number | Promise<number>;
|
||||
/**
|
||||
* Close the file descriptor. This also flushes the internal buffer.
|
||||
*/
|
||||
end(error?: Error): number | Promise<number>;
|
||||
|
||||
start(options?: {
|
||||
/**
|
||||
* Preallocate an internal buffer of this size
|
||||
* This can significantly improve performance when the chunk size is small
|
||||
*/
|
||||
highWaterMark?: number;
|
||||
}): void;
|
||||
|
||||
/**
|
||||
* For FIFOs & pipes, this lets you decide whether Bun's process should
|
||||
* remain alive until the pipe is closed.
|
||||
*
|
||||
* By default, it is automatically managed. While the stream is open, the
|
||||
* process remains alive and once the other end hangs up or the stream
|
||||
* closes, the process exits.
|
||||
*
|
||||
* If you previously called {@link unref}, you can call this again to re-enable automatic management.
|
||||
*
|
||||
* Internally, it will reference count the number of times this is called. By default, that number is 1
|
||||
*
|
||||
* If the file is not a FIFO or pipe, {@link ref} and {@link unref} do
|
||||
* nothing. If the pipe is already closed, this does nothing.
|
||||
*/
|
||||
ref(): void;
|
||||
|
||||
/**
|
||||
* For FIFOs & pipes, this lets you decide whether Bun's process should
|
||||
* remain alive until the pipe is closed.
|
||||
*
|
||||
* If you want to allow Bun's process to terminate while the stream is open,
|
||||
* call this.
|
||||
*
|
||||
* If the file is not a FIFO or pipe, {@link ref} and {@link unref} do
|
||||
* nothing. If the pipe is already closed, this does nothing.
|
||||
*/
|
||||
unref(): void;
|
||||
}
|
||||
|
||||
interface NetworkSink extends FileSink {
|
||||
/**
|
||||
* Write a chunk of data to the network.
|
||||
*
|
||||
* If the network is not writable yet, the data is buffered.
|
||||
*/
|
||||
write(chunk: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer): number;
|
||||
/**
|
||||
* Flush the internal buffer, committing the data to the network.
|
||||
*/
|
||||
flush(): number | Promise<number>;
|
||||
/**
|
||||
* Finish the upload. This also flushes the internal buffer.
|
||||
*/
|
||||
end(error?: Error): number | Promise<number>;
|
||||
|
||||
/**
|
||||
* Get the stat of the file.
|
||||
*/
|
||||
stat(): Promise<import("node:fs").Stats>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Configuration options for S3 operations
|
||||
*/
|
||||
interface S3Options extends BlobPropertyBag {
|
||||
/**
|
||||
* The Access Control List (ACL) policy for the file.
|
||||
* Controls who can access the file and what permissions they have.
|
||||
*
|
||||
* @example
|
||||
* // Setting public read access
|
||||
* const file = s3.file("public-file.txt", {
|
||||
* acl: "public-read",
|
||||
* bucket: "my-bucket"
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // Using with presigned URLs
|
||||
* const url = file.presign({
|
||||
* acl: "public-read",
|
||||
* expiresIn: 3600
|
||||
* });
|
||||
*/
|
||||
acl?:
|
||||
| "private"
|
||||
| "public-read"
|
||||
| "public-read-write"
|
||||
| "aws-exec-read"
|
||||
| "authenticated-read"
|
||||
| "bucket-owner-read"
|
||||
| "bucket-owner-full-control"
|
||||
| "log-delivery-write";
|
||||
|
||||
/**
|
||||
* The S3 bucket name. Defaults to `S3_BUCKET` or `AWS_BUCKET` environment variables.
|
||||
*
|
||||
* @example
|
||||
* // Using explicit bucket
|
||||
* const file = s3.file("my-file.txt", { bucket: "my-bucket" });
|
||||
*
|
||||
* @example
|
||||
* // Using environment variables
|
||||
* // With S3_BUCKET=my-bucket in .env
|
||||
* const file = s3.file("my-file.txt");
|
||||
*/
|
||||
bucket?: string;
|
||||
|
||||
/**
|
||||
* The AWS region. Defaults to `S3_REGION` or `AWS_REGION` environment variables.
|
||||
*
|
||||
* @example
|
||||
* const file = s3.file("my-file.txt", {
|
||||
* bucket: "my-bucket",
|
||||
* region: "us-west-2"
|
||||
* });
|
||||
*/
|
||||
region?: string;
|
||||
|
||||
/**
|
||||
* The access key ID for authentication.
|
||||
* Defaults to `S3_ACCESS_KEY_ID` or `AWS_ACCESS_KEY_ID` environment variables.
|
||||
*/
|
||||
accessKeyId?: string;
|
||||
|
||||
/**
|
||||
* The secret access key for authentication.
|
||||
* Defaults to `S3_SECRET_ACCESS_KEY` or `AWS_SECRET_ACCESS_KEY` environment variables.
|
||||
*/
|
||||
secretAccessKey?: string;
|
||||
|
||||
/**
|
||||
* Optional session token for temporary credentials.
|
||||
* Defaults to `S3_SESSION_TOKEN` or `AWS_SESSION_TOKEN` environment variables.
|
||||
*
|
||||
* @example
|
||||
* // Using temporary credentials
|
||||
* const file = s3.file("my-file.txt", {
|
||||
* accessKeyId: tempAccessKey,
|
||||
* secretAccessKey: tempSecretKey,
|
||||
* sessionToken: tempSessionToken
|
||||
* });
|
||||
*/
|
||||
sessionToken?: string;
|
||||
|
||||
/**
|
||||
* The S3-compatible service endpoint URL.
|
||||
* Defaults to `S3_ENDPOINT` or `AWS_ENDPOINT` environment variables.
|
||||
*
|
||||
* @example
|
||||
* // AWS S3
|
||||
* const file = s3.file("my-file.txt", {
|
||||
* endpoint: "https://s3.us-east-1.amazonaws.com"
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // Cloudflare R2
|
||||
* const file = s3.file("my-file.txt", {
|
||||
* endpoint: "https://<account-id>.r2.cloudflarestorage.com"
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // DigitalOcean Spaces
|
||||
* const file = s3.file("my-file.txt", {
|
||||
* endpoint: "https://<region>.digitaloceanspaces.com"
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // MinIO (local development)
|
||||
* const file = s3.file("my-file.txt", {
|
||||
* endpoint: "http://localhost:9000"
|
||||
* });
|
||||
*/
|
||||
endpoint?: string;
|
||||
|
||||
/**
|
||||
* Use virtual hosted style endpoint. default to false, when true if `endpoint` is informed it will ignore the `bucket`
|
||||
*
|
||||
* @example
|
||||
* // Using virtual hosted style
|
||||
* const file = s3.file("my-file.txt", {
|
||||
* virtualHostedStyle: true,
|
||||
* endpoint: "https://my-bucket.s3.us-east-1.amazonaws.com"
|
||||
* });
|
||||
*/
|
||||
virtualHostedStyle?: boolean;
|
||||
|
||||
/**
|
||||
* The size of each part in multipart uploads (in bytes).
|
||||
* - Minimum: 5 MiB
|
||||
* - Maximum: 5120 MiB
|
||||
* - Default: 5 MiB
|
||||
*
|
||||
* @example
|
||||
* // Configuring multipart uploads
|
||||
* const file = s3.file("large-file.dat", {
|
||||
* partSize: 10 * 1024 * 1024, // 10 MiB parts
|
||||
* queueSize: 4 // Upload 4 parts in parallel
|
||||
* });
|
||||
*
|
||||
* const writer = file.writer();
|
||||
* // ... write large file in chunks
|
||||
*/
|
||||
partSize?: number;
|
||||
|
||||
/**
|
||||
* Number of parts to upload in parallel for multipart uploads.
|
||||
* - Default: 5
|
||||
* - Maximum: 255
|
||||
*
|
||||
* Increasing this value can improve upload speeds for large files
|
||||
* but will use more memory.
|
||||
*/
|
||||
queueSize?: number;
|
||||
|
||||
/**
|
||||
* Number of retry attempts for failed uploads.
|
||||
* - Default: 3
|
||||
* - Maximum: 255
|
||||
*
|
||||
* @example
|
||||
* // Setting retry attempts
|
||||
* const file = s3.file("my-file.txt", {
|
||||
* retry: 5 // Retry failed uploads up to 5 times
|
||||
* });
|
||||
*/
|
||||
retry?: number;
|
||||
|
||||
/**
|
||||
* The Content-Type of the file.
|
||||
* Automatically set based on file extension when possible.
|
||||
*
|
||||
* @example
|
||||
* // Setting explicit content type
|
||||
* const file = s3.file("data.bin", {
|
||||
* type: "application/octet-stream"
|
||||
* });
|
||||
*/
|
||||
type?: string;
|
||||
|
||||
/**
|
||||
* By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects.
|
||||
*
|
||||
* @example
|
||||
* // Setting explicit Storage class
|
||||
* const file = s3.file("my-file.json", {
|
||||
* storageClass: "STANDARD_IA"
|
||||
* });
|
||||
*/
|
||||
storageClass?:
|
||||
| "STANDARD"
|
||||
| "DEEP_ARCHIVE"
|
||||
| "EXPRESS_ONEZONE"
|
||||
| "GLACIER"
|
||||
| "GLACIER_IR"
|
||||
| "INTELLIGENT_TIERING"
|
||||
| "ONEZONE_IA"
|
||||
| "OUTPOSTS"
|
||||
| "REDUCED_REDUNDANCY"
|
||||
| "SNOW"
|
||||
| "STANDARD_IA";
|
||||
|
||||
/**
|
||||
* @deprecated The size of the internal buffer in bytes. Defaults to 5 MiB. use `partSize` and `queueSize` instead.
|
||||
*/
|
||||
highWaterMark?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Options for generating presigned URLs
|
||||
*/
|
||||
interface S3FilePresignOptions extends S3Options {
|
||||
/**
|
||||
* Number of seconds until the presigned URL expires.
|
||||
* - Default: 86400 (1 day)
|
||||
*
|
||||
* @example
|
||||
* // Short-lived URL
|
||||
* const url = file.presign({
|
||||
* expiresIn: 3600 // 1 hour
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // Long-lived public URL
|
||||
* const url = file.presign({
|
||||
* expiresIn: 7 * 24 * 60 * 60, // 7 days
|
||||
* acl: "public-read"
|
||||
* });
|
||||
*/
|
||||
expiresIn?: number;
|
||||
|
||||
/**
|
||||
* The HTTP method allowed for the presigned URL.
|
||||
*
|
||||
* @example
|
||||
* // GET URL for downloads
|
||||
* const downloadUrl = file.presign({
|
||||
* method: "GET",
|
||||
* expiresIn: 3600
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // PUT URL for uploads
|
||||
* const uploadUrl = file.presign({
|
||||
* method: "PUT",
|
||||
* expiresIn: 3600,
|
||||
* type: "application/json"
|
||||
* });
|
||||
*/
|
||||
method?: "GET" | "POST" | "PUT" | "DELETE" | "HEAD";
|
||||
}
|
||||
|
||||
interface S3Stats {
|
||||
size: number;
|
||||
lastModified: Date;
|
||||
etag: string;
|
||||
type: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a file in an S3-compatible storage service.
|
||||
* Extends the Blob interface for compatibility with web APIs.
|
||||
*
|
||||
* @category Cloud Storage
|
||||
*/
|
||||
interface S3File extends Blob {
|
||||
/**
|
||||
* The size of the file in bytes.
|
||||
* This is a Promise because it requires a network request to determine the size.
|
||||
*
|
||||
* @example
|
||||
* // Getting file size
|
||||
* const size = await file.size;
|
||||
* console.log(`File size: ${size} bytes`);
|
||||
*
|
||||
* @example
|
||||
* // Check if file is larger than 1MB
|
||||
* if (await file.size > 1024 * 1024) {
|
||||
* console.log("Large file detected");
|
||||
* }
|
||||
*/
|
||||
/**
|
||||
* TODO: figure out how to get the typescript types to not error for this property.
|
||||
*/
|
||||
// size: Promise<number>;
|
||||
|
||||
/**
|
||||
* Creates a new S3File representing a slice of the original file.
|
||||
* Uses HTTP Range headers for efficient partial downloads.
|
||||
*
|
||||
* @param begin - Starting byte offset
|
||||
* @param end - Ending byte offset (exclusive)
|
||||
* @param contentType - Optional MIME type for the slice
|
||||
* @returns A new S3File representing the specified range
|
||||
*
|
||||
* @example
|
||||
* // Reading file header
|
||||
* const header = file.slice(0, 1024);
|
||||
* const headerText = await header.text();
|
||||
*
|
||||
* @example
|
||||
* // Reading with content type
|
||||
* const jsonSlice = file.slice(1024, 2048, "application/json");
|
||||
* const data = await jsonSlice.json();
|
||||
*
|
||||
* @example
|
||||
* // Reading from offset to end
|
||||
* const remainder = file.slice(1024);
|
||||
* const content = await remainder.text();
|
||||
*/
|
||||
slice(begin?: number, end?: number, contentType?: string): S3File;
|
||||
slice(begin?: number, contentType?: string): S3File;
|
||||
slice(contentType?: string): S3File;
|
||||
|
||||
/**
|
||||
* Creates a writable stream for uploading data.
|
||||
* Suitable for large files as it uses multipart upload.
|
||||
*
|
||||
* @param options - Configuration for the upload
|
||||
* @returns A NetworkSink for writing data
|
||||
*
|
||||
* @example
|
||||
* // Basic streaming write
|
||||
* const writer = file.writer({
|
||||
* type: "application/json"
|
||||
* });
|
||||
* writer.write('{"hello": ');
|
||||
* writer.write('"world"}');
|
||||
* await writer.end();
|
||||
*
|
||||
* @example
|
||||
* // Optimized large file upload
|
||||
* const writer = file.writer({
|
||||
* partSize: 10 * 1024 * 1024, // 10MB parts
|
||||
* queueSize: 4, // Upload 4 parts in parallel
|
||||
* retry: 3 // Retry failed parts
|
||||
* });
|
||||
*
|
||||
* // Write large chunks of data efficiently
|
||||
* for (const chunk of largeDataChunks) {
|
||||
* writer.write(chunk);
|
||||
* }
|
||||
* await writer.end();
|
||||
*
|
||||
* @example
|
||||
* // Error handling
|
||||
* const writer = file.writer();
|
||||
* try {
|
||||
* writer.write(data);
|
||||
* await writer.end();
|
||||
* } catch (err) {
|
||||
* console.error('Upload failed:', err);
|
||||
* // Writer will automatically abort multipart upload on error
|
||||
* }
|
||||
*/
|
||||
writer(options?: S3Options): NetworkSink;
|
||||
|
||||
/**
|
||||
* Gets a readable stream of the file's content.
|
||||
* Useful for processing large files without loading them entirely into memory.
|
||||
*
|
||||
* @returns A ReadableStream for the file content
|
||||
*
|
||||
* @example
|
||||
* // Basic streaming read
|
||||
* const stream = file.stream();
|
||||
* for await (const chunk of stream) {
|
||||
* console.log('Received chunk:', chunk);
|
||||
* }
|
||||
*
|
||||
* @example
|
||||
* // Piping to response
|
||||
* const stream = file.stream();
|
||||
* return new Response(stream, {
|
||||
* headers: { 'Content-Type': file.type }
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // Processing large files
|
||||
* const stream = file.stream();
|
||||
* const textDecoder = new TextDecoder();
|
||||
* for await (const chunk of stream) {
|
||||
* const text = textDecoder.decode(chunk);
|
||||
* // Process text chunk by chunk
|
||||
* }
|
||||
*/
|
||||
readonly readable: ReadableStream;
|
||||
stream(): ReadableStream;
|
||||
|
||||
/**
|
||||
* The name or path of the file in the bucket.
|
||||
*
|
||||
* @example
|
||||
* const file = s3.file("folder/image.jpg");
|
||||
* console.log(file.name); // "folder/image.jpg"
|
||||
*/
|
||||
readonly name?: string;
|
||||
|
||||
/**
|
||||
* The bucket name containing the file.
|
||||
*
|
||||
* @example
|
||||
* const file = s3.file("s3://my-bucket/file.txt");
|
||||
* console.log(file.bucket); // "my-bucket"
|
||||
*/
|
||||
readonly bucket?: string;
|
||||
|
||||
/**
|
||||
* Checks if the file exists in S3.
|
||||
* Uses HTTP HEAD request to efficiently check existence without downloading.
|
||||
*
|
||||
* @returns Promise resolving to true if file exists, false otherwise
|
||||
*
|
||||
* @example
|
||||
* // Basic existence check
|
||||
* if (await file.exists()) {
|
||||
* console.log("File exists in S3");
|
||||
* }
|
||||
*
|
||||
* @example
|
||||
* // With error handling
|
||||
* try {
|
||||
* const exists = await file.exists();
|
||||
* if (!exists) {
|
||||
* console.log("File not found");
|
||||
* }
|
||||
* } catch (err) {
|
||||
* console.error("Error checking file:", err);
|
||||
* }
|
||||
*/
|
||||
exists(): Promise<boolean>;
|
||||
|
||||
/**
|
||||
* Uploads data to S3.
|
||||
* Supports various input types and automatically handles large files.
|
||||
*
|
||||
* @param data - The data to upload
|
||||
* @param options - Upload configuration options
|
||||
* @returns Promise resolving to number of bytes written
|
||||
*
|
||||
* @example
|
||||
* // Writing string data
|
||||
* await file.write("Hello World", {
|
||||
* type: "text/plain"
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // Writing JSON
|
||||
* const data = { hello: "world" };
|
||||
* await file.write(JSON.stringify(data), {
|
||||
* type: "application/json"
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // Writing from Response
|
||||
* const response = await fetch("https://example.com/data");
|
||||
* await file.write(response);
|
||||
*
|
||||
* @example
|
||||
* // Writing with ACL
|
||||
* await file.write(data, {
|
||||
* acl: "public-read",
|
||||
* type: "application/octet-stream"
|
||||
* });
|
||||
*/
|
||||
write(
|
||||
data: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer | Request | Response | BunFile | S3File | Blob,
|
||||
options?: S3Options,
|
||||
): Promise<number>;
|
||||
|
||||
/**
|
||||
* Generates a presigned URL for the file.
|
||||
* Allows temporary access to the file without exposing credentials.
|
||||
*
|
||||
* @param options - Configuration for the presigned URL
|
||||
* @returns Presigned URL string
|
||||
*
|
||||
* @example
|
||||
* // Basic download URL
|
||||
* const url = file.presign({
|
||||
* expiresIn: 3600 // 1 hour
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // Upload URL with specific content type
|
||||
* const uploadUrl = file.presign({
|
||||
* method: "PUT",
|
||||
* expiresIn: 3600,
|
||||
* type: "image/jpeg",
|
||||
* acl: "public-read"
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* // URL with custom permissions
|
||||
* const url = file.presign({
|
||||
* method: "GET",
|
||||
* expiresIn: 7 * 24 * 60 * 60, // 7 days
|
||||
* acl: "public-read"
|
||||
* });
|
||||
*/
|
||||
presign(options?: S3FilePresignOptions): string;
|
||||
|
||||
/**
|
||||
* Deletes the file from S3.
|
||||
*
|
||||
* @returns Promise that resolves when deletion is complete
|
||||
*
|
||||
* @example
|
||||
* // Basic deletion
|
||||
* await file.delete();
|
||||
*
|
||||
* @example
|
||||
* // With error handling
|
||||
* try {
|
||||
* await file.delete();
|
||||
* console.log("File deleted successfully");
|
||||
* } catch (err) {
|
||||
* console.error("Failed to delete file:", err);
|
||||
* }
|
||||
*/
|
||||
delete(): Promise<void>;
|
||||
|
||||
/**
|
||||
* Alias for delete() method.
|
||||
* Provided for compatibility with Node.js fs API naming.
|
||||
*
|
||||
* @example
|
||||
* await file.unlink();
|
||||
*/
|
||||
unlink: S3File["delete"];
|
||||
|
||||
/**
|
||||
* Get the stat of a file in an S3-compatible storage service.
|
||||
*
|
||||
* @returns Promise resolving to S3Stat
|
||||
*/
|
||||
stat(): Promise<S3Stats>;
|
||||
}
|
||||
|
||||
interface S3ListObjectsOptions {
|
||||
/** Limits the response to keys that begin with the specified prefix. */
|
||||
prefix?: string;
|
||||
/** ContinuationToken indicates to S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key. You can use this ContinuationToken for pagination of the list results. */
|
||||
continuationToken?: string;
|
||||
/** A delimiter is a character that you use to group keys. */
|
||||
delimiter?: string;
|
||||
/** Sets the maximum number of keys returned in the response. By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. */
|
||||
maxKeys?: number;
|
||||
/** StartAfter is where you want S3 to start listing from. S3 starts listing after this specified key. StartAfter can be any key in the bucket. */
|
||||
startAfter?: string;
|
||||
/** Encoding type used by S3 to encode the object keys in the response. Responses are encoded only in UTF-8. An object key can contain any Unicode character. However, the XML 1.0 parser can't parse certain characters, such as characters with an ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this parameter to request that S3 encode the keys in the response. */
|
||||
encodingType?: "url";
|
||||
/** If you want to return the owner field with each key in the result, then set the FetchOwner field to true. */
|
||||
fetchOwner?: boolean;
|
||||
}
|
||||
|
||||
interface S3ListObjectsResponse {
|
||||
/** All of the keys (up to 1,000) that share the same prefix are grouped together. When counting the total numbers of returns by this API operation, this group of keys is considered as one item.
|
||||
*
|
||||
* A response can contain CommonPrefixes only if you specify a delimiter.
|
||||
*
|
||||
* CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter.
|
||||
*
|
||||
* CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.
|
||||
*
|
||||
* For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns. */
|
||||
commonPrefixes?: { prefix: string }[];
|
||||
/** Metadata about each object returned. */
|
||||
contents?: {
|
||||
/** The algorithm that was used to create a checksum of the object. */
|
||||
checksumAlgorithm?: "CRC32" | "CRC32C" | "SHA1" | "SHA256" | "CRC64NVME";
|
||||
/** The checksum type that is used to calculate the object’s checksum value. */
|
||||
checksumType?: "COMPOSITE" | "FULL_OBJECT";
|
||||
/**
|
||||
* The entity tag is a hash of the object. The ETag reflects changes only to the contents of an object, not its metadata. The ETag may or may not be an MD5 digest of the object data. Whether or not it is depends on how the object was created and how it is encrypted as described below:
|
||||
*
|
||||
* - Objects created by the PUT Object, POST Object, or Copy operation, or through the AWS Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that are an MD5 digest of their object data.
|
||||
* - Objects created by the PUT Object, POST Object, or Copy operation, or through the AWS Management Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object data.
|
||||
* - If an object is created by either the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest, regardless of the method of encryption. If an object is larger than 16 MB, the AWS Management Console will upload or copy that object as a Multipart Upload, and therefore the ETag will not be an MD5 digest.
|
||||
*
|
||||
* MD5 is not supported by directory buckets.
|
||||
*/
|
||||
eTag?: string;
|
||||
/** The name that you assign to an object. You use the object key to retrieve the object. */
|
||||
key: string;
|
||||
/** Creation date of the object. */
|
||||
lastModified?: string;
|
||||
/** The owner of the object */
|
||||
owner?: {
|
||||
/** The ID of the owner. */
|
||||
id?: string;
|
||||
/** The display name of the owner. */
|
||||
displayName?: string;
|
||||
};
|
||||
/** Specifies the restoration status of an object. Objects in certain storage classes must be restored before they can be retrieved. */
|
||||
restoreStatus?: {
|
||||
/** Specifies whether the object is currently being restored. */
|
||||
isRestoreInProgress?: boolean;
|
||||
/** Indicates when the restored copy will expire. This value is populated only if the object has already been restored. */
|
||||
restoreExpiryDate?: string;
|
||||
};
|
||||
/** Size in bytes of the object */
|
||||
size?: number;
|
||||
/** The class of storage used to store the object. */
|
||||
storageClass?:
|
||||
| "STANDARD"
|
||||
| "REDUCED_REDUNDANCY"
|
||||
| "GLACIER"
|
||||
| "STANDARD_IA"
|
||||
| "ONEZONE_IA"
|
||||
| "INTELLIGENT_TIERING"
|
||||
| "DEEP_ARCHIVE"
|
||||
| "OUTPOSTS"
|
||||
| "GLACIER_IR"
|
||||
| "SNOW"
|
||||
| "EXPRESS_ONEZONE";
|
||||
}[];
|
||||
/** If ContinuationToken was sent with the request, it is included in the response. You can use the returned ContinuationToken for pagination of the list response. */
|
||||
continuationToken?: string;
|
||||
/** Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the MaxKeys value. */
|
||||
delimiter?: string;
|
||||
/** Encoding type used by S3 to encode object key names in the XML response. */
|
||||
encodingType?: "url";
|
||||
/** Set to false if all of the results were returned. Set to true if more keys are available to return. If the number of results exceeds that specified by MaxKeys, all of the results might not be returned. */
|
||||
isTruncated?: boolean;
|
||||
/** KeyCount is the number of keys returned with this request. KeyCount will always be less than or equal to the MaxKeys field. For example, if you ask for 50 keys, your result will include 50 keys or fewer. */
|
||||
keyCount?: number;
|
||||
/** Sets the maximum number of keys returned in the response. By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. */
|
||||
maxKeys?: number;
|
||||
/** The bucket name. */
|
||||
name?: string;
|
||||
/** NextContinuationToken is sent when isTruncated is true, which means there are more keys in the bucket that can be listed. The next list requests to S3 can be continued with this NextContinuationToken. NextContinuationToken is obfuscated and is not a real key. */
|
||||
nextContinuationToken?: string;
|
||||
/** Keys that begin with the indicated prefix. */
|
||||
prefix?: string;
|
||||
/** If StartAfter was sent with the request, it is included in the response. */
|
||||
startAfter?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* A configured S3 bucket instance for managing files.
|
||||
* The instance is callable to create S3File instances and provides methods
|
||||
* for common operations.
|
||||
*
|
||||
* @example
|
||||
* // Basic bucket setup
|
||||
* const bucket = new S3Client({
|
||||
* bucket: "my-bucket",
|
||||
* accessKeyId: "key",
|
||||
* secretAccessKey: "secret"
|
||||
* });
|
||||
*
|
||||
* // Get file instance
|
||||
* const file = bucket.file("image.jpg");
|
||||
*
|
||||
* // Common operations
|
||||
* await bucket.write("data.json", JSON.stringify({hello: "world"}));
|
||||
* const url = bucket.presign("file.pdf");
|
||||
* await bucket.unlink("old.txt");
|
||||
*
|
||||
* @category Cloud Storage
|
||||
*/
|
||||
class S3Client {
|
||||
prototype: S3Client;
|
||||
/**
|
||||
* Create a new instance of an S3 bucket so that credentials can be managed
|
||||
* from a single instance instead of being passed to every method.
|
||||
*
|
||||
* @param options The default options to use for the S3 client. Can be
|
||||
* overriden by passing options to the methods.
|
||||
*
|
||||
* ## Keep S3 credentials in a single instance
|
||||
*
|
||||
* @example
|
||||
* const bucket = new Bun.S3Client({
|
||||
* accessKeyId: "your-access-key",
|
||||
* secretAccessKey: "your-secret-key",
|
||||
* bucket: "my-bucket",
|
||||
* endpoint: "https://s3.us-east-1.amazonaws.com",
|
||||
* sessionToken: "your-session-token",
|
||||
* });
|
||||
*
|
||||
* // S3Client is callable, so you can do this:
|
||||
* const file = bucket.file("my-file.txt");
|
||||
*
|
||||
* // or this:
|
||||
* await file.write("Hello Bun!");
|
||||
* await file.text();
|
||||
*
|
||||
* // To delete the file:
|
||||
* await bucket.delete("my-file.txt");
|
||||
*
|
||||
* // To write a file without returning the instance:
|
||||
* await bucket.write("my-file.txt", "Hello Bun!");
|
||||
*
|
||||
*/
|
||||
constructor(options?: S3Options);
|
||||
|
||||
/**
|
||||
* Creates an S3File instance for the given path.
|
||||
*
|
||||
* @example
|
||||
* const file = bucket.file("image.jpg");
|
||||
* await file.write(imageData);
|
||||
* const configFile = bucket.file("config.json", {
|
||||
* type: "application/json",
|
||||
* acl: "private"
|
||||
* });
|
||||
*/
|
||||
file(path: string, options?: S3Options): S3File;
|
||||
|
||||
/**
|
||||
* Writes data directly to a path in the bucket.
|
||||
* Supports strings, buffers, streams, and web API types.
|
||||
*
|
||||
* @example
|
||||
* // Write string
|
||||
* await bucket.write("hello.txt", "Hello World");
|
||||
*
|
||||
* // Write JSON with type
|
||||
* await bucket.write(
|
||||
* "data.json",
|
||||
* JSON.stringify({hello: "world"}),
|
||||
* {type: "application/json"}
|
||||
* );
|
||||
*
|
||||
* // Write from fetch
|
||||
* const res = await fetch("https://example.com/data");
|
||||
* await bucket.write("data.bin", res);
|
||||
*
|
||||
* // Write with ACL
|
||||
* await bucket.write("public.html", html, {
|
||||
* acl: "public-read",
|
||||
* type: "text/html"
|
||||
* });
|
||||
*/
|
||||
write(
|
||||
path: string,
|
||||
data:
|
||||
| string
|
||||
| ArrayBufferView
|
||||
| ArrayBuffer
|
||||
| SharedArrayBuffer
|
||||
| Request
|
||||
| Response
|
||||
| BunFile
|
||||
| S3File
|
||||
| Blob
|
||||
| File,
|
||||
options?: S3Options,
|
||||
): Promise<number>;
|
||||
|
||||
/**
|
||||
* Generate a presigned URL for temporary access to a file.
|
||||
* Useful for generating upload/download URLs without exposing credentials.
|
||||
*
|
||||
* @example
|
||||
* // Download URL
|
||||
* const downloadUrl = bucket.presign("file.pdf", {
|
||||
* expiresIn: 3600 // 1 hour
|
||||
* });
|
||||
*
|
||||
* // Upload URL
|
||||
* const uploadUrl = bucket.presign("uploads/image.jpg", {
|
||||
* method: "PUT",
|
||||
* expiresIn: 3600,
|
||||
* type: "image/jpeg",
|
||||
* acl: "public-read"
|
||||
* });
|
||||
*
|
||||
* // Long-lived public URL
|
||||
* const publicUrl = bucket.presign("public/doc.pdf", {
|
||||
* expiresIn: 7 * 24 * 60 * 60, // 7 days
|
||||
* acl: "public-read"
|
||||
* });
|
||||
*/
|
||||
presign(path: string, options?: S3FilePresignOptions): string;
|
||||
|
||||
/**
|
||||
* Delete a file from the bucket.
|
||||
*
|
||||
* @example
|
||||
* // Simple delete
|
||||
* await bucket.unlink("old-file.txt");
|
||||
*
|
||||
* // With error handling
|
||||
* try {
|
||||
* await bucket.unlink("file.dat");
|
||||
* console.log("File deleted");
|
||||
* } catch (err) {
|
||||
* console.error("Delete failed:", err);
|
||||
* }
|
||||
*/
|
||||
unlink(path: string, options?: S3Options): Promise<void>;
|
||||
delete: S3Client["unlink"];
|
||||
|
||||
/**
|
||||
* Get the size of a file in bytes.
|
||||
* Uses HEAD request to efficiently get size.
|
||||
*
|
||||
* @example
|
||||
* // Get size
|
||||
* const bytes = await bucket.size("video.mp4");
|
||||
* console.log(`Size: ${bytes} bytes`);
|
||||
*
|
||||
* // Check if file is large
|
||||
* if (await bucket.size("data.zip") > 100 * 1024 * 1024) {
|
||||
* console.log("File is larger than 100MB");
|
||||
* }
|
||||
*/
|
||||
size(path: string, options?: S3Options): Promise<number>;
|
||||
|
||||
/**
|
||||
* Check if a file exists in the bucket.
|
||||
* Uses HEAD request to check existence.
|
||||
*
|
||||
* @example
|
||||
* // Check existence
|
||||
* if (await bucket.exists("config.json")) {
|
||||
* const file = bucket.file("config.json");
|
||||
* const config = await file.json();
|
||||
* }
|
||||
*
|
||||
* // With error handling
|
||||
* try {
|
||||
* if (!await bucket.exists("required.txt")) {
|
||||
* throw new Error("Required file missing");
|
||||
* }
|
||||
* } catch (err) {
|
||||
* console.error("Check failed:", err);
|
||||
* }
|
||||
*/
|
||||
exists(path: string, options?: S3Options): Promise<boolean>;
|
||||
/**
|
||||
* Get the stat of a file in an S3-compatible storage service.
|
||||
*
|
||||
* @param path The path to the file.
|
||||
* @param options The options to use for the S3 client.
|
||||
*/
|
||||
stat(path: string, options?: S3Options): Promise<S3Stats>;
|
||||
|
||||
/** Returns some or all (up to 1,000) of the objects in a bucket with each request.
|
||||
*
|
||||
* You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
|
||||
*/
|
||||
list(
|
||||
input?: S3ListObjectsOptions | null,
|
||||
options?: Pick<S3Options, "accessKeyId" | "secretAccessKey" | "sessionToken" | "region" | "bucket" | "endpoint">,
|
||||
): Promise<S3ListObjectsResponse>;
|
||||
|
||||
static list(
|
||||
input?: S3ListObjectsOptions | null,
|
||||
options?: Pick<S3Options, "accessKeyId" | "secretAccessKey" | "sessionToken" | "region" | "bucket" | "endpoint">,
|
||||
): Promise<S3ListObjectsResponse>;
|
||||
}
|
||||
|
||||
/**
|
||||
* A default instance of S3Client
|
||||
*
|
||||
* Pulls credentials from environment variables. Use `new Bun.S3Client()` if you need to explicitly set credentials.
|
||||
*
|
||||
* @category Cloud Storage
|
||||
*/
|
||||
var s3: S3Client;
|
||||
}
|
||||
64
packages/bun-types/sqlite.d.ts
vendored
64
packages/bun-types/sqlite.d.ts
vendored
@@ -6,7 +6,7 @@
|
||||
* ```ts
|
||||
* import { Database } from 'bun:sqlite';
|
||||
*
|
||||
* var db = new Database('app.db');
|
||||
* const db = new Database('app.db');
|
||||
* db.query('SELECT * FROM users WHERE name = ?').all('John');
|
||||
* // => [{ id: 1, name: 'John' }]
|
||||
* ```
|
||||
@@ -24,40 +24,44 @@
|
||||
* | `null` | `NULL` |
|
||||
*/
|
||||
declare module "bun:sqlite" {
|
||||
/**
|
||||
* A SQLite3 database
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const db = new Database("mydb.sqlite");
|
||||
* db.run("CREATE TABLE foo (bar TEXT)");
|
||||
* db.run("INSERT INTO foo VALUES (?)", ["baz"]);
|
||||
* console.log(db.query("SELECT * FROM foo").all());
|
||||
* ```
|
||||
*
|
||||
* @example
|
||||
*
|
||||
* Open an in-memory database
|
||||
*
|
||||
* ```ts
|
||||
* const db = new Database(":memory:");
|
||||
* db.run("CREATE TABLE foo (bar TEXT)");
|
||||
* db.run("INSERT INTO foo VALUES (?)", ["hiiiiii"]);
|
||||
* console.log(db.query("SELECT * FROM foo").all());
|
||||
* ```
|
||||
*
|
||||
* @example
|
||||
*
|
||||
* Open read-only
|
||||
*
|
||||
* ```ts
|
||||
* const db = new Database("mydb.sqlite", {readonly: true});
|
||||
* ```
|
||||
*
|
||||
* @category Database
|
||||
*/
|
||||
export class Database implements Disposable {
|
||||
/**
|
||||
* Open or create a SQLite3 database
|
||||
*
|
||||
* @param filename The filename of the database to open. Pass an empty string (`""`) or `":memory:"` or undefined for an in-memory database.
|
||||
* @param options defaults to `{readwrite: true, create: true}`. If a number, then it's treated as `SQLITE_OPEN_*` constant flags.
|
||||
*
|
||||
* @example
|
||||
*
|
||||
* ```ts
|
||||
* const db = new Database("mydb.sqlite");
|
||||
* db.run("CREATE TABLE foo (bar TEXT)");
|
||||
* db.run("INSERT INTO foo VALUES (?)", ["baz"]);
|
||||
* console.log(db.query("SELECT * FROM foo").all());
|
||||
* ```
|
||||
*
|
||||
* @example
|
||||
*
|
||||
* Open an in-memory database
|
||||
*
|
||||
* ```ts
|
||||
* const db = new Database(":memory:");
|
||||
* db.run("CREATE TABLE foo (bar TEXT)");
|
||||
* db.run("INSERT INTO foo VALUES (?)", ["hiiiiii"]);
|
||||
* console.log(db.query("SELECT * FROM foo").all());
|
||||
* ```
|
||||
*
|
||||
* @example
|
||||
*
|
||||
* Open read-only
|
||||
*
|
||||
* ```ts
|
||||
* const db = new Database("mydb.sqlite", {readonly: true});
|
||||
* ```
|
||||
*/
|
||||
constructor(
|
||||
filename?: string,
|
||||
@@ -567,6 +571,8 @@ declare module "bun:sqlite" {
|
||||
*
|
||||
* This is returned by {@link Database.prepare} and {@link Database.query}.
|
||||
*
|
||||
* @category Database
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const stmt = db.prepare("SELECT * FROM foo WHERE bar = ?");
|
||||
|
||||
19
packages/bun-types/test.d.ts
vendored
19
packages/bun-types/test.d.ts
vendored
@@ -16,6 +16,8 @@
|
||||
declare module "bun:test" {
|
||||
/**
|
||||
* -- Mocks --
|
||||
*
|
||||
* @category Testing
|
||||
*/
|
||||
export type Mock<T extends (...args: any[]) => any> = JestMock.Mock<T>;
|
||||
|
||||
@@ -149,6 +151,10 @@ declare module "bun:test" {
|
||||
methodOrPropertyValue: K,
|
||||
): Mock<T[K] extends (...args: any[]) => any ? T[K] : never>;
|
||||
|
||||
interface FunctionLike {
|
||||
readonly name: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Describes a group of related tests.
|
||||
*
|
||||
@@ -164,11 +170,9 @@ declare module "bun:test" {
|
||||
*
|
||||
* @param label the label for the tests
|
||||
* @param fn the function that defines the tests
|
||||
*
|
||||
* @category Testing
|
||||
*/
|
||||
|
||||
interface FunctionLike {
|
||||
readonly name: string;
|
||||
}
|
||||
export interface Describe {
|
||||
(fn: () => void): void;
|
||||
|
||||
@@ -352,6 +356,8 @@ declare module "bun:test" {
|
||||
* @param label the label for the test
|
||||
* @param fn the test function
|
||||
* @param options the test timeout or options
|
||||
*
|
||||
* @category Testing
|
||||
*/
|
||||
export interface Test {
|
||||
(
|
||||
@@ -420,7 +426,6 @@ declare module "bun:test" {
|
||||
*
|
||||
* @param label the label for the test
|
||||
* @param fn the test function
|
||||
* @param options the test timeout or options
|
||||
*/
|
||||
failing(label: string, fn?: (() => void | Promise<unknown>) | ((done: (err?: unknown) => void) => void)): void;
|
||||
/**
|
||||
@@ -1778,10 +1783,6 @@ declare module "bun:test" {
|
||||
type MatcherContext = MatcherUtils & MatcherState;
|
||||
}
|
||||
|
||||
declare module "test" {
|
||||
export type * from "bun:test";
|
||||
}
|
||||
|
||||
declare namespace JestMock {
|
||||
/**
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
import { webcrypto } from "crypto";
|
||||
|
||||
webcrypto.CryptoKey;
|
||||
@@ -1,25 +0,0 @@
|
||||
import { expectType } from "./utilities.test";
|
||||
|
||||
declare module "bun" {
|
||||
interface Env {
|
||||
FOO: "FOO";
|
||||
}
|
||||
}
|
||||
expectType<"FOO">(process.env.FOO);
|
||||
|
||||
declare global {
|
||||
// eslint-disable-next-line @typescript-eslint/no-namespace
|
||||
namespace NodeJS {
|
||||
interface ProcessEnv {
|
||||
BAR: "BAR";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
expectType<"BAR">(process.env.BAR);
|
||||
|
||||
process.env.FOO;
|
||||
process.env.BAR;
|
||||
process.env.OTHER;
|
||||
Bun.env.FOO;
|
||||
Bun.env.BAR;
|
||||
@@ -1,2 +0,0 @@
|
||||
Bun.spawn(["echo", '"hi"']);
|
||||
performance;
|
||||
@@ -1,178 +0,0 @@
|
||||
Bun.serve({
|
||||
fetch(req) {
|
||||
console.log(req.url); // => http://localhost:3000/
|
||||
return new Response("Hello World");
|
||||
},
|
||||
});
|
||||
|
||||
Bun.serve({
|
||||
fetch(req) {
|
||||
console.log(req.url); // => http://localhost:3000/
|
||||
return new Response("Hello World");
|
||||
},
|
||||
keyFile: "ca.pem",
|
||||
certFile: "cert.pem",
|
||||
});
|
||||
|
||||
Bun.serve({
|
||||
websocket: {
|
||||
message(ws, message) {
|
||||
ws.send(message);
|
||||
},
|
||||
},
|
||||
|
||||
fetch(req, server) {
|
||||
// Upgrade to a ServerWebSocket if we can
|
||||
// This automatically checks for the `Sec-WebSocket-Key` header
|
||||
// meaning you don't have to check headers, you can just call `upgrade()`
|
||||
if (server.upgrade(req)) {
|
||||
// When upgrading, we return undefined since we don't want to send a Response
|
||||
return;
|
||||
}
|
||||
|
||||
return new Response("Regular HTTP response");
|
||||
},
|
||||
});
|
||||
|
||||
Bun.serve<{
|
||||
name: string;
|
||||
}>({
|
||||
fetch(req, server) {
|
||||
const url = new URL(req.url);
|
||||
if (url.pathname === "/chat") {
|
||||
if (
|
||||
server.upgrade(req, {
|
||||
data: {
|
||||
name: new URL(req.url).searchParams.get("name") || "Friend",
|
||||
},
|
||||
headers: {
|
||||
"Set-Cookie": "name=" + new URL(req.url).searchParams.get("name"),
|
||||
},
|
||||
})
|
||||
) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
return new Response("Expected a websocket connection", { status: 400 });
|
||||
},
|
||||
|
||||
websocket: {
|
||||
open(ws) {
|
||||
console.log("WebSocket opened");
|
||||
ws.subscribe("the-group-chat");
|
||||
},
|
||||
|
||||
message(ws, message) {
|
||||
ws.publish("the-group-chat", `${ws.data.name}: ${message.toString()}`);
|
||||
},
|
||||
|
||||
close(ws, code, reason) {
|
||||
ws.publish("the-group-chat", `${ws.data.name} left the chat`);
|
||||
},
|
||||
|
||||
drain(ws) {
|
||||
console.log("Please send me data. I am ready to receive it.");
|
||||
},
|
||||
|
||||
perMessageDeflate: true,
|
||||
},
|
||||
});
|
||||
|
||||
Bun.serve({
|
||||
fetch(req) {
|
||||
throw new Error("woops!");
|
||||
},
|
||||
error(error) {
|
||||
return new Response(`<pre>${error.message}\n${error.stack}</pre>`, {
|
||||
headers: {
|
||||
"Content-Type": "text/html",
|
||||
},
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
export {};
|
||||
|
||||
Bun.serve({
|
||||
port: 1234,
|
||||
fetch(req, server) {
|
||||
server.upgrade(req);
|
||||
if (Math.random() > 0.5) return undefined;
|
||||
return new Response();
|
||||
},
|
||||
websocket: { message() {} },
|
||||
});
|
||||
|
||||
Bun.serve({
|
||||
unix: "/tmp/bun.sock",
|
||||
fetch() {
|
||||
return new Response();
|
||||
},
|
||||
});
|
||||
|
||||
Bun.serve({
|
||||
unix: "/tmp/bun.sock",
|
||||
fetch(req, server) {
|
||||
server.upgrade(req);
|
||||
if (Math.random() > 0.5) return undefined;
|
||||
return new Response();
|
||||
},
|
||||
websocket: { message() {} },
|
||||
});
|
||||
|
||||
Bun.serve({
|
||||
unix: "/tmp/bun.sock",
|
||||
fetch() {
|
||||
return new Response();
|
||||
},
|
||||
tls: {},
|
||||
});
|
||||
|
||||
Bun.serve({
|
||||
unix: "/tmp/bun.sock",
|
||||
fetch(req, server) {
|
||||
server.upgrade(req);
|
||||
if (Math.random() > 0.5) return undefined;
|
||||
return new Response();
|
||||
},
|
||||
websocket: { message() {} },
|
||||
tls: {},
|
||||
});
|
||||
|
||||
Bun.serve({
|
||||
fetch(req, server) {
|
||||
server.upgrade(req);
|
||||
},
|
||||
websocket: {
|
||||
open(ws) {
|
||||
console.log("WebSocket opened");
|
||||
ws.subscribe("test-channel");
|
||||
},
|
||||
|
||||
message(ws, message) {
|
||||
ws.publish("test-channel", `${message.toString()}`);
|
||||
},
|
||||
perMessageDeflate: true,
|
||||
},
|
||||
});
|
||||
// Bun.serve({
|
||||
// unix: "/tmp/bun.sock",
|
||||
// // @ts-expect-error
|
||||
// port: 1234,
|
||||
// fetch() {
|
||||
// return new Response();
|
||||
// },
|
||||
// });
|
||||
|
||||
// Bun.serve({
|
||||
// unix: "/tmp/bun.sock",
|
||||
// // @ts-expect-error
|
||||
// port: 1234,
|
||||
// fetch(req, server) {
|
||||
// server.upgrade(req);
|
||||
// if (Math.random() > 0.5) return undefined;
|
||||
// return new Response();
|
||||
// },
|
||||
// websocket: { message() {} },
|
||||
// });
|
||||
@@ -1,23 +0,0 @@
|
||||
new ReadableStream({
|
||||
start(controller) {
|
||||
controller.enqueue("hello");
|
||||
controller.enqueue("world");
|
||||
controller.close();
|
||||
},
|
||||
});
|
||||
|
||||
// this will have type errors when lib.dom.d.ts is present
|
||||
// afaik this isn't fixable
|
||||
new ReadableStream({
|
||||
type: "direct",
|
||||
pull(controller) {
|
||||
// eslint-disable-next-line
|
||||
controller.write("hello");
|
||||
// eslint-disable-next-line
|
||||
controller.write("world");
|
||||
controller.close();
|
||||
},
|
||||
cancel() {
|
||||
// called if stream.cancel() is called
|
||||
},
|
||||
});
|
||||
@@ -1,8 +0,0 @@
|
||||
// eslint-disable-next-line @definitelytyped/no-unnecessary-generics
|
||||
export declare const expectType: <T>(expression: T) => void;
|
||||
// eslint-disable-next-line @definitelytyped/no-unnecessary-generics
|
||||
export declare const expectAssignable: <T>(expression: T) => void;
|
||||
// eslint-disable-next-line @definitelytyped/no-unnecessary-generics
|
||||
export declare const expectNotAssignable: <T>(expression: any) => void;
|
||||
// eslint-disable-next-line @definitelytyped/no-unnecessary-generics
|
||||
export declare const expectTypeEquals: <T, S>(expression: T extends S ? (S extends T ? true : false) : false) => void;
|
||||
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"extends": "../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"skipLibCheck": true,
|
||||
"declaration": true,
|
||||
"emitDeclarationOnly": true,
|
||||
"noEmit": false,
|
||||
"declarationDir": "out"
|
||||
},
|
||||
"include": ["**/*.ts"],
|
||||
"exclude": ["dist", "node_modules"]
|
||||
"extends": "../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"skipLibCheck": true,
|
||||
"declaration": true,
|
||||
"emitDeclarationOnly": true,
|
||||
"noEmit": false,
|
||||
"declarationDir": "out"
|
||||
},
|
||||
"include": ["**/*.ts"],
|
||||
"exclude": ["dist", "node_modules"]
|
||||
}
|
||||
|
||||
433
packages/bun-types/wasm.d.ts
vendored
433
packages/bun-types/wasm.d.ts
vendored
@@ -1,270 +1,193 @@
|
||||
export {};
|
||||
|
||||
type _Global<T extends Bun.WebAssembly.ValueType = Bun.WebAssembly.ValueType> = typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { Global: infer T };
|
||||
}
|
||||
? T
|
||||
: Bun.WebAssembly.Global<T>;
|
||||
|
||||
type _CompileError = typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { CompileError: infer T };
|
||||
}
|
||||
? T
|
||||
: Bun.WebAssembly.CompileError;
|
||||
|
||||
type _LinkError = typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { LinkError: infer T };
|
||||
}
|
||||
? T
|
||||
: Bun.WebAssembly.LinkError;
|
||||
|
||||
type _RuntimeError = typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { RuntimeError: infer T };
|
||||
}
|
||||
? T
|
||||
: Bun.WebAssembly.RuntimeError;
|
||||
|
||||
type _Memory = typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { Memory: infer T };
|
||||
}
|
||||
? T
|
||||
: Bun.WebAssembly.Memory;
|
||||
|
||||
type _Instance = typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { Instance: infer T };
|
||||
}
|
||||
? T
|
||||
: Bun.WebAssembly.Instance;
|
||||
|
||||
type _Module = typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { Module: infer T };
|
||||
}
|
||||
? T
|
||||
: Bun.WebAssembly.Module;
|
||||
|
||||
type _Table = typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { Table: infer T };
|
||||
}
|
||||
? T
|
||||
: Bun.WebAssembly.Table;
|
||||
|
||||
declare global {
|
||||
namespace Bun {
|
||||
namespace WebAssembly {
|
||||
type ImportExportKind = "function" | "global" | "memory" | "table";
|
||||
type TableKind = "anyfunc" | "externref";
|
||||
// eslint-disable-next-line @typescript-eslint/ban-types
|
||||
type ExportValue = Function | Global | WebAssembly.Memory | WebAssembly.Table;
|
||||
type Exports = Record<string, ExportValue>;
|
||||
type ImportValue = ExportValue | number;
|
||||
type Imports = Record<string, ModuleImports>;
|
||||
type ModuleImports = Record<string, ImportValue>;
|
||||
|
||||
interface ValueTypeMap {
|
||||
// eslint-disable-next-line @typescript-eslint/ban-types
|
||||
anyfunc: Function;
|
||||
externref: any;
|
||||
f32: number;
|
||||
f64: number;
|
||||
i32: number;
|
||||
i64: bigint;
|
||||
v128: never;
|
||||
}
|
||||
|
||||
type ValueType = keyof ValueTypeMap;
|
||||
|
||||
interface GlobalDescriptor<T extends ValueType = ValueType> {
|
||||
mutable?: boolean;
|
||||
value: T;
|
||||
}
|
||||
|
||||
interface Global<T extends ValueType = ValueType> {
|
||||
// <T extends ValueType = ValueType> {
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Global/value) */
|
||||
value: ValueTypeMap[T];
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Global/valueOf) */
|
||||
valueOf(): ValueTypeMap[T];
|
||||
}
|
||||
|
||||
interface CompileError extends Error {}
|
||||
|
||||
interface LinkError extends Error {}
|
||||
|
||||
interface RuntimeError extends Error {}
|
||||
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Instance) */
|
||||
interface Instance {
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Instance/exports) */
|
||||
readonly exports: Exports;
|
||||
}
|
||||
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Memory) */
|
||||
interface Memory {
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Memory/buffer) */
|
||||
readonly buffer: ArrayBuffer;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Memory/grow) */
|
||||
grow(delta: number): number;
|
||||
}
|
||||
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Module) */
|
||||
interface Module {}
|
||||
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table) */
|
||||
interface Table {
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table/length) */
|
||||
readonly length: number;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table/get) */
|
||||
get(index: number): any;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table/grow) */
|
||||
grow(delta: number, value?: any): number;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table/set) */
|
||||
set(index: number, value?: any): void;
|
||||
}
|
||||
|
||||
interface MemoryDescriptor {
|
||||
initial: number;
|
||||
maximum?: number;
|
||||
shared?: boolean;
|
||||
}
|
||||
|
||||
interface ModuleExportDescriptor {
|
||||
kind: ImportExportKind;
|
||||
name: string;
|
||||
}
|
||||
|
||||
interface ModuleImportDescriptor {
|
||||
kind: ImportExportKind;
|
||||
module: string;
|
||||
name: string;
|
||||
}
|
||||
|
||||
interface TableDescriptor {
|
||||
element: TableKind;
|
||||
initial: number;
|
||||
maximum?: number;
|
||||
}
|
||||
|
||||
interface WebAssemblyInstantiatedSource {
|
||||
instance: Instance;
|
||||
module: Module;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
declare module "bun" {
|
||||
namespace WebAssembly {
|
||||
interface ValueTypeMap extends Bun.WebAssembly.ValueTypeMap {}
|
||||
interface GlobalDescriptor<T extends keyof ValueTypeMap = keyof ValueTypeMap>
|
||||
extends Bun.WebAssembly.GlobalDescriptor<T> {}
|
||||
interface MemoryDescriptor extends Bun.WebAssembly.MemoryDescriptor {}
|
||||
interface ModuleExportDescriptor extends Bun.WebAssembly.ModuleExportDescriptor {}
|
||||
interface ModuleImportDescriptor extends Bun.WebAssembly.ModuleImportDescriptor {}
|
||||
interface TableDescriptor extends Bun.WebAssembly.TableDescriptor {}
|
||||
interface WebAssemblyInstantiatedSource extends Bun.WebAssembly.WebAssemblyInstantiatedSource {}
|
||||
type ImportExportKind = "function" | "global" | "memory" | "table";
|
||||
type TableKind = "anyfunc" | "externref";
|
||||
type ExportValue = Function | Global | WebAssembly.Memory | WebAssembly.Table;
|
||||
type Exports = Record<string, ExportValue>;
|
||||
type ImportValue = ExportValue | number;
|
||||
type Imports = Record<string, ModuleImports>;
|
||||
type ModuleImports = Record<string, ImportValue>;
|
||||
|
||||
interface LinkError extends _LinkError {}
|
||||
var LinkError: {
|
||||
prototype: LinkError;
|
||||
new (message?: string): LinkError;
|
||||
(message?: string): LinkError;
|
||||
};
|
||||
|
||||
interface CompileError extends _CompileError {}
|
||||
var CompileError: typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { CompileError: infer T };
|
||||
interface ValueTypeMap {
|
||||
anyfunc: Function;
|
||||
externref: any;
|
||||
f32: number;
|
||||
f64: number;
|
||||
i32: number;
|
||||
i64: bigint;
|
||||
v128: never;
|
||||
}
|
||||
? T
|
||||
: {
|
||||
prototype: CompileError;
|
||||
new (message?: string): CompileError;
|
||||
(message?: string): CompileError;
|
||||
};
|
||||
|
||||
interface RuntimeError extends _RuntimeError {}
|
||||
var RuntimeError: {
|
||||
prototype: RuntimeError;
|
||||
new (message?: string): RuntimeError;
|
||||
(message?: string): RuntimeError;
|
||||
};
|
||||
type ValueType = keyof ValueTypeMap;
|
||||
|
||||
interface Global<T extends keyof ValueTypeMap = keyof ValueTypeMap> extends _Global<T> {}
|
||||
var Global: typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { Global: infer T };
|
||||
interface GlobalDescriptor<T extends ValueType = ValueType> {
|
||||
mutable?: boolean;
|
||||
value: T;
|
||||
}
|
||||
? T
|
||||
: {
|
||||
prototype: Global;
|
||||
new <T extends Bun.WebAssembly.ValueType = Bun.WebAssembly.ValueType>(
|
||||
descriptor: GlobalDescriptor<T>,
|
||||
v?: ValueTypeMap[T],
|
||||
): Global<T>;
|
||||
};
|
||||
|
||||
interface Instance extends _Instance {}
|
||||
var Instance: typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { Instance: infer T };
|
||||
interface Global<T extends ValueType = ValueType> {
|
||||
// <T extends ValueType = ValueType> {
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Global/value) */
|
||||
value: ValueTypeMap[T];
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Global/valueOf) */
|
||||
valueOf(): ValueTypeMap[T];
|
||||
}
|
||||
? T
|
||||
: {
|
||||
prototype: Instance;
|
||||
new (module: Module, importObject?: Bun.WebAssembly.Imports): Instance;
|
||||
};
|
||||
|
||||
interface Memory extends _Memory {}
|
||||
var Memory: {
|
||||
prototype: Memory;
|
||||
new (descriptor: MemoryDescriptor): Memory;
|
||||
};
|
||||
interface CompileError extends Error {}
|
||||
|
||||
interface Module extends _Module {}
|
||||
var Module: typeof globalThis extends {
|
||||
onerror: any;
|
||||
WebAssembly: { Module: infer T };
|
||||
interface LinkError extends Error {}
|
||||
|
||||
interface RuntimeError extends Error {}
|
||||
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Instance) */
|
||||
interface Instance {
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Instance/exports) */
|
||||
readonly exports: Exports;
|
||||
}
|
||||
? T
|
||||
: {
|
||||
prototype: Module;
|
||||
new (bytes: Bun.BufferSource): Module;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Module/customSections) */
|
||||
customSections(moduleObject: Module, sectionName: string): ArrayBuffer[];
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Module/exports) */
|
||||
exports(moduleObject: Module): ModuleExportDescriptor[];
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Module/imports) */
|
||||
imports(moduleObject: Module): ModuleImportDescriptor[];
|
||||
};
|
||||
|
||||
interface Table extends _Table {}
|
||||
var Table: {
|
||||
prototype: Table;
|
||||
new (descriptor: TableDescriptor, value?: any): Table;
|
||||
};
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Memory) */
|
||||
interface Memory {
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Memory/buffer) */
|
||||
readonly buffer: ArrayBuffer;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Memory/grow) */
|
||||
grow(delta: number): number;
|
||||
}
|
||||
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/compile) */
|
||||
function compile(bytes: Bun.BufferSource): Promise<Module>;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/compileStreaming) */
|
||||
function compileStreaming(source: Response | PromiseLike<Response>): Promise<Module>;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/instantiate) */
|
||||
function instantiate(
|
||||
bytes: Bun.BufferSource,
|
||||
importObject?: Bun.WebAssembly.Imports,
|
||||
): Promise<WebAssemblyInstantiatedSource>;
|
||||
function instantiate(moduleObject: Module, importObject?: Bun.WebAssembly.Imports): Promise<Instance>;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/instantiateStreaming) */
|
||||
function instantiateStreaming(
|
||||
source: Response | PromiseLike<Response>,
|
||||
importObject?: Bun.WebAssembly.Imports,
|
||||
): Promise<WebAssemblyInstantiatedSource>;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/validate) */
|
||||
function validate(bytes: Bun.BufferSource): boolean;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Module) */
|
||||
interface Module {}
|
||||
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table) */
|
||||
interface Table {
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table/length) */
|
||||
readonly length: number;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table/get) */
|
||||
get(index: number): any;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table/grow) */
|
||||
grow(delta: number, value?: any): number;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table/set) */
|
||||
set(index: number, value?: any): void;
|
||||
}
|
||||
|
||||
interface MemoryDescriptor {
|
||||
initial: number;
|
||||
maximum?: number;
|
||||
shared?: boolean;
|
||||
}
|
||||
|
||||
interface ModuleExportDescriptor {
|
||||
kind: ImportExportKind;
|
||||
name: string;
|
||||
}
|
||||
|
||||
interface ModuleImportDescriptor {
|
||||
kind: ImportExportKind;
|
||||
module: string;
|
||||
name: string;
|
||||
}
|
||||
|
||||
interface TableDescriptor {
|
||||
element: TableKind;
|
||||
initial: number;
|
||||
maximum?: number;
|
||||
}
|
||||
|
||||
interface WebAssemblyInstantiatedSource {
|
||||
instance: Instance;
|
||||
module: Module;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
declare namespace WebAssembly {
|
||||
interface ValueTypeMap extends Bun.WebAssembly.ValueTypeMap {}
|
||||
interface GlobalDescriptor<T extends keyof ValueTypeMap = keyof ValueTypeMap>
|
||||
extends Bun.WebAssembly.GlobalDescriptor<T> {}
|
||||
interface MemoryDescriptor extends Bun.WebAssembly.MemoryDescriptor {}
|
||||
interface ModuleExportDescriptor extends Bun.WebAssembly.ModuleExportDescriptor {}
|
||||
interface ModuleImportDescriptor extends Bun.WebAssembly.ModuleImportDescriptor {}
|
||||
interface TableDescriptor extends Bun.WebAssembly.TableDescriptor {}
|
||||
interface WebAssemblyInstantiatedSource extends Bun.WebAssembly.WebAssemblyInstantiatedSource {}
|
||||
|
||||
interface LinkError extends Bun.WebAssembly.LinkError {}
|
||||
var LinkError: {
|
||||
prototype: LinkError;
|
||||
new (message?: string): LinkError;
|
||||
(message?: string): LinkError;
|
||||
};
|
||||
|
||||
interface CompileError extends Bun.WebAssembly.CompileError {}
|
||||
var CompileError: {
|
||||
prototype: CompileError;
|
||||
new (message?: string): CompileError;
|
||||
(message?: string): CompileError;
|
||||
};
|
||||
|
||||
interface RuntimeError extends Bun.WebAssembly.RuntimeError {}
|
||||
var RuntimeError: {
|
||||
prototype: RuntimeError;
|
||||
new (message?: string): RuntimeError;
|
||||
(message?: string): RuntimeError;
|
||||
};
|
||||
|
||||
interface Global<T extends keyof ValueTypeMap = keyof ValueTypeMap> extends Bun.WebAssembly.Global<T> {}
|
||||
var Global: {
|
||||
prototype: Global;
|
||||
new <T extends Bun.WebAssembly.ValueType = Bun.WebAssembly.ValueType>(
|
||||
descriptor: GlobalDescriptor<T>,
|
||||
v?: ValueTypeMap[T],
|
||||
): Global<T>;
|
||||
};
|
||||
|
||||
interface Instance extends Bun.WebAssembly.Instance {}
|
||||
var Instance: {
|
||||
prototype: Instance;
|
||||
new (module: Module, importObject?: Bun.WebAssembly.Imports): Instance;
|
||||
};
|
||||
|
||||
interface Memory extends Bun.WebAssembly.Memory {}
|
||||
var Memory: {
|
||||
prototype: Memory;
|
||||
new (descriptor: MemoryDescriptor): Memory;
|
||||
};
|
||||
|
||||
interface Module extends Bun.WebAssembly.Module {}
|
||||
var Module: Bun.__internal.UseLibDomIfAvailable<
|
||||
"WebAssembly",
|
||||
{
|
||||
Module: {
|
||||
prototype: Module;
|
||||
new (bytes: Bun.BufferSource): Module;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Module/customSections) */
|
||||
customSections(moduleObject: Module, sectionName: string): ArrayBuffer[];
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Module/exports) */
|
||||
exports(moduleObject: Module): ModuleExportDescriptor[];
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Module/imports) */
|
||||
imports(moduleObject: Module): ModuleImportDescriptor[];
|
||||
};
|
||||
}
|
||||
>["Module"];
|
||||
|
||||
interface Table extends Bun.WebAssembly.Table {}
|
||||
var Table: {
|
||||
prototype: Table;
|
||||
new (descriptor: TableDescriptor, value?: any): Table;
|
||||
};
|
||||
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/compile) */
|
||||
function compile(bytes: Bun.BufferSource): Promise<Module>;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/compileStreaming) */
|
||||
function compileStreaming(source: Response | PromiseLike<Response>): Promise<Module>;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/instantiate) */
|
||||
function instantiate(
|
||||
bytes: Bun.BufferSource,
|
||||
importObject?: Bun.WebAssembly.Imports,
|
||||
): Promise<WebAssemblyInstantiatedSource>;
|
||||
function instantiate(moduleObject: Module, importObject?: Bun.WebAssembly.Imports): Promise<Instance>;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/instantiateStreaming) */
|
||||
function instantiateStreaming(
|
||||
source: Response | PromiseLike<Response>,
|
||||
importObject?: Bun.WebAssembly.Imports,
|
||||
): Promise<WebAssemblyInstantiatedSource>;
|
||||
/** [MDN Reference](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/validate) */
|
||||
function validate(bytes: Bun.BufferSource): boolean;
|
||||
}
|
||||
|
||||
@@ -369,9 +369,11 @@ struct us_listen_socket_t *us_socket_context_listen(int ssl, struct us_socket_co
|
||||
ls->s.context = context;
|
||||
ls->s.timeout = 255;
|
||||
ls->s.long_timeout = 255;
|
||||
ls->s.low_prio_state = 0;
|
||||
ls->s.flags.low_prio_state = 0;
|
||||
ls->s.flags.is_paused = 0;
|
||||
|
||||
ls->s.next = 0;
|
||||
ls->s.allow_half_open = (options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
|
||||
ls->s.flags.allow_half_open = (options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
|
||||
us_internal_socket_context_link_listen_socket(context, ls);
|
||||
|
||||
ls->socket_ext_size = socket_ext_size;
|
||||
@@ -401,10 +403,10 @@ struct us_listen_socket_t *us_socket_context_listen_unix(int ssl, struct us_sock
|
||||
ls->s.context = context;
|
||||
ls->s.timeout = 255;
|
||||
ls->s.long_timeout = 255;
|
||||
ls->s.low_prio_state = 0;
|
||||
ls->s.flags.low_prio_state = 0;
|
||||
ls->s.next = 0;
|
||||
ls->s.allow_half_open = (options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
|
||||
|
||||
ls->s.flags.allow_half_open = (options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
|
||||
ls->s.flags.is_paused = 0;
|
||||
us_internal_socket_context_link_listen_socket(context, ls);
|
||||
|
||||
ls->socket_ext_size = socket_ext_size;
|
||||
@@ -432,9 +434,11 @@ struct us_socket_t* us_socket_context_connect_resolved_dns(struct us_socket_cont
|
||||
socket->context = context;
|
||||
socket->timeout = 255;
|
||||
socket->long_timeout = 255;
|
||||
socket->low_prio_state = 0;
|
||||
socket->flags.low_prio_state = 0;
|
||||
socket->flags.allow_half_open = (options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
|
||||
socket->flags.is_paused = 0;
|
||||
socket->connect_state = NULL;
|
||||
socket->allow_half_open = (options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
|
||||
|
||||
|
||||
us_internal_socket_context_link_socket(context, socket);
|
||||
|
||||
@@ -556,8 +560,9 @@ int start_connections(struct us_connecting_socket_t *c, int count) {
|
||||
s->context = c->context;
|
||||
s->timeout = c->timeout;
|
||||
s->long_timeout = c->long_timeout;
|
||||
s->low_prio_state = 0;
|
||||
s->allow_half_open = (c->options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
|
||||
s->flags.low_prio_state = 0;
|
||||
s->flags.allow_half_open = (c->options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
|
||||
s->flags.is_paused = 0;
|
||||
/* Link it into context so that timeout fires properly */
|
||||
us_internal_socket_context_link_socket(s->context, s);
|
||||
|
||||
@@ -731,9 +736,10 @@ struct us_socket_t *us_socket_context_connect_unix(int ssl, struct us_socket_con
|
||||
connect_socket->context = context;
|
||||
connect_socket->timeout = 255;
|
||||
connect_socket->long_timeout = 255;
|
||||
connect_socket->low_prio_state = 0;
|
||||
connect_socket->flags.low_prio_state = 0;
|
||||
connect_socket->connect_state = NULL;
|
||||
connect_socket->allow_half_open = (options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
|
||||
connect_socket->flags.allow_half_open = (options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
|
||||
connect_socket->flags.is_paused = 0;
|
||||
us_internal_socket_context_link_socket(context, connect_socket);
|
||||
|
||||
return connect_socket;
|
||||
@@ -764,7 +770,7 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
|
||||
return s;
|
||||
}
|
||||
|
||||
if (s->low_prio_state != 1) {
|
||||
if (s->flags.low_prio_state != 1) {
|
||||
/* We need to be sure that we still holding a reference*/
|
||||
us_socket_context_ref(ssl, context);
|
||||
/* This properly updates the iterator if in on_timeout */
|
||||
@@ -788,7 +794,7 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
|
||||
new_s->timeout = 255;
|
||||
new_s->long_timeout = 255;
|
||||
|
||||
if (new_s->low_prio_state == 1) {
|
||||
if (new_s->flags.low_prio_state == 1) {
|
||||
/* update pointers in low-priority queue */
|
||||
if (!new_s->prev) new_s->context->loop->data.low_prio_head = new_s;
|
||||
else new_s->prev->next = new_s;
|
||||
|
||||
@@ -353,15 +353,21 @@ int kqueue_change(int kqfd, int fd, int old_events, int new_events, void *user_d
|
||||
int change_length = 0;
|
||||
|
||||
/* Do they differ in readable? */
|
||||
int is_readable = (new_events & LIBUS_SOCKET_READABLE);
|
||||
int is_writable = (new_events & LIBUS_SOCKET_WRITABLE);
|
||||
if ((new_events & LIBUS_SOCKET_READABLE) != (old_events & LIBUS_SOCKET_READABLE)) {
|
||||
EV_SET64(&change_list[change_length++], fd, EVFILT_READ, (new_events & LIBUS_SOCKET_READABLE) ? EV_ADD : EV_DELETE, 0, 0, (uint64_t)(void*)user_data, 0, 0);
|
||||
EV_SET64(&change_list[change_length++], fd, EVFILT_READ, is_readable ? EV_ADD : EV_DELETE, 0, 0, (uint64_t)(void*)user_data, 0, 0);
|
||||
}
|
||||
|
||||
/* Do they differ in writable? */
|
||||
if ((new_events & LIBUS_SOCKET_WRITABLE) != (old_events & LIBUS_SOCKET_WRITABLE)) {
|
||||
|
||||
if(!is_readable && !is_writable) {
|
||||
if(!(old_events & LIBUS_SOCKET_WRITABLE)) {
|
||||
// if we are not reading or writing, we need to add writable to receive FIN
|
||||
EV_SET64(&change_list[change_length++], fd, EVFILT_WRITE, EV_ADD, 0, 0, (uint64_t)(void*)user_data, 0, 0);
|
||||
}
|
||||
} else if ((new_events & LIBUS_SOCKET_WRITABLE) != (old_events & LIBUS_SOCKET_WRITABLE)) {
|
||||
/* Do they differ in writable? */
|
||||
EV_SET64(&change_list[change_length++], fd, EVFILT_WRITE, (new_events & LIBUS_SOCKET_WRITABLE) ? EV_ADD : EV_DELETE, 0, 0, (uint64_t)(void*)user_data, 0, 0);
|
||||
}
|
||||
|
||||
}
|
||||
int ret;
|
||||
do {
|
||||
ret = kevent64(kqfd, change_list, change_length, change_list, change_length, KEVENT_FLAG_ERROR_EVENTS, NULL);
|
||||
@@ -399,6 +405,10 @@ int us_poll_start_rc(struct us_poll_t *p, struct us_loop_t *loop, int events) {
|
||||
|
||||
#ifdef LIBUS_USE_EPOLL
|
||||
struct epoll_event event;
|
||||
if(!(events & LIBUS_SOCKET_READABLE) && !(events & LIBUS_SOCKET_WRITABLE)) {
|
||||
// if we are disabling readable, we need to add the other events to detect EOF/HUP/ERR
|
||||
events |= EPOLLRDHUP | EPOLLHUP | EPOLLERR;
|
||||
}
|
||||
event.events = events;
|
||||
event.data.ptr = p;
|
||||
int ret;
|
||||
@@ -423,6 +433,10 @@ void us_poll_change(struct us_poll_t *p, struct us_loop_t *loop, int events) {
|
||||
|
||||
#ifdef LIBUS_USE_EPOLL
|
||||
struct epoll_event event;
|
||||
if(!(events & LIBUS_SOCKET_READABLE) && !(events & LIBUS_SOCKET_WRITABLE)) {
|
||||
// if we are disabling readable, we need to add the other events to detect EOF/HUP/ERR
|
||||
events |= EPOLLRDHUP | EPOLLHUP | EPOLLERR;
|
||||
}
|
||||
event.events = events;
|
||||
event.data.ptr = p;
|
||||
int rc;
|
||||
|
||||
@@ -104,7 +104,6 @@ void us_poll_change(struct us_poll_t *p, struct us_loop_t *loop, int events) {
|
||||
us_internal_poll_type(p) |
|
||||
((events & LIBUS_SOCKET_READABLE) ? POLL_TYPE_POLLING_IN : 0) |
|
||||
((events & LIBUS_SOCKET_WRITABLE) ? POLL_TYPE_POLLING_OUT : 0);
|
||||
|
||||
uv_poll_start(p->uv_p, events, poll_cb);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,6 +70,7 @@ void us_internal_loop_update_pending_ready_polls(struct us_loop_t *loop,
|
||||
#define IS_EINTR(rc) (rc == -1 && errno == EINTR)
|
||||
#define LIBUS_ERR errno
|
||||
#endif
|
||||
#include <stdbool.h>
|
||||
/* Poll type and what it polls for */
|
||||
enum {
|
||||
/* Three first bits */
|
||||
@@ -161,14 +162,22 @@ us_internal_ssl_socket_close(us_internal_ssl_socket_r s, int code,
|
||||
int us_internal_handle_dns_results(us_loop_r loop);
|
||||
|
||||
/* Sockets are polls */
|
||||
|
||||
struct us_socket_flags {
|
||||
/* If true, the readable side is paused */
|
||||
bool is_paused: 1;
|
||||
/* Allow to stay alive after FIN/EOF */
|
||||
bool allow_half_open: 1;
|
||||
/* 0 = not in low-prio queue, 1 = is in low-prio queue, 2 = was in low-prio queue in this iteration */
|
||||
unsigned char low_prio_state: 2;
|
||||
|
||||
} __attribute__((packed));
|
||||
|
||||
struct us_socket_t {
|
||||
alignas(LIBUS_EXT_ALIGNMENT) struct us_poll_t p; // 4 bytes
|
||||
unsigned char timeout; // 1 byte
|
||||
unsigned char long_timeout; // 1 byte
|
||||
unsigned char
|
||||
low_prio_state; /* 0 = not in low-prio queue, 1 = is in low-prio queue, 2
|
||||
= was in low-prio queue in this iteration */
|
||||
unsigned char allow_half_open; /* Allow to stay alive after FIN/EOF */
|
||||
struct us_socket_flags flags;
|
||||
|
||||
struct us_socket_context_t *context;
|
||||
struct us_socket_t *prev, *next;
|
||||
|
||||
@@ -472,6 +472,7 @@ int us_socket_raw_write(int ssl, us_socket_r s, const char *data, int length, in
|
||||
struct us_socket_t* us_socket_open(int ssl, struct us_socket_t * s, int is_client, char* ip, int ip_length);
|
||||
int us_raw_root_certs(struct us_cert_string_t**out);
|
||||
unsigned int us_get_remote_address_info(char *buf, us_socket_r s, const char **dest, int *port, int *is_ipv6);
|
||||
unsigned int us_get_local_address_info(char *buf, us_socket_r s, const char **dest, int *port, int *is_ipv6);
|
||||
int us_socket_get_error(int ssl, us_socket_r s);
|
||||
|
||||
void us_socket_ref(us_socket_r s);
|
||||
|
||||
@@ -164,7 +164,7 @@ void us_internal_handle_low_priority_sockets(struct us_loop_t *loop) {
|
||||
us_internal_socket_context_link_socket(s->context, s);
|
||||
us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) | LIBUS_SOCKET_READABLE);
|
||||
|
||||
s->low_prio_state = 2;
|
||||
s->flags.low_prio_state = 2;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -213,21 +213,21 @@ void us_internal_free_closed_sockets(struct us_loop_t *loop) {
|
||||
us_poll_free((struct us_poll_t *) s, loop);
|
||||
s = next;
|
||||
}
|
||||
loop->data.closed_head = 0;
|
||||
loop->data.closed_head = NULL;
|
||||
|
||||
for (struct us_udp_socket_t *s = loop->data.closed_udp_head; s; ) {
|
||||
struct us_udp_socket_t *next = s->next;
|
||||
us_poll_free((struct us_poll_t *) s, loop);
|
||||
s = next;
|
||||
}
|
||||
loop->data.closed_udp_head = 0;
|
||||
loop->data.closed_udp_head = NULL;
|
||||
|
||||
for (struct us_connecting_socket_t *s = loop->data.closed_connecting_head; s; ) {
|
||||
struct us_connecting_socket_t *next = s->next;
|
||||
us_free(s);
|
||||
s = next;
|
||||
}
|
||||
loop->data.closed_connecting_head = 0;
|
||||
loop->data.closed_connecting_head = NULL;
|
||||
}
|
||||
|
||||
void us_internal_free_closed_contexts(struct us_loop_t *loop) {
|
||||
@@ -236,7 +236,7 @@ void us_internal_free_closed_contexts(struct us_loop_t *loop) {
|
||||
us_free(ctx);
|
||||
ctx = next;
|
||||
}
|
||||
loop->data.closed_context_head = 0;
|
||||
loop->data.closed_context_head = NULL;
|
||||
}
|
||||
|
||||
void sweep_timer_cb(struct us_internal_callback_t *cb) {
|
||||
@@ -310,9 +310,9 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int eof, in
|
||||
s->connect_state = NULL;
|
||||
s->timeout = 255;
|
||||
s->long_timeout = 255;
|
||||
s->low_prio_state = 0;
|
||||
s->allow_half_open = listen_socket->s.allow_half_open;
|
||||
|
||||
s->flags.low_prio_state = 0;
|
||||
s->flags.allow_half_open = listen_socket->s.flags.allow_half_open;
|
||||
s->flags.is_paused = 0;
|
||||
|
||||
/* We always use nodelay */
|
||||
bsd_socket_nodelay(client_fd, 1);
|
||||
@@ -358,8 +358,8 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int eof, in
|
||||
* SSL handshakes are CPU intensive, so we limit the number of handshakes per loop iteration, and move the rest
|
||||
* to the low-priority queue */
|
||||
if (s->context->is_low_prio(s)) {
|
||||
if (s->low_prio_state == 2) {
|
||||
s->low_prio_state = 0; /* Socket has been delayed and now it's time to process incoming data for one iteration */
|
||||
if (s->flags.low_prio_state == 2) {
|
||||
s->flags.low_prio_state = 0; /* Socket has been delayed and now it's time to process incoming data for one iteration */
|
||||
} else if (s->context->loop->data.low_prio_budget > 0) {
|
||||
s->context->loop->data.low_prio_budget--; /* Still having budget for this iteration - do normal processing */
|
||||
} else {
|
||||
@@ -375,7 +375,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int eof, in
|
||||
if (s->next) s->next->prev = s;
|
||||
s->context->loop->data.low_prio_head = s;
|
||||
|
||||
s->low_prio_state = 1;
|
||||
s->flags.low_prio_state = 1;
|
||||
|
||||
break;
|
||||
}
|
||||
@@ -439,7 +439,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int eof, in
|
||||
s = us_socket_close(0, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, NULL);
|
||||
return;
|
||||
}
|
||||
if(s->allow_half_open) {
|
||||
if(s->flags.allow_half_open) {
|
||||
/* We got a Error but is EOF and we allow half open so stop polling for readable and keep going*/
|
||||
us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
|
||||
s = s->context->on_end(s);
|
||||
|
||||
@@ -37,6 +37,15 @@ int us_socket_local_port(int ssl, struct us_socket_t *s) {
|
||||
}
|
||||
}
|
||||
|
||||
int us_socket_remote_port(int ssl, struct us_socket_t *s) {
|
||||
struct bsd_addr_t addr;
|
||||
if (bsd_remote_addr(us_poll_fd(&s->p), &addr)) {
|
||||
return -1;
|
||||
} else {
|
||||
return bsd_addr_get_port(&addr);
|
||||
}
|
||||
}
|
||||
|
||||
void us_socket_shutdown_read(int ssl, struct us_socket_t *s) {
|
||||
/* This syscall is idempotent so no extra check is needed */
|
||||
bsd_shutdown_socket_read(us_poll_fd((struct us_poll_t *) s));
|
||||
@@ -178,7 +187,7 @@ struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, vo
|
||||
/* make sure the context is alive until the callback ends */
|
||||
us_socket_context_ref(ssl, s->context);
|
||||
|
||||
if (s->low_prio_state == 1) {
|
||||
if (s->flags.low_prio_state == 1) {
|
||||
/* Unlink this socket from the low-priority queue */
|
||||
if (!s->prev) s->context->loop->data.low_prio_head = s->next;
|
||||
else s->prev->next = s->next;
|
||||
@@ -187,7 +196,7 @@ struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, vo
|
||||
|
||||
s->prev = 0;
|
||||
s->next = 0;
|
||||
s->low_prio_state = 0;
|
||||
s->flags.low_prio_state = 0;
|
||||
us_socket_context_unref(ssl, s->context);
|
||||
} else {
|
||||
us_internal_socket_context_unlink_socket(ssl, s->context, s);
|
||||
@@ -238,7 +247,7 @@ struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, vo
|
||||
// - does not close
|
||||
struct us_socket_t *us_socket_detach(int ssl, struct us_socket_t *s) {
|
||||
if (!us_socket_is_closed(0, s)) {
|
||||
if (s->low_prio_state == 1) {
|
||||
if (s->flags.low_prio_state == 1) {
|
||||
/* Unlink this socket from the low-priority queue */
|
||||
if (!s->prev) s->context->loop->data.low_prio_head = s->next;
|
||||
else s->prev->next = s->next;
|
||||
@@ -247,7 +256,7 @@ struct us_socket_t *us_socket_detach(int ssl, struct us_socket_t *s) {
|
||||
|
||||
s->prev = 0;
|
||||
s->next = 0;
|
||||
s->low_prio_state = 0;
|
||||
s->flags.low_prio_state = 0;
|
||||
us_socket_context_unref(ssl, s->context);
|
||||
|
||||
} else {
|
||||
@@ -277,7 +286,7 @@ struct us_socket_t *us_socket_attach(int ssl, LIBUS_SOCKET_DESCRIPTOR client_fd,
|
||||
|
||||
s->context = ctx;
|
||||
s->timeout = 0;
|
||||
s->low_prio_state = 0;
|
||||
s->flags.low_prio_state = 0;
|
||||
|
||||
/* We always use nodelay */
|
||||
bsd_socket_nodelay(client_fd, 1);
|
||||
@@ -330,7 +339,9 @@ struct us_socket_t *us_socket_from_fd(struct us_socket_context_t *ctx, int socke
|
||||
s->context = ctx;
|
||||
s->timeout = 0;
|
||||
s->long_timeout = 0;
|
||||
s->low_prio_state = 0;
|
||||
s->flags.low_prio_state = 0;
|
||||
s->flags.is_paused = 0;
|
||||
s->flags.allow_half_open = 0;
|
||||
|
||||
/* We always use nodelay */
|
||||
bsd_socket_nodelay(fd, 1);
|
||||
@@ -501,6 +512,24 @@ unsigned int us_get_remote_address_info(char *buf, struct us_socket_t *s, const
|
||||
return length;
|
||||
}
|
||||
|
||||
unsigned int us_get_local_address_info(char *buf, struct us_socket_t *s, const char **dest, int *port, int *is_ipv6)
|
||||
{
|
||||
struct bsd_addr_t addr;
|
||||
if (bsd_local_addr(us_poll_fd(&s->p), &addr)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int length = bsd_addr_get_ip_length(&addr);
|
||||
if (!length) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
memcpy(buf, bsd_addr_get_ip(&addr), length);
|
||||
*port = bsd_addr_get_port(&addr);
|
||||
|
||||
return length;
|
||||
}
|
||||
|
||||
void us_socket_ref(struct us_socket_t *s) {
|
||||
#ifdef LIBUS_USE_LIBUV
|
||||
uv_ref((uv_handle_t*)s->p.uv_p);
|
||||
@@ -538,18 +567,17 @@ struct us_loop_t *us_connecting_socket_get_loop(struct us_connecting_socket_t *c
|
||||
}
|
||||
|
||||
void us_socket_pause(int ssl, struct us_socket_t *s) {
|
||||
if(s->flags.is_paused) return;
|
||||
// closed cannot be paused because it is already closed
|
||||
if(us_socket_is_closed(ssl, s)) return;
|
||||
if(us_socket_is_shut_down(ssl, s)) {
|
||||
// we already sent FIN so we pause all events because we are read-only
|
||||
us_poll_change(&s->p, s->context->loop, 0);
|
||||
return;
|
||||
}
|
||||
// we are readable and writable so we can just pause readable side
|
||||
us_poll_change(&s->p, s->context->loop, LIBUS_SOCKET_WRITABLE);
|
||||
s->flags.is_paused = 1;
|
||||
}
|
||||
|
||||
void us_socket_resume(int ssl, struct us_socket_t *s) {
|
||||
if(!s->flags.is_paused) return;
|
||||
s->flags.is_paused = 0;
|
||||
// closed cannot be resumed
|
||||
if(us_socket_is_closed(ssl, s)) return;
|
||||
|
||||
|
||||
@@ -131,7 +131,7 @@ public:
|
||||
getLoopData()->setCorkedSocket(this, SSL);
|
||||
}
|
||||
|
||||
/* Returns wheter we are corked or not */
|
||||
/* Returns whether we are corked */
|
||||
bool isCorked() {
|
||||
return getLoopData()->isCorkedWith(this);
|
||||
}
|
||||
@@ -182,9 +182,9 @@ public:
|
||||
}
|
||||
|
||||
/* Returns the user space backpressure. */
|
||||
unsigned int getBufferedAmount() {
|
||||
size_t getBufferedAmount() {
|
||||
/* We return the actual amount of bytes in backbuffer, including pendingRemoval */
|
||||
return (unsigned int) getAsyncSocketData()->buffer.totalLength();
|
||||
return getAsyncSocketData()->buffer.totalLength();
|
||||
}
|
||||
|
||||
/* Returns the text representation of an IPv4 or IPv6 address */
|
||||
@@ -222,6 +222,63 @@ public:
|
||||
return addressAsText(getRemoteAddress());
|
||||
}
|
||||
|
||||
/**
|
||||
* Flushes the socket buffer by writing as much data as possible to the underlying socket.
|
||||
*
|
||||
* @return The total number of bytes successfully written to the socket
|
||||
*/
|
||||
size_t flush() {
|
||||
/* Check if socket is valid for operations */
|
||||
if (us_socket_is_closed(SSL, (us_socket_t *) this)) {
|
||||
/* Socket is closed, no flushing is possible */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Get the associated asynchronous socket data structure */
|
||||
AsyncSocketData<SSL> *asyncSocketData = getAsyncSocketData();
|
||||
size_t total_written = 0;
|
||||
|
||||
/* Continue flushing as long as we have data in the buffer */
|
||||
while (asyncSocketData->buffer.length()) {
|
||||
/* Get current buffer size */
|
||||
size_t buffer_len = asyncSocketData->buffer.length();
|
||||
|
||||
/* Limit write size to INT_MAX as the underlying socket API uses int for length */
|
||||
int max_flush_len = std::min(buffer_len, (size_t)INT_MAX);
|
||||
|
||||
/* Attempt to write data to the socket */
|
||||
int written = us_socket_write(SSL, (us_socket_t *) this, asyncSocketData->buffer.data(), max_flush_len, 0);
|
||||
total_written += written;
|
||||
|
||||
/* Check if we couldn't write the entire buffer */
|
||||
if ((unsigned int) written < buffer_len) {
|
||||
/* Remove the successfully written data from the buffer */
|
||||
asyncSocketData->buffer.erase((unsigned int) written);
|
||||
|
||||
/* If we wrote less than we attempted, the socket buffer is likely full
|
||||
* likely is used as an optimization hint to the compiler
|
||||
* since written < buffer_len is very likely to be true
|
||||
*/
|
||||
if(written < max_flush_len) {
|
||||
[[likely]]
|
||||
/* Cannot write more at this time, return what we've written so far */
|
||||
return total_written;
|
||||
}
|
||||
/* If we wrote exactly max_flush_len, we might be able to write more, so continue
|
||||
* This is unlikely to happen, because this would be INT_MAX bytes, which is unlikely to be written in one go
|
||||
* but we keep this check for completeness
|
||||
*/
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Successfully wrote the entire buffer, clear the buffer */
|
||||
asyncSocketData->buffer.clear();
|
||||
}
|
||||
|
||||
/* Return the total number of bytes written during this flush operation */
|
||||
return total_written;
|
||||
}
|
||||
|
||||
/* Write in three levels of prioritization: cork-buffer, syscall, socket-buffer. Always drain if possible.
|
||||
* Returns pair of bytes written (anywhere) and wheter or not this call resulted in the polling for
|
||||
* writable (or we are in a state that implies polling for writable). */
|
||||
@@ -233,7 +290,6 @@ public:
|
||||
|
||||
LoopData *loopData = getLoopData();
|
||||
AsyncSocketData<SSL> *asyncSocketData = getAsyncSocketData();
|
||||
|
||||
/* We are limited if we have a per-socket buffer */
|
||||
if (asyncSocketData->buffer.length()) {
|
||||
size_t buffer_len = asyncSocketData->buffer.length();
|
||||
@@ -261,7 +317,7 @@ public:
|
||||
asyncSocketData->buffer.clear();
|
||||
}
|
||||
|
||||
if (length) {
|
||||
if (length) {
|
||||
if (loopData->isCorkedWith(this)) {
|
||||
/* We are corked */
|
||||
if (LoopData::CORK_BUFFER_SIZE - loopData->getCorkOffset() >= (unsigned int) length) {
|
||||
|
||||
@@ -125,7 +125,7 @@ private:
|
||||
}
|
||||
|
||||
/* Signal broken HTTP request only if we have a pending request */
|
||||
if (httpResponseData->onAborted) {
|
||||
if (httpResponseData->onAborted != nullptr && httpResponseData->userData != nullptr) {
|
||||
httpResponseData->onAborted((HttpResponse<SSL> *)s, httpResponseData->userData);
|
||||
}
|
||||
|
||||
@@ -235,7 +235,7 @@ private:
|
||||
}
|
||||
|
||||
/* Returning from a request handler without responding or attaching an onAborted handler is ill-use */
|
||||
if (!((HttpResponse<SSL> *) s)->hasResponded() && !httpResponseData->onAborted) {
|
||||
if (!((HttpResponse<SSL> *) s)->hasResponded() && !httpResponseData->onAborted && !httpResponseData->socketData) {
|
||||
/* Throw exception here? */
|
||||
std::cerr << "Error: Returning from a request handler without responding or attaching an abort handler is forbidden!" << std::endl;
|
||||
std::terminate();
|
||||
@@ -365,11 +365,32 @@ private:
|
||||
auto *asyncSocket = reinterpret_cast<AsyncSocket<SSL> *>(s);
|
||||
auto *httpResponseData = reinterpret_cast<HttpResponseData<SSL> *>(asyncSocket->getAsyncSocketData());
|
||||
|
||||
/* Attempt to drain the socket buffer before triggering onWritable callback */
|
||||
size_t bufferedAmount = asyncSocket->getBufferedAmount();
|
||||
if (bufferedAmount > 0) {
|
||||
/* Try to flush pending data from the socket's buffer to the network */
|
||||
bufferedAmount -= asyncSocket->flush();
|
||||
|
||||
/* Check if there's still data waiting to be sent after flush attempt */
|
||||
if (bufferedAmount > 0) {
|
||||
/* Socket buffer is not completely empty yet
|
||||
* - Reset the timeout to prevent premature connection closure
|
||||
* - This allows time for another writable event or new request
|
||||
* - Return the socket to indicate we're still processing
|
||||
*/
|
||||
reinterpret_cast<HttpResponse<SSL> *>(s)->resetTimeout();
|
||||
return s;
|
||||
}
|
||||
/* If bufferedAmount is now 0, we've successfully flushed everything
|
||||
* and will fall through to the next section of code
|
||||
*/
|
||||
}
|
||||
|
||||
/* Ask the developer to write data and return success (true) or failure (false), OR skip sending anything and return success (true). */
|
||||
if (httpResponseData->onWritable) {
|
||||
/* We are now writable, so hang timeout again, the user does not have to do anything so we should hang until end or tryEnd rearms timeout */
|
||||
us_socket_timeout(SSL, s, 0);
|
||||
|
||||
|
||||
/* We expect the developer to return whether or not write was successful (true).
|
||||
* If write was never called, the developer should still return true so that we may drain. */
|
||||
bool success = httpResponseData->callOnWritable(reinterpret_cast<HttpResponse<SSL> *>(asyncSocket), httpResponseData->offset);
|
||||
@@ -384,7 +405,7 @@ private:
|
||||
}
|
||||
|
||||
/* Drain any socket buffer, this might empty our backpressure and thus finish the request */
|
||||
/*auto [written, failed] = */asyncSocket->write(nullptr, 0, true, 0);
|
||||
asyncSocket->flush();
|
||||
|
||||
/* Should we close this connection after a response - and is this response really done? */
|
||||
if (httpResponseData->state & HttpResponseData<SSL>::HTTP_CONNECTION_CLOSE) {
|
||||
|
||||
@@ -122,15 +122,10 @@ public:
|
||||
|
||||
/* We do not have tryWrite-like functionalities, so ignore optional in this path */
|
||||
|
||||
/* Do not allow sending 0 chunk here */
|
||||
if (data.length()) {
|
||||
Super::write("\r\n", 2);
|
||||
writeUnsignedHex((unsigned int) data.length());
|
||||
Super::write("\r\n", 2);
|
||||
|
||||
/* Ignoring optional for now */
|
||||
Super::write(data.data(), (int) data.length());
|
||||
}
|
||||
|
||||
/* Write the chunked data if there is any (this will not send zero chunks) */
|
||||
this->write(data, nullptr);
|
||||
|
||||
|
||||
/* Terminating 0 chunk */
|
||||
Super::write("\r\n0\r\n\r\n", 7);
|
||||
@@ -480,6 +475,40 @@ public:
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t length = data.length();
|
||||
|
||||
// Special handling for extremely large data (greater than UINT_MAX bytes)
|
||||
// most clients expect a max of UINT_MAX, so we need to split the write into multiple writes
|
||||
if (length > UINT_MAX) {
|
||||
bool has_failed = false;
|
||||
size_t total_written = 0;
|
||||
// Process full-sized chunks until remaining data is less than UINT_MAX
|
||||
while (length > UINT_MAX) {
|
||||
size_t written = 0;
|
||||
// Write a UINT_MAX-sized chunk and check for failure
|
||||
// even after failure we continue writing because the data will be buffered
|
||||
if(!this->write(data.substr(0, UINT_MAX), &written)) {
|
||||
has_failed = true;
|
||||
}
|
||||
total_written += written;
|
||||
length -= UINT_MAX;
|
||||
data = data.substr(UINT_MAX);
|
||||
}
|
||||
// Handle the final chunk (less than UINT_MAX bytes)
|
||||
if (length > 0) {
|
||||
size_t written = 0;
|
||||
if(!this->write(data, &written)) {
|
||||
has_failed = true;
|
||||
}
|
||||
total_written += written;
|
||||
}
|
||||
if (writtenPtr) {
|
||||
*writtenPtr = total_written;
|
||||
}
|
||||
return !has_failed;
|
||||
}
|
||||
|
||||
|
||||
HttpResponseData<SSL> *httpResponseData = getHttpResponseData();
|
||||
|
||||
if (!(httpResponseData->state & HttpResponseData<SSL>::HTTP_WROTE_CONTENT_LENGTH_HEADER) && !httpResponseData->fromAncientRequest) {
|
||||
@@ -499,17 +528,36 @@ public:
|
||||
Super::write("\r\n", 2);
|
||||
httpResponseData->state |= HttpResponseData<SSL>::HTTP_WRITE_CALLED;
|
||||
}
|
||||
size_t total_written = 0;
|
||||
bool has_failed = false;
|
||||
|
||||
auto [written, failed] = Super::write(data.data(), (int) data.length());
|
||||
// Handle data larger than INT_MAX by writing it in chunks of INT_MAX bytes
|
||||
while (length > INT_MAX) {
|
||||
// Write the maximum allowed chunk size (INT_MAX)
|
||||
auto [written, failed] = Super::write(data.data(), INT_MAX);
|
||||
// If the write failed, set the has_failed flag we continue writting because the data will be buffered
|
||||
has_failed = has_failed || failed;
|
||||
total_written += written;
|
||||
length -= INT_MAX;
|
||||
data = data.substr(INT_MAX);
|
||||
}
|
||||
// Handle the remaining data (less than INT_MAX bytes)
|
||||
if (length > 0) {
|
||||
// Write the final chunk with exact remaining length
|
||||
auto [written, failed] = Super::write(data.data(), (int) length);
|
||||
has_failed = has_failed || failed;
|
||||
total_written += written;
|
||||
}
|
||||
|
||||
/* Reset timeout on each sended chunk */
|
||||
this->resetTimeout();
|
||||
|
||||
if (writtenPtr) {
|
||||
*writtenPtr = written;
|
||||
*writtenPtr = total_written;
|
||||
}
|
||||
|
||||
/* If we did not fail the write, accept more */
|
||||
return !failed;
|
||||
return !has_failed;
|
||||
}
|
||||
|
||||
/* Get the current byte write offset for this Http response */
|
||||
@@ -660,12 +708,6 @@ public:
|
||||
return httpResponseData->socketData;
|
||||
}
|
||||
|
||||
void setSocketData(void* socketData) {
|
||||
HttpResponseData<SSL> *httpResponseData = getHttpResponseData();
|
||||
|
||||
httpResponseData->socketData = socketData;
|
||||
}
|
||||
|
||||
void setWriteOffset(uint64_t offset) {
|
||||
HttpResponseData<SSL> *httpResponseData = getHttpResponseData();
|
||||
|
||||
|
||||
@@ -339,7 +339,7 @@ private:
|
||||
|
||||
/* We store old backpressure since it is unclear whether write drained anything,
|
||||
* however, in case of coming here with 0 backpressure we still need to emit drain event */
|
||||
unsigned int backpressure = asyncSocket->getBufferedAmount();
|
||||
size_t backpressure = asyncSocket->getBufferedAmount();
|
||||
|
||||
/* Drain as much as possible */
|
||||
asyncSocket->write(nullptr, 0);
|
||||
|
||||
@@ -2,8 +2,15 @@
|
||||
|
||||
import { spawn as nodeSpawn } from "node:child_process";
|
||||
import { existsSync, readFileSync, mkdirSync, cpSync, chmodSync } from "node:fs";
|
||||
import { basename, join, resolve } from "node:path";
|
||||
import { isCI, printEnvironment, startGroup } from "./utils.mjs";
|
||||
import { basename, join, relative, resolve } from "node:path";
|
||||
import {
|
||||
formatAnnotationToHtml,
|
||||
isCI,
|
||||
parseAnnotations,
|
||||
printEnvironment,
|
||||
reportAnnotationToBuildKite,
|
||||
startGroup,
|
||||
} from "./utils.mjs";
|
||||
|
||||
// https://cmake.org/cmake/help/latest/manual/cmake.1.html#generate-a-project-buildsystem
|
||||
const generateFlags = [
|
||||
@@ -222,16 +229,24 @@ async function spawn(command, args, options, label) {
|
||||
timestamp = Date.now();
|
||||
});
|
||||
|
||||
let stdoutBuffer = "";
|
||||
|
||||
let done;
|
||||
if (pipe) {
|
||||
const stdout = new Promise(resolve => {
|
||||
subprocess.stdout.on("end", resolve);
|
||||
subprocess.stdout.on("data", data => process.stdout.write(data));
|
||||
subprocess.stdout.on("data", data => {
|
||||
stdoutBuffer += data.toString();
|
||||
process.stdout.write(data);
|
||||
});
|
||||
});
|
||||
|
||||
const stderr = new Promise(resolve => {
|
||||
subprocess.stderr.on("end", resolve);
|
||||
subprocess.stderr.on("data", data => process.stderr.write(data));
|
||||
subprocess.stderr.on("data", data => {
|
||||
stdoutBuffer += data.toString();
|
||||
process.stderr.write(data);
|
||||
});
|
||||
});
|
||||
|
||||
done = Promise.all([stdout, stderr]);
|
||||
@@ -252,9 +267,40 @@ async function spawn(command, args, options, label) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (error) {
|
||||
console.error(error);
|
||||
} else if (signalCode) {
|
||||
if (isBuildkite()) {
|
||||
let annotated;
|
||||
try {
|
||||
const { annotations } = parseAnnotations(stdoutBuffer);
|
||||
for (const annotation of annotations) {
|
||||
const content = formatAnnotationToHtml(annotation);
|
||||
reportAnnotationToBuildKite({
|
||||
priority: 10,
|
||||
label: annotation.title || annotation.filename,
|
||||
content,
|
||||
});
|
||||
annotated = true;
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(`Failed to parse annotations:`, error);
|
||||
}
|
||||
|
||||
if (!annotated) {
|
||||
const content = formatAnnotationToHtml({
|
||||
filename: relative(process.cwd(), import.meta.filename),
|
||||
title: "build failed",
|
||||
content: stdoutBuffer,
|
||||
source: "build",
|
||||
level: "error",
|
||||
});
|
||||
reportAnnotationToBuildKite({
|
||||
priority: 10,
|
||||
label: "build failed",
|
||||
content,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (signalCode) {
|
||||
console.error(`Command killed: ${signalCode}`);
|
||||
} else {
|
||||
console.error(`Command exited: code ${exitCode}`);
|
||||
|
||||
31
scripts/generate-perf-trace-events.sh
Executable file
31
scripts/generate-perf-trace-events.sh
Executable file
@@ -0,0 +1,31 @@
|
||||
#!/usr/bin/env bash
|
||||
# This file is not run often, so we don't need to make it part of the build system.
|
||||
# We do this because the event names have to be compile-time constants.
|
||||
|
||||
|
||||
export TRACE_EVENTS=$(rg 'bun\.perf\.trace\("([^"]*)"\)' -t zig --json \
|
||||
| jq -r 'select(.type == "match")' \
|
||||
| jq -r '.data.submatches[].match.text' \
|
||||
| cut -d'"' -f2 \
|
||||
| sort \
|
||||
| uniq)
|
||||
|
||||
echo "// Generated with scripts/generate-perf-trace-events.sh" > src/bun.js/bindings/generated_perf_trace_events.h
|
||||
echo "// clang-format off" >> src/bun.js/bindings/generated_perf_trace_events.h
|
||||
echo "#define FOR_EACH_TRACE_EVENT(macro) \\" >> src/bun.js/bindings/generated_perf_trace_events.h
|
||||
i=0
|
||||
for event in $TRACE_EVENTS; do
|
||||
echo " macro($event, $((i++))) \\" >> src/bun.js/bindings/generated_perf_trace_events.h
|
||||
done
|
||||
echo " // end" >> src/bun.js/bindings/generated_perf_trace_events.h
|
||||
|
||||
echo "Generated src/bun.js/bindings/generated_perf_trace_events.h"
|
||||
|
||||
echo "// Generated with scripts/generate-perf-trace-events.sh" > src/generated_perf_trace_events.zig
|
||||
echo "pub const PerfEvent = enum(i32) {" >> src/generated_perf_trace_events.zig
|
||||
for event in $TRACE_EVENTS; do
|
||||
echo " @\"$event\"," >> src/generated_perf_trace_events.zig
|
||||
done
|
||||
echo "};" >> src/generated_perf_trace_events.zig
|
||||
|
||||
echo "Generated src/generated_perf_trace_events.zig"
|
||||
@@ -7,26 +7,36 @@
|
||||
// - It cannot use Bun APIs, since it is run using Node.js.
|
||||
// - It does not import dependencies, so it's faster to start.
|
||||
|
||||
import { spawn, spawnSync } from "node:child_process";
|
||||
import { createHash } from "node:crypto";
|
||||
import {
|
||||
constants as fs,
|
||||
readFileSync,
|
||||
mkdtempSync,
|
||||
existsSync,
|
||||
statSync,
|
||||
mkdirSync,
|
||||
accessSync,
|
||||
appendFileSync,
|
||||
existsSync,
|
||||
constants as fs,
|
||||
mkdirSync,
|
||||
mkdtempSync,
|
||||
readdirSync,
|
||||
readFileSync,
|
||||
statSync,
|
||||
unlink,
|
||||
unlinkSync,
|
||||
writeFileSync,
|
||||
} from "node:fs";
|
||||
import { spawn, spawnSync } from "node:child_process";
|
||||
import { join, basename, dirname, relative, sep } from "node:path";
|
||||
import { readFile } from "node:fs/promises";
|
||||
import { userInfo } from "node:os";
|
||||
import { basename, dirname, join, relative, sep } from "node:path";
|
||||
import { parseArgs } from "node:util";
|
||||
import {
|
||||
getBranch,
|
||||
getBuildLabel,
|
||||
getBuildUrl,
|
||||
getCommit,
|
||||
getEnv,
|
||||
getFileUrl,
|
||||
getHostname,
|
||||
getLoggedInUserCountOrDetails,
|
||||
getSecret,
|
||||
getShell,
|
||||
getWindowsExitReason,
|
||||
isBuildkite,
|
||||
@@ -36,11 +46,11 @@ import {
|
||||
isWindows,
|
||||
isX64,
|
||||
printEnvironment,
|
||||
reportAnnotationToBuildKite,
|
||||
startGroup,
|
||||
tmpdir,
|
||||
unzip,
|
||||
} from "./utils.mjs";
|
||||
import { userInfo } from "node:os";
|
||||
let isQuiet = false;
|
||||
const cwd = import.meta.dirname ? dirname(import.meta.dirname) : process.cwd();
|
||||
const testsPath = join(cwd, "test");
|
||||
@@ -115,9 +125,32 @@ const { values: options, positionals: filters } = parseArgs({
|
||||
type: "string",
|
||||
default: isCI ? "4" : "0", // N retries = N+1 attempts
|
||||
},
|
||||
["junit"]: {
|
||||
type: "boolean",
|
||||
default: isCI, // Always enable JUnit in CI
|
||||
},
|
||||
["junit-temp-dir"]: {
|
||||
type: "string",
|
||||
default: "junit-reports",
|
||||
},
|
||||
["junit-upload"]: {
|
||||
type: "boolean",
|
||||
default: isBuildkite,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const cliOptions = options;
|
||||
|
||||
if (cliOptions.junit) {
|
||||
try {
|
||||
cliOptions["junit-temp-dir"] = mkdtempSync(join(tmpdir(), cliOptions["junit-temp-dir"]));
|
||||
} catch (err) {
|
||||
cliOptions.junit = false;
|
||||
console.error(`Error creating JUnit temp directory: ${err.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
if (options["quiet"]) {
|
||||
isQuiet = true;
|
||||
}
|
||||
@@ -336,6 +369,95 @@ async function runTests() {
|
||||
reportOutputToGitHubAction("failing_tests", markdown);
|
||||
}
|
||||
|
||||
// Generate and upload JUnit reports if requested
|
||||
if (options["junit"]) {
|
||||
const junitTempDir = options["junit-temp-dir"];
|
||||
mkdirSync(junitTempDir, { recursive: true });
|
||||
|
||||
// Generate JUnit reports for tests that don't use bun test
|
||||
const nonBunTestResults = [...okResults, ...flakyResults, ...failedResults].filter(result => {
|
||||
// Check if this is a test that wasn't run with bun test
|
||||
const isNodeTest =
|
||||
isJavaScript(result.testPath) && !isTestStrict(result.testPath) && !result.testPath.includes("vendor");
|
||||
return isNodeTest;
|
||||
});
|
||||
|
||||
// If we have tests not covered by bun test JUnit reports, generate a report for them
|
||||
if (nonBunTestResults.length > 0) {
|
||||
const nonBunTestJunitPath = join(junitTempDir, "non-bun-test-results.xml");
|
||||
generateJUnitReport(nonBunTestJunitPath, nonBunTestResults);
|
||||
!isQuiet &&
|
||||
console.log(
|
||||
`Generated JUnit report for ${nonBunTestResults.length} non-bun test results at ${nonBunTestJunitPath}`,
|
||||
);
|
||||
|
||||
// Upload this report immediately if we're on BuildKite
|
||||
if (isBuildkite && options["junit-upload"]) {
|
||||
const uploadSuccess = await uploadJUnitToBuildKite(nonBunTestJunitPath);
|
||||
if (uploadSuccess) {
|
||||
// Delete the file after successful upload to prevent redundant uploads
|
||||
try {
|
||||
unlinkSync(nonBunTestJunitPath);
|
||||
!isQuiet && console.log(`Uploaded and deleted non-bun test JUnit report`);
|
||||
} catch (unlinkError) {
|
||||
!isQuiet && console.log(`Uploaded but failed to delete non-bun test JUnit report: ${unlinkError.message}`);
|
||||
}
|
||||
} else {
|
||||
!isQuiet && console.log(`Failed to upload non-bun test JUnit report to BuildKite`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for any JUnit reports that may not have been uploaded yet
|
||||
// Since we're deleting files after upload, any remaining files need to be uploaded
|
||||
if (isBuildkite && options["junit-upload"]) {
|
||||
try {
|
||||
// Only process XML files and skip the non-bun test results which we've already uploaded
|
||||
const allJunitFiles = readdirSync(junitTempDir).filter(
|
||||
file => file.endsWith(".xml") && file !== "non-bun-test-results.xml",
|
||||
);
|
||||
|
||||
if (allJunitFiles.length > 0) {
|
||||
!isQuiet && console.log(`Found ${allJunitFiles.length} remaining JUnit reports to upload...`);
|
||||
|
||||
// Process each remaining JUnit file - these are files we haven't processed yet
|
||||
let uploadedCount = 0;
|
||||
|
||||
for (const file of allJunitFiles) {
|
||||
const filePath = join(junitTempDir, file);
|
||||
|
||||
if (existsSync(filePath)) {
|
||||
try {
|
||||
const uploadSuccess = await uploadJUnitToBuildKite(filePath);
|
||||
if (uploadSuccess) {
|
||||
// Delete the file after successful upload
|
||||
try {
|
||||
unlinkSync(filePath);
|
||||
uploadedCount++;
|
||||
} catch (unlinkError) {
|
||||
!isQuiet && console.log(`Uploaded but failed to delete ${file}: ${unlinkError.message}`);
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
console.error(`Error uploading JUnit file ${file}:`, err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (uploadedCount > 0) {
|
||||
!isQuiet && console.log(`Uploaded and deleted ${uploadedCount} remaining JUnit reports`);
|
||||
} else {
|
||||
!isQuiet && console.log(`No JUnit reports needed to be uploaded`);
|
||||
}
|
||||
} else {
|
||||
!isQuiet && console.log(`No remaining JUnit reports found to upload`);
|
||||
}
|
||||
} catch (err) {
|
||||
console.error(`Error checking for remaining JUnit reports:`, err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!isCI && !isQuiet) {
|
||||
console.table({
|
||||
"Total Tests": okResults.length + failedResults.length + flakyResults.length,
|
||||
@@ -652,8 +774,31 @@ async function spawnBunTest(execPath, testPath, options = { cwd }) {
|
||||
const absPath = join(options["cwd"], testPath);
|
||||
const isReallyTest = isTestStrict(testPath) || absPath.includes("vendor");
|
||||
const args = options["args"] ?? [];
|
||||
|
||||
const testArgs = ["test", ...args, `--timeout=${perTestTimeout}`];
|
||||
|
||||
// This will be set if a JUnit file is generated
|
||||
let junitFilePath = null;
|
||||
|
||||
// In CI, we want to use JUnit for all tests
|
||||
// Create a unique filename for each test run using a hash of the test path
|
||||
// This ensures we can run tests in parallel without file conflicts
|
||||
if (cliOptions.junit) {
|
||||
const testHash = createHash("sha1").update(testPath).digest("base64url");
|
||||
const junitTempDir = cliOptions["junit-temp-dir"];
|
||||
|
||||
// Create the JUnit file path
|
||||
junitFilePath = `${junitTempDir}/test-${testHash}.xml`;
|
||||
|
||||
// Add JUnit reporter
|
||||
testArgs.push("--reporter=junit");
|
||||
testArgs.push(`--reporter-outfile=${junitFilePath}`);
|
||||
}
|
||||
|
||||
testArgs.push(absPath);
|
||||
|
||||
const { ok, error, stdout } = await spawnBun(execPath, {
|
||||
args: isReallyTest ? ["test", ...args, `--timeout=${perTestTimeout}`, absPath] : [...args, absPath],
|
||||
args: isReallyTest ? testArgs : [...args, absPath],
|
||||
cwd: options["cwd"],
|
||||
timeout: isReallyTest ? timeout : 30_000,
|
||||
env: {
|
||||
@@ -663,6 +808,15 @@ async function spawnBunTest(execPath, testPath, options = { cwd }) {
|
||||
stderr: chunk => pipeTestStdout(process.stderr, chunk),
|
||||
});
|
||||
const { tests, errors, stdout: stdoutPreview } = parseTestStdout(stdout, testPath);
|
||||
|
||||
// If we generated a JUnit file and we're on BuildKite, upload it immediately
|
||||
if (junitFilePath && isReallyTest && isBuildkite && cliOptions["junit-upload"]) {
|
||||
// Give the file system a moment to finish writing the file
|
||||
if (existsSync(junitFilePath)) {
|
||||
addToJunitUploadQueue(junitFilePath);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
testPath,
|
||||
ok,
|
||||
@@ -1080,7 +1234,7 @@ function getRelevantTests(cwd) {
|
||||
const filteredTests = [];
|
||||
|
||||
if (options["node-tests"]) {
|
||||
tests = tests.filter(isNodeParallelTest);
|
||||
tests = tests.filter(isNodeTest);
|
||||
}
|
||||
|
||||
const isMatch = (testPath, filter) => {
|
||||
@@ -1333,7 +1487,9 @@ function formatTestToMarkdown(result, concise) {
|
||||
if (error) {
|
||||
markdown += ` - ${error}`;
|
||||
}
|
||||
markdown += ` on ${platform}`;
|
||||
if (platform) {
|
||||
markdown += ` on ${platform}`;
|
||||
}
|
||||
|
||||
if (concise) {
|
||||
markdown += "</li>\n";
|
||||
@@ -1396,48 +1552,6 @@ function listArtifactsFromBuildKite(glob, step) {
|
||||
return [];
|
||||
}
|
||||
|
||||
/**
|
||||
* @typedef {object} BuildkiteAnnotation
|
||||
* @property {string} [context]
|
||||
* @property {string} label
|
||||
* @property {string} content
|
||||
* @property {"error" | "warning" | "info"} [style]
|
||||
* @property {number} [priority]
|
||||
* @property {number} [attempt]
|
||||
*/
|
||||
|
||||
/**
|
||||
* @param {BuildkiteAnnotation} annotation
|
||||
*/
|
||||
function reportAnnotationToBuildKite({ context, label, content, style = "error", priority = 3, attempt = 0 }) {
|
||||
const { error, status, signal, stderr } = spawnSync(
|
||||
"buildkite-agent",
|
||||
["annotate", "--append", "--style", `${style}`, "--context", `${context || label}`, "--priority", `${priority}`],
|
||||
{
|
||||
input: content,
|
||||
stdio: ["pipe", "ignore", "pipe"],
|
||||
encoding: "utf-8",
|
||||
timeout: spawnTimeout,
|
||||
cwd,
|
||||
},
|
||||
);
|
||||
if (status === 0) {
|
||||
return;
|
||||
}
|
||||
if (attempt > 0) {
|
||||
const cause = error ?? signal ?? `code ${status}`;
|
||||
throw new Error(`Failed to create annotation: ${label}`, { cause });
|
||||
}
|
||||
const buildLabel = getTestLabel();
|
||||
const buildUrl = getBuildUrl();
|
||||
const platform = buildUrl ? `<a href="${buildUrl}">${buildLabel}</a>` : buildLabel;
|
||||
let errorMessage = `<details><summary><code>${label}</code> - annotation error on ${platform}</summary>`;
|
||||
if (stderr) {
|
||||
errorMessage += `\n\n\`\`\`terminal\n${escapeCodeBlock(stderr)}\n\`\`\`\n\n</details>\n\n`;
|
||||
}
|
||||
reportAnnotationToBuildKite({ label: `${label}-error`, content: errorMessage, attempt: attempt + 1 });
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string} name
|
||||
* @param {string} value
|
||||
@@ -1588,6 +1702,297 @@ function onExit(signal) {
|
||||
});
|
||||
}
|
||||
|
||||
let getBuildkiteAnalyticsToken = () => {
|
||||
let token = getSecret("TEST_REPORTING_API", { required: true });
|
||||
getBuildkiteAnalyticsToken = () => token;
|
||||
return token;
|
||||
};
|
||||
|
||||
/**
|
||||
* Generate a JUnit XML report from test results
|
||||
* @param {string} outfile - The path to write the JUnit XML report to
|
||||
* @param {TestResult[]} results - The test results to include in the report
|
||||
*/
|
||||
function generateJUnitReport(outfile, results) {
|
||||
!isQuiet && console.log(`Generating JUnit XML report: ${outfile}`);
|
||||
|
||||
// Start the XML document
|
||||
let xml = '<?xml version="1.0" encoding="UTF-8"?>\n';
|
||||
|
||||
// Add an overall testsuite container with metadata
|
||||
const totalTests = results.length;
|
||||
const totalFailures = results.filter(r => r.status === "fail").length;
|
||||
const timestamp = new Date().toISOString();
|
||||
|
||||
// Calculate total time
|
||||
const totalTime = results.reduce((sum, result) => {
|
||||
const duration = result.duration || 0;
|
||||
return sum + duration / 1000; // Convert ms to seconds
|
||||
}, 0);
|
||||
|
||||
// Create a unique package name to identify this run
|
||||
const packageName = `bun.internal.${process.env.BUILDKITE_PIPELINE_SLUG || "tests"}`;
|
||||
|
||||
xml += `<testsuites name="${escapeXml(packageName)}" tests="${totalTests}" failures="${totalFailures}" time="${totalTime.toFixed(3)}" timestamp="${timestamp}">\n`;
|
||||
|
||||
// Group results by test file
|
||||
const testSuites = new Map();
|
||||
|
||||
for (const result of results) {
|
||||
const { testPath, ok, status, error, tests, stdoutPreview, stdout, duration = 0 } = result;
|
||||
|
||||
if (!testSuites.has(testPath)) {
|
||||
testSuites.set(testPath, {
|
||||
name: testPath,
|
||||
tests: [],
|
||||
failures: 0,
|
||||
errors: 0,
|
||||
skipped: 0,
|
||||
time: 0,
|
||||
timestamp: timestamp,
|
||||
hostname: getHostname(),
|
||||
stdout: stdout || stdoutPreview || "",
|
||||
});
|
||||
}
|
||||
|
||||
const suite = testSuites.get(testPath);
|
||||
|
||||
// For test suites with granular test information
|
||||
if (tests.length > 0) {
|
||||
for (const test of tests) {
|
||||
const { test: testName, status: testStatus, duration: testDuration = 0, errors: testErrors = [] } = test;
|
||||
|
||||
suite.time += testDuration / 1000; // Convert to seconds
|
||||
|
||||
const testCase = {
|
||||
name: testName,
|
||||
classname: `${packageName}.${testPath.replace(/[\/\\]/g, ".")}`,
|
||||
time: testDuration / 1000, // Convert to seconds
|
||||
};
|
||||
|
||||
if (testStatus === "fail") {
|
||||
suite.failures++;
|
||||
|
||||
// Collect error details
|
||||
let errorMessage = "Test failed";
|
||||
let errorType = "AssertionError";
|
||||
let errorContent = "";
|
||||
|
||||
if (testErrors && testErrors.length > 0) {
|
||||
const primaryError = testErrors[0];
|
||||
errorMessage = primaryError.name || "Test failed";
|
||||
errorType = primaryError.name || "AssertionError";
|
||||
errorContent = primaryError.stack || primaryError.name;
|
||||
|
||||
if (testErrors.length > 1) {
|
||||
errorContent +=
|
||||
"\n\nAdditional errors:\n" +
|
||||
testErrors
|
||||
.slice(1)
|
||||
.map(e => e.stack || e.name)
|
||||
.join("\n");
|
||||
}
|
||||
} else {
|
||||
errorContent = error || "Unknown error";
|
||||
}
|
||||
|
||||
testCase.failure = {
|
||||
message: errorMessage,
|
||||
type: errorType,
|
||||
content: errorContent,
|
||||
};
|
||||
} else if (testStatus === "skip" || testStatus === "todo") {
|
||||
suite.skipped++;
|
||||
testCase.skipped = {
|
||||
message: testStatus === "skip" ? "Test skipped" : "Test marked as todo",
|
||||
};
|
||||
}
|
||||
|
||||
suite.tests.push(testCase);
|
||||
}
|
||||
} else {
|
||||
// For test suites without granular test information (e.g., bun install tests)
|
||||
suite.time += duration / 1000; // Convert to seconds
|
||||
|
||||
const testCase = {
|
||||
name: basename(testPath),
|
||||
classname: `${packageName}.${testPath.replace(/[\/\\]/g, ".")}`,
|
||||
time: duration / 1000, // Convert to seconds
|
||||
};
|
||||
|
||||
if (status === "fail") {
|
||||
suite.failures++;
|
||||
testCase.failure = {
|
||||
message: "Test failed",
|
||||
type: "AssertionError",
|
||||
content: error || "Unknown error",
|
||||
};
|
||||
}
|
||||
|
||||
suite.tests.push(testCase);
|
||||
}
|
||||
}
|
||||
|
||||
// Write each test suite to the XML
|
||||
for (const [name, suite] of testSuites) {
|
||||
xml += ` <testsuite name="${escapeXml(name)}" tests="${suite.tests.length}" failures="${suite.failures}" errors="${suite.errors}" skipped="${suite.skipped}" time="${suite.time.toFixed(3)}" timestamp="${suite.timestamp}" hostname="${escapeXml(suite.hostname)}">\n`;
|
||||
|
||||
// Include system-out if we have stdout
|
||||
if (suite.stdout) {
|
||||
xml += ` <system-out><![CDATA[${suite.stdout}]]></system-out>\n`;
|
||||
}
|
||||
|
||||
// Write each test case
|
||||
for (const test of suite.tests) {
|
||||
xml += ` <testcase name="${escapeXml(test.name)}" classname="${escapeXml(test.classname)}" time="${test.time.toFixed(3)}"`;
|
||||
|
||||
if (test.skipped) {
|
||||
xml += `>\n <skipped message="${escapeXml(test.skipped.message)}"/>\n </testcase>\n`;
|
||||
} else if (test.failure) {
|
||||
xml += `>\n`;
|
||||
xml += ` <failure message="${escapeXml(test.failure.message)}" type="${escapeXml(test.failure.type)}"><![CDATA[${test.failure.content}]]></failure>\n`;
|
||||
xml += ` </testcase>\n`;
|
||||
} else {
|
||||
xml += `/>\n`;
|
||||
}
|
||||
}
|
||||
|
||||
xml += ` </testsuite>\n`;
|
||||
}
|
||||
|
||||
xml += `</testsuites>`;
|
||||
|
||||
// Create directory if it doesn't exist
|
||||
const dir = dirname(outfile);
|
||||
mkdirSync(dir, { recursive: true });
|
||||
|
||||
// Write to file
|
||||
writeFileSync(outfile, xml);
|
||||
!isQuiet && console.log(`JUnit XML report written to ${outfile}`);
|
||||
}
|
||||
|
||||
let isUploadingToBuildKite = false;
|
||||
const junitUploadQueue = [];
|
||||
async function addToJunitUploadQueue(junitFilePath) {
|
||||
junitUploadQueue.push(junitFilePath);
|
||||
|
||||
if (!isUploadingToBuildKite) {
|
||||
drainJunitUploadQueue();
|
||||
}
|
||||
}
|
||||
|
||||
async function drainJunitUploadQueue() {
|
||||
isUploadingToBuildKite = true;
|
||||
while (junitUploadQueue.length > 0) {
|
||||
const testPath = junitUploadQueue.shift();
|
||||
await uploadJUnitToBuildKite(testPath)
|
||||
.then(uploadSuccess => {
|
||||
unlink(testPath, () => {
|
||||
if (!uploadSuccess) {
|
||||
console.error(`Failed to upload JUnit report for ${testPath}`);
|
||||
}
|
||||
});
|
||||
})
|
||||
.catch(err => {
|
||||
console.error(`Error uploading JUnit report for ${testPath}:`, err);
|
||||
});
|
||||
}
|
||||
isUploadingToBuildKite = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Upload JUnit XML report to BuildKite Test Analytics
|
||||
* @param {string} junitFile - Path to the JUnit XML file to upload
|
||||
* @returns {Promise<boolean>} - Whether the upload was successful
|
||||
*/
|
||||
async function uploadJUnitToBuildKite(junitFile) {
|
||||
const fileName = basename(junitFile);
|
||||
!isQuiet && console.log(`Uploading JUnit file "${fileName}" to BuildKite Test Analytics...`);
|
||||
|
||||
// Get BuildKite environment variables for run_env fields
|
||||
const buildId = getEnv("BUILDKITE_BUILD_ID", false);
|
||||
const buildUrl = getEnv("BUILDKITE_BUILD_URL", false);
|
||||
const branch = getBranch();
|
||||
const commit = getCommit();
|
||||
const buildNumber = getEnv("BUILDKITE_BUILD_NUMBER", false);
|
||||
const jobId = getEnv("BUILDKITE_JOB_ID", false);
|
||||
const message = getEnv("BUILDKITE_MESSAGE", false);
|
||||
|
||||
try {
|
||||
// Add a unique test suite identifier to help with correlation in BuildKite
|
||||
const testId = fileName.replace(/\.xml$/, "");
|
||||
|
||||
// Use fetch and FormData instead of curl
|
||||
const formData = new FormData();
|
||||
|
||||
// Add the JUnit file data
|
||||
formData.append("data", new Blob([await readFile(junitFile)]), fileName);
|
||||
formData.append("format", "junit");
|
||||
formData.append("run_env[CI]", "buildkite");
|
||||
|
||||
// Add additional fields
|
||||
if (buildId) formData.append("run_env[key]", buildId);
|
||||
if (buildUrl) formData.append("run_env[url]", buildUrl);
|
||||
if (branch) formData.append("run_env[branch]", branch);
|
||||
if (commit) formData.append("run_env[commit_sha]", commit);
|
||||
if (buildNumber) formData.append("run_env[number]", buildNumber);
|
||||
if (jobId) formData.append("run_env[job_id]", jobId);
|
||||
if (message) formData.append("run_env[message]", message);
|
||||
|
||||
// Add custom tags
|
||||
formData.append("tags[runtime]", "bun");
|
||||
formData.append("tags[suite]", testId);
|
||||
|
||||
// Add additional context information specific to this run
|
||||
formData.append("run_env[source]", "junit-import");
|
||||
formData.append("run_env[collector]", "bun-runner");
|
||||
|
||||
const url = "https://analytics-api.buildkite.com/v1/uploads";
|
||||
const response = await fetch(url, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Authorization": `Token token="${getBuildkiteAnalyticsToken()}"`,
|
||||
},
|
||||
body: formData,
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
!isQuiet && console.log(`JUnit file "${fileName}" successfully uploaded to BuildKite Test Analytics`);
|
||||
|
||||
try {
|
||||
// Consume the body to ensure Node releases the memory.
|
||||
await response.arrayBuffer();
|
||||
} catch (error) {
|
||||
// Don't care if this fails.
|
||||
}
|
||||
|
||||
return true;
|
||||
} else {
|
||||
const errorText = await response.text();
|
||||
console.error(`Failed to upload JUnit file "${fileName}": HTTP ${response.status}`, errorText);
|
||||
return false;
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(`Error uploading JUnit file "${fileName}":`, error);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Escape XML special characters
|
||||
* @param {string} str - String to escape
|
||||
* @returns {string} - Escaped string
|
||||
*/
|
||||
function escapeXml(str) {
|
||||
if (typeof str !== "string") return "";
|
||||
return str
|
||||
.replace(/&/g, "&")
|
||||
.replace(/</g, "<")
|
||||
.replace(/>/g, ">")
|
||||
.replace(/"/g, """)
|
||||
.replace(/'/g, "'");
|
||||
}
|
||||
|
||||
export async function main() {
|
||||
for (const signal of ["SIGINT", "SIGTERM", "SIGHUP"]) {
|
||||
process.on(signal, () => onExit(signal));
|
||||
|
||||
@@ -2366,6 +2366,81 @@ export function parseAnnotation(options, context) {
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* @typedef {Object} AnnotationFormatOptions
|
||||
* @property {boolean} [concise]
|
||||
* @property {boolean} [buildkite]
|
||||
*/
|
||||
|
||||
/**
|
||||
* @param {Annotation} annotation
|
||||
* @param {AnnotationFormatOptions} [options]
|
||||
* @returns {string}
|
||||
*/
|
||||
export function formatAnnotationToHtml(annotation, options = {}) {
|
||||
const { title, content, source, level, filename, line } = annotation;
|
||||
const { concise, buildkite = isBuildkite } = options;
|
||||
|
||||
let html;
|
||||
if (concise) {
|
||||
html = "<li>";
|
||||
} else {
|
||||
html = "<details><summary>";
|
||||
}
|
||||
|
||||
if (filename) {
|
||||
const filePath = filename.replace(/\\/g, "/");
|
||||
const fileUrl = getFileUrl(filePath, line);
|
||||
if (fileUrl) {
|
||||
html += `<a href="${fileUrl}"><code>${filePath}</code></a>`;
|
||||
} else {
|
||||
html += `<code>${filePath}</code>`;
|
||||
}
|
||||
html += " - ";
|
||||
}
|
||||
|
||||
if (title) {
|
||||
html += title;
|
||||
} else if (source) {
|
||||
if (level) {
|
||||
html += `${source} ${level}`;
|
||||
} else {
|
||||
html += source;
|
||||
}
|
||||
} else if (level) {
|
||||
html += level;
|
||||
} else {
|
||||
html += "unknown error";
|
||||
}
|
||||
|
||||
const buildLabel = getBuildLabel();
|
||||
if (buildLabel) {
|
||||
html += " on ";
|
||||
const buildUrl = getBuildUrl();
|
||||
if (buildUrl) {
|
||||
html += `<a href="${buildUrl}">${buildLabel}</a>`;
|
||||
} else {
|
||||
html += buildLabel;
|
||||
}
|
||||
}
|
||||
|
||||
if (concise) {
|
||||
html += "</li>\n";
|
||||
} else {
|
||||
html += "</summary>\n\n";
|
||||
if (buildkite) {
|
||||
const preview = escapeCodeBlock(content);
|
||||
html += `\`\`\`terminal\n${preview}\n\`\`\`\n`;
|
||||
} else {
|
||||
const preview = escapeHtml(stripAnsi(content));
|
||||
html += `<pre><code>${preview}</code></pre>\n`;
|
||||
}
|
||||
html += "\n\n</details>\n\n";
|
||||
}
|
||||
|
||||
return html;
|
||||
}
|
||||
|
||||
/**
|
||||
* @typedef {Object} AnnotationResult
|
||||
* @property {Annotation[]} annotations
|
||||
@@ -2399,7 +2474,7 @@ export function parseAnnotations(content, options = {}) {
|
||||
let length = 0;
|
||||
let match;
|
||||
|
||||
while (i + length <= originalLines.length && length < maxLength) {
|
||||
while (i + length < originalLines.length && length < maxLength) {
|
||||
const originalLine = originalLines[i + length++];
|
||||
const line = stripAnsi(originalLine).trim();
|
||||
const patternMatch = pattern.exec(line);
|
||||
@@ -2548,6 +2623,49 @@ export function parseAnnotations(content, options = {}) {
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* @typedef {object} BuildkiteAnnotation
|
||||
* @property {string} [context]
|
||||
* @property {string} label
|
||||
* @property {string} content
|
||||
* @property {"error" | "warning" | "info"} [style]
|
||||
* @property {number} [priority]
|
||||
* @property {number} [attempt]
|
||||
*/
|
||||
|
||||
/**
|
||||
* @param {BuildkiteAnnotation} annotation
|
||||
*/
|
||||
export function reportAnnotationToBuildKite({ context, label, content, style = "error", priority = 3, attempt = 0 }) {
|
||||
if (!isBuildkite) {
|
||||
return;
|
||||
}
|
||||
const { error, status, signal, stderr } = nodeSpawnSync(
|
||||
"buildkite-agent",
|
||||
["annotate", "--append", "--style", `${style}`, "--context", `${context || label}`, "--priority", `${priority}`],
|
||||
{
|
||||
input: content,
|
||||
stdio: ["pipe", "ignore", "pipe"],
|
||||
encoding: "utf-8",
|
||||
timeout: 5_000,
|
||||
},
|
||||
);
|
||||
if (status === 0) {
|
||||
return;
|
||||
}
|
||||
if (attempt > 0) {
|
||||
const cause = error ?? signal ?? `code ${status}`;
|
||||
throw new Error(`Failed to create annotation: ${label}`, { cause });
|
||||
}
|
||||
const errorContent = formatAnnotationToHtml({
|
||||
title: "annotation error",
|
||||
content: stderr || "",
|
||||
source: "buildkite",
|
||||
level: "error",
|
||||
});
|
||||
reportAnnotationToBuildKite({ label: `${label}-error`, content: errorContent, attempt: attempt + 1 });
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {object} obj
|
||||
* @param {number} indent
|
||||
|
||||
@@ -59,11 +59,11 @@ else
|
||||
|
||||
pub const ReleaseImpl =
|
||||
if (builtin.os.tag == .windows)
|
||||
WindowsImpl
|
||||
else if (builtin.os.tag.isDarwin())
|
||||
DarwinImpl
|
||||
else
|
||||
FutexImpl;
|
||||
WindowsImpl
|
||||
else if (builtin.os.tag.isDarwin())
|
||||
DarwinImpl
|
||||
else
|
||||
FutexImpl;
|
||||
|
||||
pub const ExternImpl = ReleaseImpl.Type;
|
||||
|
||||
|
||||
@@ -99,13 +99,13 @@ pub const Value = union(enum) {
|
||||
const FreeContext = struct {
|
||||
allocator: std.mem.Allocator,
|
||||
|
||||
fn onFree(uncast_ctx: *anyopaque, buffer: *anyopaque, len: u32) callconv(.C) void {
|
||||
const ctx: *@This() = @alignCast(@ptrCast(uncast_ctx));
|
||||
fn onFree(ctx: *@This(), buffer: *anyopaque, len: u32) callconv(.C) void {
|
||||
ctx.allocator.free(@as([*]u8, @ptrCast(buffer))[0..len]);
|
||||
bun.destroy(ctx);
|
||||
}
|
||||
};
|
||||
return bun.String.createExternal(
|
||||
*FreeContext,
|
||||
buf.bytes,
|
||||
true,
|
||||
bun.new(FreeContext, .{ .allocator = buf.allocator }),
|
||||
|
||||
@@ -326,7 +326,7 @@ pub const StandaloneModuleGraph = struct {
|
||||
}
|
||||
|
||||
pub fn toBytes(allocator: std.mem.Allocator, prefix: []const u8, output_files: []const bun.options.OutputFile, output_format: bun.options.Format) ![]u8 {
|
||||
var serialize_trace = bun.tracy.traceNamed(@src(), "StandaloneModuleGraph.serialize");
|
||||
var serialize_trace = bun.perf.trace("StandaloneModuleGraph.serialize");
|
||||
defer serialize_trace.end();
|
||||
|
||||
var entry_point_id: ?usize = null;
|
||||
|
||||
@@ -3,8 +3,9 @@ const Watcher = @This();
|
||||
const DebugLogScope = bun.Output.Scoped(.watcher, false);
|
||||
const log = DebugLogScope.log;
|
||||
|
||||
// Consumer-facing
|
||||
watch_events: [max_count]WatchEvent,
|
||||
// This will always be [max_count]WatchEvent,
|
||||
// We avoid statically allocating because it increases the binary size.
|
||||
watch_events: []WatchEvent = &.{},
|
||||
changed_filepaths: [max_count]?[:0]u8,
|
||||
|
||||
/// The platform-specific implementation of the watcher
|
||||
@@ -86,7 +87,7 @@ pub fn init(comptime T: type, ctx: *T, fs: *bun.fs.FileSystem, allocator: std.me
|
||||
.onFileUpdate = &wrapped.onFileUpdateWrapped,
|
||||
.onError = &wrapped.onErrorWrapped,
|
||||
.platform = .{},
|
||||
.watch_events = undefined,
|
||||
.watch_events = try allocator.alloc(WatchEvent, max_count),
|
||||
.changed_filepaths = [_]?[:0]u8{null} ** max_count,
|
||||
};
|
||||
|
||||
@@ -251,7 +252,7 @@ pub fn flushEvictions(this: *Watcher) void {
|
||||
// swapRemove messes up the order
|
||||
// But, it only messes up the order if any elements in the list appear after the item being removed
|
||||
// So if we just sort the list by the biggest index first, that should be fine
|
||||
std.sort.pdq(
|
||||
std.sort.insertion(
|
||||
WatchItemIndex,
|
||||
this.evict_list[0..this.evict_list_i],
|
||||
{},
|
||||
@@ -268,7 +269,7 @@ pub fn flushEvictions(this: *Watcher) void {
|
||||
|
||||
if (!Environment.isWindows) {
|
||||
// on mac and linux we can just close the file descriptor
|
||||
// TODO do we need to call inotify_rm_watch on linux?
|
||||
// we don't need to call inotify_rm_watch on linux because it gets removed when the file descriptor is closed
|
||||
if (fds[item].isValid()) {
|
||||
_ = bun.sys.close(fds[item]);
|
||||
}
|
||||
@@ -279,7 +280,7 @@ pub fn flushEvictions(this: *Watcher) void {
|
||||
last_item = no_watch_item;
|
||||
// This is split into two passes because reading the slice while modified is potentially unsafe.
|
||||
for (this.evict_list[0..this.evict_list_i]) |item| {
|
||||
if (item == last_item) continue;
|
||||
if (item == last_item or this.watchlist.len <= item) continue;
|
||||
this.watchlist.swapRemove(item);
|
||||
last_item = item;
|
||||
}
|
||||
|
||||
@@ -73,8 +73,8 @@ pub fn allocator(scope: *AllocationScope) Allocator {
|
||||
|
||||
const vtable: Allocator.VTable = .{
|
||||
.alloc = alloc,
|
||||
.resize = resize,
|
||||
.remap = remap,
|
||||
.resize = &std.mem.Allocator.noResize,
|
||||
.remap = &std.mem.Allocator.noRemap,
|
||||
.free = free,
|
||||
};
|
||||
|
||||
@@ -95,16 +95,6 @@ fn alloc(ctx: *anyopaque, len: usize, alignment: std.mem.Alignment, ret_addr: us
|
||||
return result;
|
||||
}
|
||||
|
||||
fn resize(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) bool {
|
||||
const scope: *AllocationScope = @ptrCast(@alignCast(ctx));
|
||||
return scope.parent.vtable.resize(scope.parent.ptr, buf, alignment, new_len, ret_addr);
|
||||
}
|
||||
|
||||
fn remap(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
|
||||
const scope: *AllocationScope = @ptrCast(@alignCast(ctx));
|
||||
return scope.parent.vtable.remap(scope.parent.ptr, buf, alignment, new_len, ret_addr);
|
||||
}
|
||||
|
||||
fn free(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
|
||||
const scope: *AllocationScope = @ptrCast(@alignCast(ctx));
|
||||
scope.state.mutex.lock();
|
||||
|
||||
@@ -32,7 +32,7 @@ fn mimalloc_free(
|
||||
}
|
||||
}
|
||||
|
||||
const CAllocator = struct {
|
||||
const MimallocAllocator = struct {
|
||||
pub const supports_posix_memalign = true;
|
||||
|
||||
fn alignedAlloc(len: usize, alignment: mem.Alignment) ?[*]u8 {
|
||||
@@ -60,36 +60,31 @@ const CAllocator = struct {
|
||||
return mimalloc.mi_malloc_size(ptr);
|
||||
}
|
||||
|
||||
fn alloc(_: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 {
|
||||
fn alloc_with_default_allocator(_: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 {
|
||||
return alignedAlloc(len, alignment);
|
||||
}
|
||||
|
||||
fn resize(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool {
|
||||
if (new_len <= buf.len) {
|
||||
return true;
|
||||
}
|
||||
|
||||
const full_len = alignedAllocSize(buf.ptr);
|
||||
if (new_len <= full_len) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
fn resize_with_default_allocator(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool {
|
||||
return mimalloc.mi_expand(buf.ptr, new_len) != null;
|
||||
}
|
||||
|
||||
const free = mimalloc_free;
|
||||
fn remap_with_default_allocator(_: *anyopaque, buf: []u8, alignment: mem.Alignment, new_len: usize, _: usize) ?[*]u8 {
|
||||
return @ptrCast(mimalloc.mi_realloc_aligned(buf.ptr, new_len, alignment.toByteUnits()));
|
||||
}
|
||||
|
||||
const free_with_default_allocator = mimalloc_free;
|
||||
};
|
||||
|
||||
pub const c_allocator = Allocator{
|
||||
// This ptr can be anything. But since it's not nullable, we should set it to something.
|
||||
.ptr = @constCast(c_allocator_vtable),
|
||||
.ptr = memory_allocator_tags.default_allocator_tag_ptr,
|
||||
.vtable = c_allocator_vtable,
|
||||
};
|
||||
const c_allocator_vtable = &Allocator.VTable{
|
||||
.alloc = &CAllocator.alloc,
|
||||
.resize = &CAllocator.resize,
|
||||
.remap = &std.mem.Allocator.noRemap,
|
||||
.free = &CAllocator.free,
|
||||
.alloc = &MimallocAllocator.alloc_with_default_allocator,
|
||||
.resize = &MimallocAllocator.resize_with_default_allocator,
|
||||
.remap = &MimallocAllocator.remap_with_default_allocator,
|
||||
.free = &MimallocAllocator.free_with_default_allocator,
|
||||
};
|
||||
|
||||
const ZAllocator = struct {
|
||||
@@ -119,11 +114,11 @@ const ZAllocator = struct {
|
||||
return mimalloc.mi_malloc_size(ptr);
|
||||
}
|
||||
|
||||
fn alloc(_: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 {
|
||||
fn alloc_with_z_allocator(_: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 {
|
||||
return alignedAlloc(len, alignment);
|
||||
}
|
||||
|
||||
fn resize(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool {
|
||||
fn resize_with_z_allocator(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool {
|
||||
if (new_len <= buf.len) {
|
||||
return true;
|
||||
}
|
||||
@@ -136,141 +131,24 @@ const ZAllocator = struct {
|
||||
return false;
|
||||
}
|
||||
|
||||
const free = mimalloc_free;
|
||||
const free_with_z_allocator = mimalloc_free;
|
||||
};
|
||||
|
||||
const memory_allocator_tags = struct {
|
||||
const default_allocator_tag: usize = 0xBEEFA110C; // "BEEFA110C" beef a110c i guess
|
||||
pub const default_allocator_tag_ptr: *anyopaque = @ptrFromInt(default_allocator_tag);
|
||||
|
||||
const z_allocator_tag: usize = 0x2a11043470123; // "z4110c4701" (Z ALLOCATOR in 1337 speak)
|
||||
pub const z_allocator_tag_ptr: *anyopaque = @ptrFromInt(z_allocator_tag);
|
||||
};
|
||||
|
||||
pub const z_allocator = Allocator{
|
||||
.ptr = undefined,
|
||||
.ptr = memory_allocator_tags.z_allocator_tag_ptr,
|
||||
.vtable = &z_allocator_vtable,
|
||||
};
|
||||
const z_allocator_vtable = Allocator.VTable{
|
||||
.alloc = &ZAllocator.alloc,
|
||||
.resize = &ZAllocator.resize,
|
||||
.alloc = &ZAllocator.alloc_with_z_allocator,
|
||||
.resize = &ZAllocator.resize_with_z_allocator,
|
||||
.remap = &std.mem.Allocator.noRemap,
|
||||
.free = &ZAllocator.free,
|
||||
};
|
||||
const HugeAllocator = struct {
|
||||
fn alloc(
|
||||
_: *anyopaque,
|
||||
len: usize,
|
||||
alignment: u29,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}![]u8 {
|
||||
_ = return_address;
|
||||
assert(len > 0);
|
||||
assert(std.math.isPowerOfTwo(alignment));
|
||||
|
||||
const slice = std.posix.mmap(
|
||||
null,
|
||||
len,
|
||||
std.posix.PROT.READ | std.posix.PROT.WRITE,
|
||||
std.posix.MAP.ANONYMOUS | std.posix.MAP.PRIVATE,
|
||||
-1,
|
||||
0,
|
||||
) catch
|
||||
return error.OutOfMemory;
|
||||
|
||||
_ = len_align;
|
||||
return slice;
|
||||
}
|
||||
|
||||
fn resize(
|
||||
_: *anyopaque,
|
||||
_: []u8,
|
||||
_: u29,
|
||||
_: usize,
|
||||
_: u29,
|
||||
_: usize,
|
||||
) ?usize {
|
||||
return null;
|
||||
}
|
||||
|
||||
fn free(
|
||||
_: *anyopaque,
|
||||
buf: []u8,
|
||||
_: u29,
|
||||
_: usize,
|
||||
) void {
|
||||
std.posix.munmap(@alignCast(buf));
|
||||
}
|
||||
};
|
||||
|
||||
pub const huge_allocator = Allocator{
|
||||
.ptr = undefined,
|
||||
.vtable = &huge_allocator_vtable,
|
||||
};
|
||||
const huge_allocator_vtable = Allocator.VTable{
|
||||
.alloc = HugeAllocator.alloc,
|
||||
.resize = HugeAllocator.resize,
|
||||
.free = HugeAllocator.free,
|
||||
};
|
||||
|
||||
pub const huge_threshold = 1024 * 256;
|
||||
|
||||
const AutoSizeAllocator = struct {
|
||||
fn alloc(
|
||||
_: *anyopaque,
|
||||
len: usize,
|
||||
alignment: u29,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}![]u8 {
|
||||
_ = len_align;
|
||||
if (len >= huge_threshold) {
|
||||
return huge_allocator.rawAlloc(
|
||||
len,
|
||||
alignment,
|
||||
return_address,
|
||||
) orelse return error.OutOfMemory;
|
||||
}
|
||||
|
||||
return c_allocator.rawAlloc(
|
||||
len,
|
||||
alignment,
|
||||
return_address,
|
||||
) orelse return error.OutOfMemory;
|
||||
}
|
||||
|
||||
fn resize(
|
||||
_: *anyopaque,
|
||||
_: []u8,
|
||||
_: u29,
|
||||
_: usize,
|
||||
_: u29,
|
||||
_: usize,
|
||||
) ?usize {
|
||||
return null;
|
||||
}
|
||||
|
||||
fn free(
|
||||
_: *anyopaque,
|
||||
buf: []u8,
|
||||
a: u29,
|
||||
b: usize,
|
||||
) void {
|
||||
if (buf.len >= huge_threshold) {
|
||||
return huge_allocator.rawFree(
|
||||
buf,
|
||||
a,
|
||||
b,
|
||||
);
|
||||
}
|
||||
|
||||
return c_allocator.rawFree(
|
||||
buf,
|
||||
a,
|
||||
b,
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
pub const auto_allocator = Allocator{
|
||||
.ptr = undefined,
|
||||
.vtable = &auto_allocator_vtable,
|
||||
};
|
||||
const auto_allocator_vtable = Allocator.VTable{
|
||||
.alloc = AutoSizeAllocator.alloc,
|
||||
.resize = AutoSizeAllocator.resize,
|
||||
.free = AutoSizeAllocator.free,
|
||||
.free = &ZAllocator.free_with_z_allocator,
|
||||
};
|
||||
|
||||
@@ -10,124 +10,6 @@ const assert = bun.assert;
|
||||
const bun = @import("root").bun;
|
||||
const log = bun.Output.scoped(.mimalloc, true);
|
||||
|
||||
pub const GlobalArena = struct {
|
||||
arena: Arena,
|
||||
fallback_allocator: std.mem.Allocator,
|
||||
|
||||
pub fn initWithCapacity(capacity: usize, fallback: std.mem.Allocator) error{OutOfMemory}!GlobalArena {
|
||||
const arena = try Arena.initWithCapacity(capacity);
|
||||
|
||||
return GlobalArena{
|
||||
.arena = arena,
|
||||
.fallback_allocator = fallback,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn allocator(this: *GlobalArena) Allocator {
|
||||
return .{
|
||||
.ptr = this,
|
||||
.vtable = &.{
|
||||
.alloc = alloc,
|
||||
.resize = resize,
|
||||
.free = free,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
fn alloc(
|
||||
self: *GlobalArena,
|
||||
len: usize,
|
||||
ptr_align: u29,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}![]u8 {
|
||||
return self.arena.alloc(len, ptr_align, len_align, return_address) catch
|
||||
return self.fallback_allocator.rawAlloc(len, ptr_align, return_address) orelse return error.OutOfMemory;
|
||||
}
|
||||
|
||||
fn resize(
|
||||
self: *GlobalArena,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) ?usize {
|
||||
if (self.arena.ownsPtr(buf.ptr)) {
|
||||
return self.arena.resize(buf, buf_align, new_len, len_align, return_address);
|
||||
} else {
|
||||
return self.fallback_allocator.rawResize(buf, buf_align, new_len, len_align, return_address);
|
||||
}
|
||||
}
|
||||
|
||||
fn free(
|
||||
self: *GlobalArena,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
return_address: usize,
|
||||
) void {
|
||||
if (self.arena.ownsPtr(buf.ptr)) {
|
||||
return self.arena.free(buf, buf_align, return_address);
|
||||
} else {
|
||||
return self.fallback_allocator.rawFree(buf, buf_align, return_address);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const ArenaRegistry = struct {
|
||||
arenas: std.AutoArrayHashMap(?*mimalloc.Heap, std.Thread.Id) = std.AutoArrayHashMap(?*mimalloc.Heap, std.Thread.Id).init(bun.default_allocator),
|
||||
mutex: bun.Mutex = .{},
|
||||
|
||||
var registry = ArenaRegistry{};
|
||||
|
||||
pub fn register(arena: Arena) void {
|
||||
if (comptime Environment.isDebug and Environment.isNative) {
|
||||
registry.mutex.lock();
|
||||
defer registry.mutex.unlock();
|
||||
const entry = registry.arenas.getOrPut(arena.heap.?) catch unreachable;
|
||||
const received = std.Thread.getCurrentId();
|
||||
|
||||
if (entry.found_existing) {
|
||||
const expected = entry.value_ptr.*;
|
||||
if (expected != received) {
|
||||
bun.unreachablePanic("Arena created on wrong thread! Expected: {d} received: {d}", .{
|
||||
expected,
|
||||
received,
|
||||
});
|
||||
}
|
||||
}
|
||||
entry.value_ptr.* = received;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn assert(arena: Arena) void {
|
||||
if (comptime Environment.isDebug and Environment.isNative) {
|
||||
registry.mutex.lock();
|
||||
defer registry.mutex.unlock();
|
||||
const expected = registry.arenas.get(arena.heap.?) orelse {
|
||||
bun.unreachablePanic("Arena not registered!", .{});
|
||||
};
|
||||
const received = std.Thread.getCurrentId();
|
||||
if (expected != received) {
|
||||
bun.unreachablePanic("Arena accessed on wrong thread! Expected: {d} received: {d}", .{
|
||||
expected,
|
||||
received,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unregister(arena: Arena) void {
|
||||
if (comptime Environment.isDebug and Environment.isNative) {
|
||||
registry.mutex.lock();
|
||||
defer registry.mutex.unlock();
|
||||
if (!registry.arenas.swapRemove(arena.heap.?)) {
|
||||
bun.unreachablePanic("Arena not registered!", .{});
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Arena = struct {
|
||||
heap: ?*mimalloc.Heap = null,
|
||||
|
||||
@@ -149,15 +31,6 @@ pub const Arena = struct {
|
||||
return Allocator{ .ptr = this.heap.?, .vtable = &c_allocator_vtable };
|
||||
}
|
||||
|
||||
pub fn deinit(this: *Arena) void {
|
||||
// if (comptime Environment.isDebug) {
|
||||
// ArenaRegistry.unregister(this.*);
|
||||
// }
|
||||
mimalloc.mi_heap_destroy(this.heap.?);
|
||||
|
||||
this.heap = null;
|
||||
}
|
||||
|
||||
pub fn dumpThreadStats(_: *Arena) void {
|
||||
const dump_fn = struct {
|
||||
pub fn dump(textZ: [*:0]const u8, _: ?*anyopaque) callconv(.C) void {
|
||||
@@ -180,27 +53,22 @@ pub const Arena = struct {
|
||||
bun.Output.flush();
|
||||
}
|
||||
|
||||
pub fn reset(this: *Arena) void {
|
||||
this.deinit();
|
||||
this.* = init() catch unreachable;
|
||||
pub fn deinit(this: *Arena) void {
|
||||
mimalloc.mi_heap_destroy(bun.take(&this.heap).?);
|
||||
}
|
||||
|
||||
pub fn init() !Arena {
|
||||
const arena = Arena{ .heap = mimalloc.mi_heap_new() orelse return error.OutOfMemory };
|
||||
// if (comptime Environment.isDebug) {
|
||||
// ArenaRegistry.register(arena);
|
||||
// }
|
||||
return arena;
|
||||
}
|
||||
|
||||
pub fn gc(this: Arena, force: bool) void {
|
||||
mimalloc.mi_heap_collect(this.heap orelse return, force);
|
||||
pub fn gc(this: Arena) void {
|
||||
mimalloc.mi_heap_collect(this.heap orelse return, false);
|
||||
}
|
||||
|
||||
pub inline fn helpCatchMemoryIssues(this: Arena) void {
|
||||
if (comptime FeatureFlags.help_catch_memory_issues) {
|
||||
this.gc(true);
|
||||
bun.Mimalloc.mi_collect(true);
|
||||
this.gc();
|
||||
bun.Mimalloc.mi_collect(false);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -236,8 +104,6 @@ pub const Arena = struct {
|
||||
|
||||
fn alloc(arena: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 {
|
||||
const this = bun.cast(*mimalloc.Heap, arena);
|
||||
// if (comptime Environment.isDebug)
|
||||
// ArenaRegistry.assert(.{ .heap = this });
|
||||
|
||||
return alignedAlloc(
|
||||
this,
|
||||
@@ -247,16 +113,7 @@ pub const Arena = struct {
|
||||
}
|
||||
|
||||
fn resize(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool {
|
||||
if (new_len <= buf.len) {
|
||||
return true;
|
||||
}
|
||||
|
||||
const full_len = alignedAllocSize(buf.ptr);
|
||||
if (new_len <= full_len) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
return mimalloc.mi_expand(buf.ptr, new_len) != null;
|
||||
}
|
||||
|
||||
fn free(
|
||||
@@ -278,11 +135,36 @@ pub const Arena = struct {
|
||||
mimalloc.mi_free(buf.ptr);
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempt to expand or shrink memory, allowing relocation.
|
||||
///
|
||||
/// `memory.len` must equal the length requested from the most recent
|
||||
/// successful call to `alloc`, `resize`, or `remap`. `alignment` must
|
||||
/// equal the same value that was passed as the `alignment` parameter to
|
||||
/// the original `alloc` call.
|
||||
///
|
||||
/// A non-`null` return value indicates the resize was successful. The
|
||||
/// allocation may have same address, or may have been relocated. In either
|
||||
/// case, the allocation now has size of `new_len`. A `null` return value
|
||||
/// indicates that the resize would be equivalent to allocating new memory,
|
||||
/// copying the bytes from the old memory, and then freeing the old memory.
|
||||
/// In such case, it is more efficient for the caller to perform the copy.
|
||||
///
|
||||
/// `new_len` must be greater than zero.
|
||||
///
|
||||
/// `ret_addr` is optionally provided as the first return address of the
|
||||
/// allocation call stack. If the value is `0` it means no return address
|
||||
/// has been provided.
|
||||
fn remap(this: *anyopaque, buf: []u8, alignment: mem.Alignment, new_len: usize, _: usize) ?[*]u8 {
|
||||
const aligned_size = alignment.toByteUnits();
|
||||
const value = mimalloc.mi_heap_realloc_aligned(@ptrCast(this), buf.ptr, new_len, aligned_size);
|
||||
return @ptrCast(value);
|
||||
}
|
||||
};
|
||||
|
||||
const c_allocator_vtable = Allocator.VTable{
|
||||
.alloc = &Arena.alloc,
|
||||
.resize = &Arena.resize,
|
||||
.remap = &std.mem.Allocator.noRemap,
|
||||
.remap = &Arena.remap,
|
||||
.free = &Arena.free,
|
||||
};
|
||||
|
||||
@@ -128,8 +128,10 @@ pub const Features = struct {
|
||||
pub var process_dlopen: usize = 0;
|
||||
pub var postgres_connections: usize = 0;
|
||||
pub var s3: usize = 0;
|
||||
pub var valkey: usize = 0;
|
||||
pub var csrf_verify: usize = 0;
|
||||
pub var csrf_generate: usize = 0;
|
||||
pub var unsupported_uv_function: usize = 0;
|
||||
|
||||
comptime {
|
||||
@export(&napi_module_register, .{ .name = "Bun__napi_module_register_count" });
|
||||
|
||||
@@ -408,3 +408,54 @@ pub fn BabyList(comptime Type: type) type {
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn OffsetList(comptime Type: type) type {
|
||||
return struct {
|
||||
head: u32 = 0,
|
||||
byte_list: List = .{},
|
||||
|
||||
const List = BabyList(Type);
|
||||
const ThisList = @This();
|
||||
|
||||
pub fn init(head: u32, byte_list: List) ThisList {
|
||||
return .{
|
||||
.head = head,
|
||||
.byte_list = byte_list,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn write(self: *ThisList, allocator: std.mem.Allocator, bytes: []const u8) !void {
|
||||
_ = try self.byte_list.write(allocator, bytes);
|
||||
}
|
||||
|
||||
pub fn slice(this: *ThisList) []u8 {
|
||||
return this.byte_list.slice()[0..this.head];
|
||||
}
|
||||
|
||||
pub fn remaining(this: *ThisList) []u8 {
|
||||
return this.byte_list.slice()[this.head..];
|
||||
}
|
||||
|
||||
pub fn consume(self: *ThisList, bytes: u32) void {
|
||||
self.head +|= bytes;
|
||||
if (self.head >= self.byte_list.len) {
|
||||
self.head = 0;
|
||||
self.byte_list.len = 0;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn len(self: *const ThisList) u32 {
|
||||
return self.byte_list.len - self.head;
|
||||
}
|
||||
|
||||
pub fn clear(self: *ThisList) void {
|
||||
self.head = 0;
|
||||
self.byte_list.len = 0;
|
||||
}
|
||||
|
||||
pub fn deinit(self: *ThisList, allocator: std.mem.Allocator) void {
|
||||
self.byte_list.deinitWithAllocator(allocator);
|
||||
self.* = .{};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -749,9 +749,9 @@ pub fn deinit(dev: *DevServer) void {
|
||||
dev.active_websocket_connections.deinit(allocator);
|
||||
},
|
||||
.watcher_atomics = for (&dev.watcher_atomics.events) |*event| {
|
||||
event.aligned.dirs.deinit(dev.allocator);
|
||||
event.aligned.files.deinit(dev.allocator);
|
||||
event.aligned.extra_files.deinit(dev.allocator);
|
||||
event.dirs.deinit(dev.allocator);
|
||||
event.files.deinit(dev.allocator);
|
||||
event.extra_files.deinit(dev.allocator);
|
||||
},
|
||||
.testing_batch_events = switch (dev.testing_batch_events) {
|
||||
.disabled => {},
|
||||
@@ -6146,8 +6146,8 @@ fn markAllRouteChildrenFailed(dev: *DevServer, route_index: Route.Index) void {
|
||||
|
||||
/// This task informs the DevServer's thread about new files to be bundled.
|
||||
pub const HotReloadEvent = struct {
|
||||
/// Align to cache lines to eliminate contention.
|
||||
const Aligned = struct { aligned: HotReloadEvent align(std.atomic.cache_line) };
|
||||
/// Align to cache lines to eliminate false sharing.
|
||||
_: u0 align(std.atomic.cache_line) = 0,
|
||||
|
||||
owner: *DevServer,
|
||||
/// Initialized in WatcherAtomics.watcherReleaseAndSubmitEvent
|
||||
@@ -6387,7 +6387,7 @@ const WatcherAtomics = struct {
|
||||
/// once. Memory is reused by swapping between these two. These items are
|
||||
/// aligned to cache lines to reduce contention, since these structures are
|
||||
/// carefully passed between two threads.
|
||||
events: [2]HotReloadEvent.Aligned align(std.atomic.cache_line),
|
||||
events: [2]HotReloadEvent align(std.atomic.cache_line),
|
||||
/// 0 - no watch
|
||||
/// 1 - has fired additional watch
|
||||
/// 2+ - new events available, watcher is waiting on bundler to finish
|
||||
@@ -6401,10 +6401,7 @@ const WatcherAtomics = struct {
|
||||
|
||||
pub fn init(dev: *DevServer) WatcherAtomics {
|
||||
return .{
|
||||
.events = .{
|
||||
.{ .aligned = .initEmpty(dev) },
|
||||
.{ .aligned = .initEmpty(dev) },
|
||||
},
|
||||
.events = .{ .initEmpty(dev), .initEmpty(dev) },
|
||||
.current = 0,
|
||||
.watcher_events_emitted = .init(0),
|
||||
.watcher_has_event = .{},
|
||||
@@ -6417,7 +6414,7 @@ const WatcherAtomics = struct {
|
||||
fn watcherAcquireEvent(state: *WatcherAtomics) *HotReloadEvent {
|
||||
state.watcher_has_event.lock();
|
||||
|
||||
var ev: *HotReloadEvent = &state.events[state.current].aligned;
|
||||
var ev: *HotReloadEvent = &state.events[state.current];
|
||||
switch (ev.contention_indicator.swap(1, .seq_cst)) {
|
||||
0 => {
|
||||
// New event, initialize the timer if it is empty.
|
||||
@@ -6429,7 +6426,7 @@ const WatcherAtomics = struct {
|
||||
// DevServer stole this event. Unlikely but possible when
|
||||
// the user is saving very heavily (10-30 times per second)
|
||||
state.current +%= 1;
|
||||
ev = &state.events[state.current].aligned;
|
||||
ev = &state.events[state.current];
|
||||
if (Environment.allow_assert) {
|
||||
bun.assert(ev.contention_indicator.swap(1, .seq_cst) == 0);
|
||||
}
|
||||
@@ -6503,10 +6500,10 @@ const WatcherAtomics = struct {
|
||||
if (state.watcher_events_emitted.swap(0, .seq_cst) >= 2) {
|
||||
// Cannot use `state.current` because it will contend with the watcher.
|
||||
// Since there are are two events, one pointer comparison suffices
|
||||
const other_event = if (first_event == &state.events[0].aligned)
|
||||
&state.events[1].aligned
|
||||
const other_event = if (first_event == &state.events[0])
|
||||
&state.events[1]
|
||||
else
|
||||
&state.events[0].aligned;
|
||||
&state.events[0];
|
||||
|
||||
switch (other_event.contention_indicator.swap(1, .seq_cst)) {
|
||||
0 => {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user