mirror of https://github.com/oven-sh/bun
synced 2026-02-04 07:58:54 +00:00
Compare commits
17 Commits
dylan/pyth...jarred/mor
| Author | SHA1 | Date |
|---|---|---|
| | 87a3132fc6 | |
| | ff4202ce45 | |
| | 3a3932afde | |
| | 7853073906 | |
| | fe561a7c47 | |
| | 2375b7d8b9 | |
| | 751a71932c | |
| | a3835f1dd2 | |
| | a6336cbb2e | |
| | be54867260 | |
| | ddfb9b2f7b | |
| | c31a79880b | |
| | b7aa35f00a | |
| | 9edf6439a3 | |
| | f9c35dfbfa | |
| | 601cc84d5b | |
| | 99e1b10231 | |
@@ -78,6 +78,7 @@ src/bun.js/bindings/JSDOMWrapper.cpp
src/bun.js/bindings/JSDOMWrapperCache.cpp
src/bun.js/bindings/JSEnvironmentVariableMap.cpp
src/bun.js/bindings/JSFFIFunction.cpp
src/bun.js/bindings/JSHTTPStats.cpp
src/bun.js/bindings/JSMockFunction.cpp
src/bun.js/bindings/JSNextTickQueue.cpp
src/bun.js/bindings/JSPropertyIterator.cpp
@@ -222,6 +222,7 @@ src/bun.js/webcore/Response.zig
src/bun.js/webcore/S3Client.zig
src/bun.js/webcore/S3File.zig
src/bun.js/webcore/S3Stat.zig
src/bun.js/webcore/ScriptExecutionContext.zig
src/bun.js/webcore/Sink.zig
src/bun.js/webcore/streams.zig
src/bun.js/webcore/TextDecoder.zig
@@ -415,6 +415,7 @@ set(BUN_OBJECT_LUT_SOURCES
${CWD}/src/bun.js/bindings/ProcessBindingNatives.cpp
${CWD}/src/bun.js/modules/NodeModuleModule.cpp
${CODEGEN_PATH}/ZigGeneratedClasses.lut.txt
${CWD}/src/bun.js/bindings/JSHTTPStats.cpp
)

set(BUN_OBJECT_LUT_OUTPUTS
@@ -428,6 +429,7 @@ set(BUN_OBJECT_LUT_OUTPUTS
${CODEGEN_PATH}/ProcessBindingNatives.lut.h
${CODEGEN_PATH}/NodeModuleModule.lut.h
${CODEGEN_PATH}/ZigGeneratedClasses.lut.h
${CODEGEN_PATH}/JSHTTPStats.lut.h
)

macro(WEBKIT_ADD_SOURCE_DEPENDENCIES _source _deps)
3 packages/bun-types/bun.d.ts (vendored)
@@ -1126,6 +1126,7 @@ declare module "bun" {
* This will be used by fetch() and Bun.connect() to avoid DNS lookups.
*
* @param hostname The hostname to prefetch
* @param port The port to prefetch. Default is 443. Port helps distinguish between IPv6 vs IPv4-only connections.
*
* @example
* ```js
@@ -1135,7 +1136,7 @@ declare module "bun" {
* await fetch('https://example.com');
* ```
*/
function prefetch(hostname: string): void;
function prefetch(hostname: string, port?: number): void;

/**
* **Experimental API**
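For context, a minimal usage sketch of the widened signature. This assumes the declaration above is exposed as `dns.prefetch` from the `bun` module at runtime, and the registry hostname is only an example:

```ts
import { dns } from "bun";

// Warm the DNS cache for a host before issuing requests.
// The port (default 443) is now part of the cache key.
dns.prefetch("registry.npmjs.org", 443);

// A later request to the same host:port can reuse the cached lookup.
await fetch("https://registry.npmjs.org/bun");
```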
58 packages/bun-types/globals.d.ts (vendored)
@@ -1897,5 +1897,63 @@ declare namespace fetch {
https?: boolean;
},
): void;

/**
* Statistics about fetch() & node:http client requests.
*
* @example
* ```js
* console.log(fetch.stats);
* // {
* //   requests: 10,
* //   bytesWritten: 1000,
* //   bytesRead: 500,
* //   fail: 1,
* //   redirect: 2,
* //   success: 7,
* //   timeout: 0,
* //   refused: 0,
* //   active: 0,
* // }
* ```
*/
export const stats: {
/**
* Total number of HTTP requests initiated since the process started
*/
readonly requests: number;
/**
* Total number of bytes written in HTTP requests across the process (including Worker threads)
*/
readonly bytesWritten: number;
/**
* Total number of bytes read from fetch responses across the process (including Worker threads)
*/
readonly bytesRead: number;
/**
* Number of HTTP requests that failed for any reason across the process (including Worker threads)
*/
readonly fail: number;
/**
* Number of HTTP requests that were redirected across the process (including Worker threads)
*/
readonly redirect: number;
/**
* Number of HTTP requests that succeeded across the process (including Worker threads)
*/
readonly success: number;
/**
* Number of HTTP requests that timed out across the process (including Worker threads)
*/
readonly timeout: number;
/**
* Number of HTTP requests that were refused by the server across the process (including Worker threads)
*/
readonly refused: number;
/**
* Number of HTTP requests currently in progress across the process (including Worker threads)
*/
readonly active: number;
};
}
//#endregion
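Beyond the inline example, here is a rough sketch of how the new counters might be consumed: snapshot them around a batch of requests and compare. Field names come from the declarations above; the URLs are placeholders and the spread-copy is an assumption about the getters being enumerable:

```ts
// Snapshot the process-wide counters before and after a batch of requests.
const before = { ...fetch.stats };

await Promise.allSettled([
  fetch("https://example.com"),
  fetch("https://example.com/does-not-exist"),
]);

const delta = {
  requests: fetch.stats.requests - before.requests,
  success: fetch.stats.success - before.success,
  fail: fetch.stats.fail - before.fail,
  bytesRead: fetch.stats.bytesRead - before.bytesRead,
};
console.log(delta);
```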
@@ -504,7 +504,7 @@ void *us_socket_context_connect(int ssl, struct us_socket_context_t *context, co
}

struct addrinfo_request* ai_req;
if (Bun__addrinfo_get(loop, host, &ai_req) == 0) {
if (Bun__addrinfo_get(loop, host, (uint16_t)port, &ai_req) == 0) {
// fast path for cached results
struct addrinfo_result *result = Bun__addrinfo_getRequestResult(ai_req);
// fast failure path
@@ -107,7 +107,7 @@ struct addrinfo_result {
#define us_internal_ssl_socket_context_r struct us_internal_ssl_socket_context_t *nonnull_arg
#define us_internal_ssl_socket_r struct us_internal_ssl_socket_t *nonnull_arg

extern int Bun__addrinfo_get(struct us_loop_t* loop, const char* host, struct addrinfo_request** ptr);
extern int Bun__addrinfo_get(struct us_loop_t* loop, const char* host, uint16_t port, struct addrinfo_request** ptr);
extern int Bun__addrinfo_set(struct addrinfo_request* ptr, struct us_connecting_socket_t* socket);
extern void Bun__addrinfo_freeRequest(struct addrinfo_request* addrinfo_req, int error);
extern struct addrinfo_result *Bun__addrinfo_getRequestResult(struct addrinfo_request* addrinfo_req);
@@ -2960,7 +2960,7 @@ pub fn handleParseTaskFailure(
err: anyerror,
graph: bake.Graph,
abs_path: []const u8,
log: *const Log,
log: *Log,
bv2: *BundleV2,
) bun.OOM!void {
dev.graph_safety_lock.lock();
@@ -4703,7 +4703,7 @@ pub fn IncrementalGraph(side: bake.Side) type {
.abs_path => []const u8,
.index => FileIndex,
},
log: *const Log,
log: *Log,
is_ssr_graph: bool,
) bun.OOM!void {
g.owner().graph_safety_lock.assertLocked();
@@ -1629,6 +1629,8 @@ pub fn resolveMaybeNeedsTrailingSlash(
jsc_vm._resolve(&result, specifier_utf8.slice(), normalizeSource(source_utf8.slice()), is_esm, is_a_file_path) catch |err_| {
var err = err_;
const msg: logger.Msg = brk: {
log.lock.lock();
defer log.lock.unlock();
const msgs: []logger.Msg = log.msgs.items;

for (msgs) |m| {
@@ -1195,24 +1195,34 @@ pub const InternalDNS = struct {
pub const new = bun.TrivialNew(@This());
const Key = struct {
host: ?[:0]const u8,
port: u16 = 0,
hash: u64,

pub fn init(name: ?[:0]const u8) @This() {
pub fn init(name: ?[:0]const u8, port: u16) @This() {
const hash = if (name) |n| brk: {
break :brk bun.hash(n);
break :brk generateHash(n, port);
} else 0;
return .{
.host = name,
.hash = hash,
.port = port,
};
}

fn generateHash(name: [:0]const u8, port: u16) u64 {
var hasher = std.hash.Wyhash.init(0);
hasher.update(name);
hasher.update(std.mem.asBytes(&port));
return hasher.final();
}

pub fn toOwned(this: @This()) @This() {
if (this.host) |host| {
const host_copy = bun.default_allocator.dupeZ(u8, host) catch bun.outOfMemory();
return .{
.host = host_copy,
.hash = this.hash,
.port = this.port,
};
} else {
return this;
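The effect of folding the port into `Key` is easiest to see in a high-level analogue. The following is an illustrative TypeScript sketch of a cache keyed by host and port, not Bun's actual Zig implementation; the `lookup` helper is a made-up placeholder:

```ts
// Entries for ("example.com", 443) and ("example.com", 80) no longer share
// a slot, mirroring how the Zig Key now hashes host and port together.
type CacheKey = `${string}:${number}`;

const cache = new Map<CacheKey, Promise<string[]>>();

function getaddrinfoCached(host: string, port: number): Promise<string[]> {
  const key: CacheKey = `${host}:${port}`;
  let entry = cache.get(key);
  if (!entry) {
    entry = lookup(host, port); // hypothetical resolver call
    cache.set(key, entry);
  }
  return entry;
}

// Placeholder resolver used only to keep the sketch self-contained.
async function lookup(host: string, _port: number): Promise<string[]> {
  return [`resolved(${host})`];
}
```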
@@ -1253,6 +1263,7 @@ pub const InternalDNS = struct {
valid: bool = true,

libinfo: if (Environment.isMac) MacAsyncDNS else void = if (Environment.isMac) .{},
can_retry_for_addrconfig: bool = default_hints.flags.ADDRCONFIG,

pub fn isExpired(this: *Request, timestamp_to_store: *u32) bool {
if (this.refcount > 0 or this.result == null) {
@@ -1387,7 +1398,7 @@ pub const InternalDNS = struct {
var global_cache = GlobalCache{};

// we just hardcode a STREAM socktype
const hints: std.c.addrinfo = .{
const default_hints: std.c.addrinfo = .{
.addr = null,
.addrlen = 0,
.canonname = null,
@@ -1404,6 +1415,19 @@ pub const InternalDNS = struct {
.protocol = 0,
.socktype = std.c.SOCK.STREAM,
};
pub fn getHints() std.c.addrinfo {
var hints_copy = default_hints;
if (bun.getRuntimeFeatureFlag("BUN_FEATURE_FLAG_DISABLE_ADDRCONFIG")) {
hints_copy.flags.ADDRCONFIG = false;
}
if (bun.getRuntimeFeatureFlag("BUN_FEATURE_FLAG_DISABLE_IPV6")) {
hints_copy.family = std.c.AF.INET;
} else if (bun.getRuntimeFeatureFlag("BUN_FEATURE_FLAG_DISABLE_IPV4")) {
hints_copy.family = std.c.AF.INET6;
}

return hints_copy;
}

extern fn us_internal_dns_callback(socket: *bun.uws.ConnectingSocket, req: *Request) void;
extern fn us_internal_dns_callback_threadsafe(socket: *bun.uws.ConnectingSocket, req: *Request) void;
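A hedged sketch of how the new runtime flags might be exercised from the outside. The env var names are taken from the hunk above; the child script path and the use of `Bun.spawnSync` here are illustrative assumptions, not part of this change:

```ts
// Launch a child Bun process with IPv6 resolution disabled, so getHints()
// produces IPv4-only (AF_INET) hints for its getaddrinfo calls.
const child = Bun.spawnSync({
  cmd: ["bun", "run", "./fetch-something.ts"], // hypothetical script
  env: { ...process.env, BUN_FEATURE_FLAG_DISABLE_IPV6: "1" },
  stdout: "inherit",
  stderr: "inherit",
});

console.log("exit code:", child.exitCode);
```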
@@ -1528,6 +1552,12 @@ pub const InternalDNS = struct {
}

fn workPoolCallback(req: *Request) void {
var service_buf: [bun.fmt.fastDigitCount(std.math.maxInt(u16)) + 2]u8 = undefined;
const service: ?[*:0]const u8 = if (req.key.port > 0)
(std.fmt.bufPrintZ(&service_buf, "{d}", .{req.key.port}) catch unreachable).ptr
else
null;

if (Environment.isWindows) {
const wsa = std.os.windows.ws2_32;
const wsa_hints = wsa.addrinfo{
@@ -1544,19 +1574,33 @@ pub const InternalDNS = struct {
var addrinfo: ?*wsa.addrinfo = null;
const err = wsa.getaddrinfo(
if (req.key.host) |host| host.ptr else null,
null,
service,
&wsa_hints,
&addrinfo,
);
afterResult(req, @ptrCast(addrinfo), err);
} else {
var addrinfo: ?*std.c.addrinfo = null;
const err = std.c.getaddrinfo(
var hints = getHints();

var err = std.c.getaddrinfo(
if (req.key.host) |host| host.ptr else null,
null,
service,
&hints,
&addrinfo,
);

// optional fallback
if (err == .NONAME and hints.flags.ADDRCONFIG) {
hints.flags.ADDRCONFIG = false;
req.can_retry_for_addrconfig = false;
err = std.c.getaddrinfo(
if (req.key.host) |host| host.ptr else null,
service,
&hints,
&addrinfo,
);
}
afterResult(req, addrinfo, @intFromEnum(err));
}
}
@@ -1565,10 +1609,18 @@ pub const InternalDNS = struct {
const getaddrinfo_async_start_ = LibInfo.getaddrinfo_async_start() orelse return false;

var machport: ?*anyopaque = null;
var service_buf: [bun.fmt.fastDigitCount(std.math.maxInt(u16)) + 2]u8 = undefined;
const service: ?[*:0]const u8 = if (req.key.port > 0)
(std.fmt.bufPrintZ(&service_buf, "{d}", .{req.key.port}) catch unreachable).ptr
else
null;

var hints = getHints();

const errno = getaddrinfo_async_start_(
&machport,
if (req.key.host) |host| host.ptr else null,
null,
service,
&hints,
libinfoCallback,
req,
@@ -1600,8 +1652,37 @@ pub const InternalDNS = struct {
addr_info: ?*std.c.addrinfo,
arg: ?*anyopaque,
) callconv(.C) void {
const req = bun.cast(*Request, arg);
afterResult(req, addr_info, @intCast(status));
const req: *Request = bun.cast(*Request, arg);
const status_int: c_int = @intCast(status);
if (status == @intFromEnum(std.c.EAI.NONAME) and req.can_retry_for_addrconfig) {
req.can_retry_for_addrconfig = false;
var service_buf: [bun.fmt.fastDigitCount(std.math.maxInt(u16)) + 2]u8 = undefined;
const service: ?[*:0]const u8 = if (req.key.port > 0)
(std.fmt.bufPrintZ(&service_buf, "{d}", .{req.key.port}) catch unreachable).ptr
else
null;
const getaddrinfo_async_start_ = LibInfo.getaddrinfo_async_start() orelse return;
var machport: ?*anyopaque = null;
var hints = getHints();
hints.flags.ADDRCONFIG = false;

_ = getaddrinfo_async_start_(
&machport,
if (req.key.host) |host| host.ptr else null,
service,
&hints,
libinfoCallback,
req,
);

switch (req.libinfo.file_poll.?.register(bun.uws.Loop.get(), .machport, true)) {
.err => log("libinfoCallback: failed to register poll", .{}),
.result => {
return;
},
}
}
afterResult(req, addr_info, @intCast(status_int));
}

var dns_cache_hits_completed: usize = 0;
@@ -1622,9 +1703,9 @@ pub const InternalDNS = struct {
return object;
}

pub fn getaddrinfo(loop: *bun.uws.Loop, host: ?[:0]const u8, is_cache_hit: ?*bool) ?*Request {
pub fn getaddrinfo(loop: *bun.uws.Loop, host: ?[:0]const u8, port: u16, is_cache_hit: ?*bool) ?*Request {
const preload = is_cache_hit == null;
const key = Request.Key.init(host);
const key = Request.Key.init(host, port);
global_cache.lock.lock();
getaddrinfo_calls += 1;
var timestamp_to_store: u32 = 0;
@@ -1683,7 +1764,7 @@ pub const InternalDNS = struct {
}

pub fn prefetchFromJS(globalThis: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSC.JSValue {
const arguments = callframe.arguments_old(2).slice();
const arguments = callframe.arguments();

if (arguments.len < 1) {
return globalThis.throwNotEnoughArguments("prefetch", 1, arguments.len);
@@ -1703,18 +1784,26 @@ pub const InternalDNS = struct {
const hostname_z = try bun.default_allocator.dupeZ(u8, hostname_slice.slice());
defer bun.default_allocator.free(hostname_z);

prefetch(JSC.VirtualMachine.get().uwsLoop(), hostname_z);
const port: u16 = brk: {
if (arguments.len > 1 and !arguments[1].isUndefinedOrNull()) {
break :brk try globalThis.validateIntegerRange(arguments[1], u16, 443, .{ .field_name = "port", .always_allow_zero = true });
} else {
break :brk 443;
}
};

prefetch(JSC.VirtualMachine.get().uwsLoop(), hostname_z, port);
return .undefined;
}

pub fn prefetch(loop: *bun.uws.Loop, hostname: ?[:0]const u8) void {
_ = getaddrinfo(loop, hostname, null);
pub fn prefetch(loop: *bun.uws.Loop, hostname: ?[:0]const u8, port: u16) void {
_ = getaddrinfo(loop, hostname, port, null);
}

fn us_getaddrinfo(loop: *bun.uws.Loop, _host: ?[*:0]const u8, socket: *?*anyopaque) callconv(.C) c_int {
fn us_getaddrinfo(loop: *bun.uws.Loop, _host: ?[*:0]const u8, port: u16, socket: *?*anyopaque) callconv(.C) c_int {
const host: ?[:0]const u8 = std.mem.span(_host);
var is_cache_hit: bool = false;
const req = getaddrinfo(loop, host, &is_cache_hit).?;
const req = getaddrinfo(loop, host, port, &is_cache_hit).?;
socket.* = req;
return if (is_cache_hit) 0 else 1;
}
@@ -41,7 +41,7 @@
#include "BunObjectModule.h"
#include "JSCookie.h"
#include "JSCookieMap.h"

#include "JSHTTPStats.h"
#ifdef WIN32
#include <ws2def.h>
#else
@@ -328,12 +328,14 @@ static JSValue constructPasswordObject(VM& vm, JSObject* bunObject)

JSValue constructBunFetchObject(VM& vm, JSObject* bunObject)
{
JSFunction* fetchFn = JSFunction::create(vm, bunObject->globalObject(), 1, "fetch"_s, Bun__fetch, ImplementationVisibility::Public, NoIntrinsic);
auto* globalObject = defaultGlobalObject(bunObject->globalObject());
JSFunction* fetchFn = JSFunction::create(vm, globalObject, 1, "fetch"_s, Bun__fetch, ImplementationVisibility::Public, NoIntrinsic);

auto* globalObject = jsCast<Zig::GlobalObject*>(bunObject->globalObject());
fetchFn->putDirectNativeFunction(vm, globalObject, JSC::Identifier::fromString(vm, "preconnect"_s), 1, Bun__fetchPreconnect, ImplementationVisibility::Public, NoIntrinsic,
JSC::PropertyAttribute::ReadOnly | JSC::PropertyAttribute::DontDelete | 0);

fetchFn->putDirect(vm, JSC::Identifier::fromString(vm, "stats"_s), Bun::constructBunHTTPStatsObject(bunObject->globalObject()), JSC::PropertyAttribute::ReadOnly | JSC::PropertyAttribute::DontDelete | 0);

return fetchFn;
}
@@ -1,7 +1,8 @@
#include "ErrorCode.h"

#include "root.h"
#include "headers.h"

#include "ErrorCode.h"
#include "ImportMetaObject.h"
#include "ZigGlobalObject.h"
#include "ActiveDOMObject.h"
@@ -49,6 +50,7 @@
#include <JavaScriptCore/JSPromise.h>
#include "PathInlines.h"
#include "wtf/text/StringView.h"
#include "JSHTTPStats.h"

#include "isBuiltinModule.h"
120 src/bun.js/bindings/JSHTTPStats.cpp (new file)
@@ -0,0 +1,120 @@
#include "root.h"
#include <JavaScriptCore/JSObject.h>
#include <JavaScriptCore/JSString.h>
#include <JavaScriptCore/JSFunction.h>
#include <JavaScriptCore/JSGlobalObject.h>
#include <JavaScriptCore/JSObject.h>
#include <JavaScriptCore/ObjectConstructor.h>

namespace Bun {

using namespace JSC;

struct Bun__HTTPStats {
std::atomic<uint64_t> total_requests;
std::atomic<uint64_t> total_bytes_sent;
std::atomic<uint64_t> total_bytes_received;
std::atomic<uint64_t> total_requests_failed;
std::atomic<uint64_t> total_requests_redirected;
std::atomic<uint64_t> total_requests_succeeded;
std::atomic<uint64_t> total_requests_timed_out;
std::atomic<uint64_t> total_requests_connection_refused;
};
extern "C" Bun__HTTPStats Bun__HTTPStats;
static_assert(std::atomic<uint64_t>::is_always_lock_free, "Bun__HTTPStats must be lock-free");

// clang-format off
#define STATS_GETTER(name) \
JSC_DEFINE_CUSTOM_GETTER(getStatsField_##name, (JSGlobalObject * globalObject, EncodedJSValue thisValue, PropertyName)) \
{ \
return JSValue::encode(jsNumber(Bun__HTTPStats.name)); \
} \
\

#define FOR_EACH_STATS_FIELD(macro) \
macro(total_requests) \
macro(total_bytes_sent) \
macro(total_bytes_received) \
macro(total_requests_failed) \
macro(total_requests_redirected) \
macro(total_requests_succeeded) \
macro(total_requests_timed_out) \
macro(total_requests_connection_refused)

// clang-format on

FOR_EACH_STATS_FIELD(STATS_GETTER)

#undef STATS_GETTER
#undef FOR_EACH_STATS_FIELD

extern "C" std::atomic<uint64_t> Bun__HTTPStats__total_requests_active;

JSC_DEFINE_CUSTOM_GETTER(getStatsField_total_requests_active, (JSGlobalObject * globalObject, EncodedJSValue thisValue, PropertyName))
{
return JSValue::encode(jsNumber(Bun__HTTPStats__total_requests_active));
}

class JSHTTPStatsObject final : public JSNonFinalObject {
public:
using Base = JSNonFinalObject;

static constexpr unsigned StructureFlags = Base::StructureFlags | HasStaticPropertyTable;

template<typename CellType, SubspaceAccess>
static GCClient::IsoSubspace* subspaceFor(VM& vm)
{
return &vm.plainObjectSpace();
}

static JSHTTPStatsObject* create(VM& vm, Structure* structure)
{
JSHTTPStatsObject* object = new (NotNull, allocateCell<JSHTTPStatsObject>(vm)) JSHTTPStatsObject(vm, structure);
object->finishCreation(vm);
return object;
}

DECLARE_INFO;

static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
{
return Structure::create(vm, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), info());
}

private:
JSHTTPStatsObject(VM& vm, Structure* structure)
: Base(vm, structure)
{
}

void finishCreation(VM& vm)
{
Base::finishCreation(vm);
}
};

/* Source for JSHTTPStats.lut.h
@begin jsHTTPStatsObjectTable
requests getStatsField_total_requests CustomAccessor|ReadOnly|DontDelete
active getStatsField_total_requests_active CustomAccessor|ReadOnly|DontDelete
success getStatsField_total_requests_succeeded CustomAccessor|ReadOnly|DontDelete
bytesWritten getStatsField_total_bytes_sent CustomAccessor|ReadOnly|DontDelete
bytesRead getStatsField_total_bytes_received CustomAccessor|ReadOnly|DontDelete
fail getStatsField_total_requests_failed CustomAccessor|ReadOnly|DontDelete
redirect getStatsField_total_requests_redirected CustomAccessor|ReadOnly|DontDelete
timeout getStatsField_total_requests_timed_out CustomAccessor|ReadOnly|DontDelete
refused getStatsField_total_requests_connection_refused CustomAccessor|ReadOnly|DontDelete
@end
*/
#include "JSHTTPStats.lut.h"

const ClassInfo JSHTTPStatsObject::s_info = { "HTTPStats"_s, &Base::s_info, &jsHTTPStatsObjectTable, nullptr, CREATE_METHOD_TABLE(JSHTTPStatsObject) };

JSC::JSObject* constructBunHTTPStatsObject(JSC::JSGlobalObject* globalObject)
{
auto& vm = globalObject->vm();

return JSHTTPStatsObject::create(vm, JSHTTPStatsObject::createStructure(vm, globalObject, globalObject->objectPrototype()));
}

}
4 src/bun.js/bindings/JSHTTPStats.h (new file)
@@ -0,0 +1,4 @@

namespace Bun {
JSC::JSObject* constructBunHTTPStatsObject(JSC::JSGlobalObject* globalObject);
}
@@ -2290,7 +2290,7 @@ pub const BundleV2 = struct {
const source = &this.graph.input_files.items(.source)[load.source_index.get()];
// A stack-allocated Log object containing the singular message
var msg_mut = msg;
const temp_log: Logger.Log = .{
var temp_log: Logger.Log = .{
.clone_line_text = false,
.errors = @intFromBool(msg.kind == .err),
.warnings = @intFromBool(msg.kind == .warn),
@@ -176,6 +176,10 @@ pub const Loader = struct {
return this.getHttpProxy(url.isHTTP(), url.hostname);
}

pub fn hasHTTPProxy(this: *const Loader) bool {
return this.has("http_proxy") or this.has("HTTP_PROXY") or this.has("https_proxy") or this.has("HTTPS_PROXY");
}

pub fn getHttpProxy(this: *Loader, is_http: bool, hostname: ?[]const u8) ?URL {
// TODO: When Web Worker support is added, make sure to intern these strings
var http_proxy: ?URL = null;
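A rough TypeScript analogue of the new proxy check, purely for illustration (Bun's real implementation is the Zig hunk above); the env var names are the ones the diff inspects:

```ts
// Returns true when any common proxy variable is set in the environment.
function hasHTTPProxy(env: Record<string, string | undefined>): boolean {
  return ["http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY"].some(
    (key) => env[key] !== undefined,
  );
}

// Later in this diff, the package manager uses this check to skip the
// registry DNS prefetch at startup when a proxy is configured.
console.log(hasHTTPProxy(process.env));
```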
248 src/http.zig
@@ -116,6 +116,170 @@ pub const FetchRedirect = enum(u8) {
});
};

pub const Stats = extern struct {
total_requests: std.atomic.Value(u64) = .init(0),
total_bytes_sent: std.atomic.Value(u64) = .init(0),
total_bytes_received: std.atomic.Value(u64) = .init(0),
total_requests_failed: std.atomic.Value(u64) = .init(0),
total_requests_redirected: std.atomic.Value(u64) = .init(0),
total_requests_succeeded: std.atomic.Value(u64) = .init(0),
total_requests_timed_out: std.atomic.Value(u64) = .init(0),
total_requests_connection_refused: std.atomic.Value(u64) = .init(0),

pub var instance: Stats = .{};

pub fn addRequest() void {
_ = instance.total_requests.fetchAdd(1, .monotonic);
}

pub fn addBytesSent(bytes: u64) void {
_ = instance.total_bytes_sent.fetchAdd(bytes, .monotonic);
}

pub fn addBytesReceived(bytes: u64) void {
_ = instance.total_bytes_received.fetchAdd(bytes, .monotonic);
}

pub fn addRequestsFailed() void {
_ = instance.total_requests_failed.fetchAdd(1, .monotonic);
}

pub fn addRequestsRedirected() void {
_ = instance.total_requests_redirected.fetchAdd(1, .monotonic);
}

pub fn addRequestsSucceeded() void {
_ = instance.total_requests_succeeded.fetchAdd(1, .monotonic);
}

pub fn addRequestsTimedOut() void {
_ = instance.total_requests_timed_out.fetchAdd(1, .monotonic);
}

pub fn addRequestsConnectionRefused() void {
_ = instance.total_requests_connection_refused.fetchAdd(1, .monotonic);
}

pub fn fmt() Formatter {
return .{
.enable_color = bun.Output.enable_ansi_colors_stderr,
};
}

pub const Formatter = struct {
enable_color: bool = false,

pub fn format(this: Formatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
const total_requests = Stats.instance.total_requests.load(.monotonic);
const total_bytes_sent = Stats.instance.total_bytes_sent.load(.monotonic);
const total_bytes_received = Stats.instance.total_bytes_received.load(.monotonic);
const total_requests_failed = Stats.instance.total_requests_failed.load(.monotonic);
const total_requests_redirected = Stats.instance.total_requests_redirected.load(.monotonic);
const total_requests_succeeded = Stats.instance.total_requests_succeeded.load(.monotonic);
const total_requests_timed_out = Stats.instance.total_requests_timed_out.load(.monotonic);
const total_requests_connection_refused = Stats.instance.total_requests_connection_refused.load(.monotonic);
const active_requests = AsyncHTTP.active_requests_count.load(.monotonic);
var needs_space = false;

if (!(total_bytes_received > 0 or
total_bytes_sent > 0 or
total_requests > 0 or
active_requests > 0 or
total_requests_failed > 0 or
total_requests_redirected > 0 or
total_requests_succeeded > 0 or
total_requests_timed_out > 0 or
total_requests_connection_refused > 0))
{
return;
}

switch (this.enable_color) {
inline else => |enable_ansi_colors| {
try writer.writeAll(Output.prettyFmt("\n <d>http stats | <r> ", enable_ansi_colors));
needs_space = false;

if (active_requests > 0) {
if (needs_space) try writer.writeAll(Output.prettyFmt(" <d>|<r> ", enable_ansi_colors));
try writer.print(
Output.prettyFmt("active<d>:<r> <blue><b>{}<r>", enable_ansi_colors),
.{active_requests},
);
needs_space = true;
}

if (total_requests_succeeded > 0) {
if (needs_space) try writer.writeAll(Output.prettyFmt(" <d>|<r> ", enable_ansi_colors));
needs_space = true;
try writer.print(
Output.prettyFmt("ok<d>:<r> <green><b>{}<r>", enable_ansi_colors),
.{total_requests_succeeded},
);
}

if (total_requests_failed > 0) {
if (needs_space) try writer.writeAll(Output.prettyFmt(" <d>|<r> ", enable_ansi_colors));
try writer.print(
Output.prettyFmt("fail<d>:<r> <red><b>{}<r>", enable_ansi_colors),
.{total_requests_failed},
);
needs_space = true;
}

if (total_bytes_received > 0) {
if (needs_space) try writer.writeAll(Output.prettyFmt(" <d>|<r> ", enable_ansi_colors));
try writer.print(
Output.prettyFmt("recv<d>:<r> <b>{}<r>", enable_ansi_colors),
.{bun.fmt.size(total_bytes_received, .{})},
);
needs_space = true;
}

if (total_bytes_sent > 0) {
if (needs_space) try writer.writeAll(Output.prettyFmt(" <d>|<r> ", enable_ansi_colors));
try writer.print(
Output.prettyFmt("sent<d>:<r> <b>{}<r>", enable_ansi_colors),
.{bun.fmt.size(total_bytes_sent, .{})},
);
needs_space = true;
}

if (total_requests_redirected > 0) {
if (needs_space) try writer.writeAll(Output.prettyFmt(" <d>|<r> ", enable_ansi_colors));
try writer.print(
Output.prettyFmt("redirect<d>:<r> <yellow>{}<r>", enable_ansi_colors),
.{total_requests_redirected},
);
needs_space = true;
}

if (total_requests_timed_out > 0) {
if (needs_space) try writer.writeAll(Output.prettyFmt(" <d>|<r> ", enable_ansi_colors));
needs_space = true;
try writer.print(
Output.prettyFmt("timeout<d>:<r> <b>{}<r>", enable_ansi_colors),
.{total_requests_timed_out},
);
needs_space = true;
}

if (total_requests_connection_refused > 0) {
if (needs_space) try writer.writeAll(Output.prettyFmt(" <d>|<r> ", enable_ansi_colors));
needs_space = true;
try writer.print(
Output.prettyFmt("refused<d>:<r> <red>{}<r>", enable_ansi_colors),
.{total_requests_connection_refused},
);
needs_space = true;
}

try writer.writeAll("\n");
},
}
}
};
};

pub const HTTPRequestBody = union(enum) {
bytes: []const u8,
sendfile: Sendfile,
@@ -411,7 +575,9 @@ const ProxyTunnel = struct {
.tcp => |socket| socket.write(encoded_data, true),
.none => 0,
};
const pending = encoded_data[@intCast(written)..];
const written_bytes: usize = @intCast(@max(written, 0));
Stats.addBytesSent(written_bytes);
const pending = encoded_data[written_bytes..];
if (pending.len > 0) {
// lets flush when we are truly writable
proxy.write_buffer.write(pending) catch bun.outOfMemory();
@@ -489,6 +655,8 @@ const ProxyTunnel = struct {
return;
}
const written = socket.write(encoded_data, true);
const written_bytes: usize = @intCast(@max(written, 0));
Stats.addBytesSent(written_bytes);
if (written == encoded_data.len) {
this.write_buffer.reset();
return;
@@ -1512,6 +1680,7 @@ pub const HTTPThread = struct {

{
var batch_ = batch;
_ = Stats.instance.total_requests.fetchAdd(batch.len, .monotonic);
while (batch_.pop()) |task| {
const http: *AsyncHTTP = @fieldParentPtr("task", task);
this.queued_tasks.push(http);
@@ -1746,12 +1915,14 @@ pub fn onTimeout(
log("Timeout {s}\n", .{client.url.href});

defer NewHTTPContext(is_ssl).terminateSocket(socket);
Stats.addRequestsTimedOut();
client.fail(error.Timeout);
}
pub fn onConnectError(
client: *HTTPClient,
) void {
log("onConnectError {s}\n", .{client.url.href});
Stats.addRequestsConnectionRefused();
client.fail(error.ConnectionRefused);
}

@@ -1780,13 +1951,6 @@ pub inline fn cleanup(force: bool) void {
default_arena.gc(force);
}

pub const SOCKET_FLAGS: u32 = if (Environment.isLinux)
SOCK.CLOEXEC | posix.MSG.NOSIGNAL
else
SOCK.CLOEXEC;

pub const OPEN_SOCKET_FLAGS = SOCK.CLOEXEC;

pub const extremely_verbose = false;

fn writeProxyConnect(
@@ -2292,10 +2456,10 @@ pub fn isKeepAlivePossible(this: *HTTPClient) bool {
if (comptime FeatureFlags.enable_keepalive) {
// TODO keepalive for unix sockets
if (this.unix_socket_path.length() > 0) return false;
// is not possible to reuse Proxy with TSL, so disable keepalive if url is tunneling HTTPS
if (this.http_proxy != null and this.url.isHTTPS()) {
return false;
}
// // is not possible to reuse Proxy with TSL, so disable keepalive if url is tunneling HTTPS
// if (this.http_proxy != null and this.url.isHTTPS()) {
//     return false;
// }

//check state
if (this.state.flags.allow_keepalive and !this.flags.disable_keepalive) return true;
@@ -2377,7 +2541,9 @@ const accept_header = picohttp.Header{ .name = "Accept", .value = "*/*" };

const accept_encoding_no_compression = "identity";
const accept_encoding_compression = "gzip, deflate, br";
const accept_encoding_compression_for_proxy = "gzip, deflate";
const accept_encoding_header_compression = picohttp.Header{ .name = "Accept-Encoding", .value = accept_encoding_compression };
const accept_encoding_header_compression_for_proxy = picohttp.Header{ .name = "Accept-Encoding", .value = accept_encoding_compression_for_proxy };
const accept_encoding_header_no_compression = picohttp.Header{ .name = "Accept-Encoding", .value = accept_encoding_no_compression };

const accept_encoding_header = if (FeatureFlags.disable_compression_in_http_client)
@@ -2453,6 +2619,11 @@ pub const AsyncHTTP = struct {
pub var active_requests_count = std.atomic.Value(usize).init(0);
pub var max_simultaneous_requests = std.atomic.Value(usize).init(256);

comptime {
// This is not part of Stats because it's used in other places
@export(&active_requests_count, .{ .name = "Bun__HTTPStats__total_requests_active" });
}

pub fn loadEnv(allocator: std.mem.Allocator, logger: *Log, env: *DotEnv.Loader) void {
if (env.get("BUN_CONFIG_MAX_HTTP_REQUESTS")) |max_http_requests| {
const max = std.fmt.parseInt(u16, max_http_requests, 10) catch {
@@ -2981,7 +3152,11 @@ pub fn buildRequest(this: *HTTPClient, body_len: usize) picohttp.Request {
}

if (!override_accept_encoding and !this.flags.disable_decompression) {
request_headers_buf[header_count] = accept_encoding_header;
request_headers_buf[header_count] = if (this.http_proxy != null or this.proxy_tunnel != null)
// Disable brotli compression for HTTP Proxies due to issues with transfer-encoding chunked + brotli.
accept_encoding_header_compression_for_proxy
else
accept_encoding_header;

header_count += 1;
}
@@ -3272,7 +3447,9 @@ noinline fn sendInitialRequestPayload(this: *HTTPClient, comptime is_first_call:
return error.WriteFailed;
}

this.state.request_sent_len += @as(usize, @intCast(amount));
const sent_bytes: usize = @intCast(@max(amount, 0));
this.state.request_sent_len += sent_bytes;
Stats.addBytesSent(sent_bytes);
const has_sent_headers = this.state.request_sent_len >= headers_len;

if (has_sent_headers and this.verbose != .none) {
@@ -3371,9 +3548,11 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
this.closeAndFail(error.WriteFailed, is_ssl, socket);
return;
}
const sent_bytes: usize = @intCast(@max(amount, 0));

this.state.request_sent_len += @as(usize, @intCast(amount));
this.state.request_body = this.state.request_body[@as(usize, @intCast(amount))..];
Stats.addBytesSent(sent_bytes);
this.state.request_sent_len += sent_bytes;
this.state.request_body = this.state.request_body[sent_bytes..];

if (this.state.request_body.len == 0) {
this.state.request_stage = .done;
@@ -3391,8 +3570,10 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
this.closeAndFail(error.WriteFailed, is_ssl, socket);
return;
}
this.state.request_sent_len += @as(usize, @intCast(amount));
stream.buffer.cursor += @intCast(amount);
const sent_bytes: usize = @intCast(@max(amount, 0));
this.state.request_sent_len += sent_bytes;
Stats.addBytesSent(sent_bytes);
stream.buffer.cursor += sent_bytes;
if (amount < to_send.len) {
stream.has_backpressure = true;
}
@@ -3436,8 +3617,10 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
const to_send = this.state.request_body;
const amount = proxy.writeData(to_send) catch return; // just wait and retry when onWritable! if closed internally will call proxy.onClose

this.state.request_sent_len += @as(usize, @intCast(amount));
this.state.request_body = this.state.request_body[@as(usize, @intCast(amount))..];
const sent_bytes: usize = @intCast(@max(amount, 0));
this.state.request_sent_len += sent_bytes;
Stats.addBytesSent(sent_bytes);
this.state.request_body = this.state.request_body[sent_bytes..];

if (this.state.request_body.len == 0) {
this.state.request_stage = .done;
@@ -3452,8 +3635,10 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
if (stream.buffer.isNotEmpty()) {
const to_send = stream.buffer.slice();
const amount = proxy.writeData(to_send) catch return; // just wait and retry when onWritable! if closed internally will call proxy.onClose
this.state.request_sent_len += amount;
stream.buffer.cursor += @truncate(amount);
const sent_bytes: usize = @intCast(@max(amount, 0));
this.state.request_sent_len += sent_bytes;
Stats.addBytesSent(sent_bytes);
stream.buffer.cursor += sent_bytes;
if (amount < to_send.len) {
stream.has_backpressure = true;
}
@@ -3517,7 +3702,9 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
}
}

this.state.request_sent_len += @as(usize, @intCast(amount));
const sent_bytes: usize = @intCast(@max(amount, 0));
this.state.request_sent_len += sent_bytes;
Stats.addBytesSent(sent_bytes);
const has_sent_headers = this.state.request_sent_len >= headers_len;

if (has_sent_headers and this.state.request_body.len > 0) {
@@ -3676,6 +3863,12 @@ pub fn handleOnDataHeaders(
if (this.flags.proxy_tunneling and this.proxy_tunnel == null) {
// we are proxing we dont need to cloneMetadata yet
this.startProxyHandshake(is_ssl, socket);

if (body_buf.len > 0) {
if (this.proxy_tunnel) |proxy| {
proxy.receiveData(body_buf);
}
}
return;
}

@@ -3731,6 +3924,7 @@ pub fn onData(
socket: NewHTTPContext(is_ssl).HTTPSocket,
) void {
log("onData {}", .{incoming_data.len});
Stats.addBytesReceived(incoming_data.len);
if (this.signals.get(.aborted)) {
this.closeAndAbort(is_ssl, socket);
return;
@@ -3823,13 +4017,13 @@ fn fail(this: *HTTPClient, err: anyerror) void {
this.state.response_stage = .fail;
this.state.fail = err;
this.state.stage = .fail;
Stats.addRequestsFailed();

if (!this.flags.defer_fail_until_connecting_is_complete) {
const callback = this.result_callback;
const result = this.toResult();
this.state.reset(this.allocator);
this.flags.proxy_tunneling = false;

callback.run(@fieldParentPtr("client", this), result);
}
}
@@ -3915,6 +4109,7 @@ pub fn progressUpdate(this: *HTTPClient, comptime is_ssl: bool, ctx: *NewHTTPCon
this.state.request_stage = .done;
this.state.stage = .done;
this.flags.proxy_tunneling = false;
Stats.addRequestsSucceeded();
}

result.body.?.* = body;
@@ -4647,6 +4842,7 @@ pub fn handleResponseMetadata(
}
}
this.state.flags.is_redirect_pending = true;
Stats.addRequestsRedirected();
if (this.method.hasRequestBody()) {
this.state.flags.resend_request_body_on_redirect = true;
}
@@ -4847,3 +5043,7 @@ pub const Headers = struct {
return headers;
}
};

comptime {
@export(&Stats.instance, .{ .name = "Bun__HTTPStats" });
}
@@ -32,20 +32,20 @@ integrity: Integrity = .{},
url: strings.StringOrTinyString,
package_manager: *PackageManager,

pub inline fn run(this: *const ExtractTarball, bytes: []const u8) !Install.ExtractData {
pub inline fn run(this: *const ExtractTarball, log: *logger.Log, bytes: []const u8) !Install.ExtractData {
if (!this.skip_verify and this.integrity.tag.isSupported()) {
if (!this.integrity.verify(bytes)) {
this.package_manager.log.addErrorFmt(
log.addErrorFmt(
null,
logger.Loc.Empty,
this.package_manager.allocator,
bun.default_allocator,
"Integrity check failed<r> for tarball: {s}",
.{this.name.slice()},
) catch unreachable;
return error.IntegrityCheckFailed;
}
}
return this.extract(bytes);
return this.extract(log, bytes);
}

pub fn buildURL(
@@ -127,7 +127,7 @@ threadlocal var final_path_buf: bun.PathBuffer = undefined;
threadlocal var folder_name_buf: bun.PathBuffer = undefined;
threadlocal var json_path_buf: bun.PathBuffer = undefined;

fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractData {
fn extract(this: *const ExtractTarball, log: *logger.Log, tgz_bytes: []const u8) !Install.ExtractData {
const tracer = bun.perf.trace("ExtractTarball.extract");
defer tracer.end();

@@ -161,10 +161,10 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
const tmpname = try FileSystem.instance.tmpname(basename[0..@min(basename.len, 32)], std.mem.asBytes(&tmpname_buf), bun.fastRandom());
{
var extract_destination = bun.MakePath.makeOpenPath(tmpdir, bun.span(tmpname), .{}) catch |err| {
this.package_manager.log.addErrorFmt(
log.addErrorFmt(
null,
logger.Loc.Empty,
this.package_manager.allocator,
bun.default_allocator,
"{s} when create temporary directory named \"{s}\" (while extracting \"{s}\")",
.{ @errorName(err), tmpname, name },
) catch unreachable;
@@ -220,10 +220,10 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
zlib_pool.data.list.clearRetainingCapacity();
var zlib_entry = try Zlib.ZlibReaderArrayList.init(tgz_bytes, &zlib_pool.data.list, default_allocator);
zlib_entry.readAll() catch |err| {
this.package_manager.log.addErrorFmt(
log.addErrorFmt(
null,
logger.Loc.Empty,
this.package_manager.allocator,
bun.default_allocator,
"{s} decompressing \"{s}\" to \"{}\"",
.{ @errorName(err), name, bun.fmt.fmtPath(u8, std.mem.span(tmpname), .{}) },
) catch unreachable;
@@ -251,7 +251,7 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
var dirname_reader = DirnameReader{ .outdirname = &resolved };

switch (PackageManager.verbose_install) {
inline else => |log| _ = try Archiver.extractToDir(
inline else => |verbose_log| _ = try Archiver.extractToDir(
zlib_pool.data.list.items,
extract_destination,
null,
@@ -260,7 +260,7 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
.{
// for GitHub tarballs, the root dir is always <user>-<repo>-<commit_id>
.depth_to_skip = 1,
.log = log,
.log = verbose_log,
},
),
}
@@ -277,14 +277,14 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
}
},
else => switch (PackageManager.verbose_install) {
inline else => |log| _ = try Archiver.extractToDir(
inline else => |verbose_log| _ = try Archiver.extractToDir(
zlib_pool.data.list.items,
extract_destination,
null,
void,
{},
.{
.log = log,
.log = verbose_log,
// packages usually have root directory `package/`, and scoped packages usually have root `<scopename>/`
// https://github.com/npm/cli/blob/93883bb6459208a916584cad8c6c72a315cf32af/node_modules/pacote/lib/fetcher.js#L442
.depth_to_skip = 1,
@@ -334,10 +334,10 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
.read_only = true,
}).unwrap() catch |err| {
// i guess we just
this.package_manager.log.addErrorFmt(
log.addErrorFmt(
null,
logger.Loc.Empty,
this.package_manager.allocator,
bun.default_allocator,
"moving \"{s}\" to cache dir failed\n{}\n From: {s}\n To: {s}",
.{ name, err, tmpname, folder_name },
) catch unreachable;
@@ -383,10 +383,10 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
}
}
dir_to_move.close();
this.package_manager.log.addErrorFmt(
log.addErrorFmt(
null,
logger.Loc.Empty,
this.package_manager.allocator,
bun.default_allocator,
"moving \"{s}\" to cache dir failed\n{}\n From: {s}\n To: {s}",
.{ name, err, tmpname, folder_name },
) catch unreachable;
@@ -423,10 +423,10 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
folder_name,
.{ .move_fallback = true },
).asErr()) |err| {
this.package_manager.log.addErrorFmt(
log.addErrorFmt(
null,
logger.Loc.Empty,
this.package_manager.allocator,
bun.default_allocator,
"moving \"{s}\" to cache dir failed: {}\n From: {s}\n To: {s}",
.{ name, err, tmpname, folder_name },
) catch unreachable;
@@ -437,10 +437,10 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
// We return a resolved absolute absolute file path to the cache dir.
// To get that directory, we open the directory again.
var final_dir = bun.openDir(cache_dir, folder_name) catch |err| {
this.package_manager.log.addErrorFmt(
log.addErrorFmt(
null,
logger.Loc.Empty,
this.package_manager.allocator,
bun.default_allocator,
"failed to verify cache dir for \"{s}\": {s}",
.{ name, @errorName(err) },
) catch unreachable;
@@ -452,10 +452,10 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
.fromStdDir(final_dir),
&final_path_buf,
) catch |err| {
this.package_manager.log.addErrorFmt(
log.addErrorFmt(
null,
logger.Loc.Empty,
this.package_manager.allocator,
bun.default_allocator,
"failed to resolve cache dir for \"{s}\": {s}",
.{ name, @errorName(err) },
) catch unreachable;
@@ -474,7 +474,7 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
}) {
const json_file, json_buf = bun.sys.File.readFileFrom(
bun.FD.fromStdDir(cache_dir),
bun.path.joinZ(&[_]string{ folder_name, "package.json" }, .auto),
bun.path.joinZBuf(&json_path_buf, &[_]string{ folder_name, "package.json" }, .auto),
bun.default_allocator,
).unwrap() catch |err| {
if (this.resolution.tag == .github and err == error.ENOENT) {
@@ -485,10 +485,10 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
};
}

this.package_manager.log.addErrorFmt(
log.addErrorFmt(
null,
logger.Loc.Empty,
this.package_manager.allocator,
bun.default_allocator,
"\"package.json\" for \"{s}\" failed to open: {s}",
.{ name, @errorName(err) },
) catch unreachable;
@@ -498,10 +498,10 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
json_path = json_file.getPath(
&json_path_buf,
).unwrap() catch |err| {
this.package_manager.log.addErrorFmt(
log.addErrorFmt(
null,
logger.Loc.Empty,
this.package_manager.allocator,
bun.default_allocator,
"\"package.json\" for \"{s}\" failed to resolve: {s}",
.{ name, @errorName(err) },
) catch unreachable;
@@ -509,37 +509,39 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
};
}

// create an index storing each version of a package installed
if (strings.indexOfChar(basename, '/') == null) create_index: {
const dest_name = switch (this.resolution.tag) {
.github => folder_name["@GH@".len..],
// trim "name@" from the prefix
.npm => folder_name[name.len + 1 ..],
else => folder_name,
};

if (comptime Environment.isWindows) {
bun.MakePath.makePath(u8, cache_dir, name) catch {
break :create_index;
if (!bun.getRuntimeFeatureFlag("BUN_FEATURE_FLAG_DISABLE_INSTALL_INDEX")) {
// create an index storing each version of a package installed
if (strings.indexOfChar(basename, '/') == null) create_index: {
const dest_name = switch (this.resolution.tag) {
.github => folder_name["@GH@".len..],
// trim "name@" from the prefix
.npm => folder_name[name.len + 1 ..],
else => folder_name,
};

var dest_buf: bun.PathBuffer = undefined;
const dest_path = bun.path.joinAbsStringBufZ(
// only set once, should be fine to read not on main thread
this.package_manager.cache_directory_path,
&dest_buf,
&[_]string{ name, dest_name },
.windows,
);
if (comptime Environment.isWindows) {
bun.MakePath.makePath(u8, cache_dir, name) catch {
break :create_index;
};

bun.sys.sys_uv.symlinkUV(final_path, dest_path, bun.windows.libuv.UV_FS_SYMLINK_JUNCTION).unwrap() catch {
break :create_index;
};
} else {
var index_dir = bun.FD.fromStdDir(bun.MakePath.makeOpenPath(cache_dir, name, .{}) catch break :create_index);
defer index_dir.close();
var dest_buf: bun.PathBuffer = undefined;
const dest_path = bun.path.joinAbsStringBufZ(
// only set once, should be fine to read not on main thread
this.package_manager.cache_directory_path,
&dest_buf,
&[_]string{ name, dest_name },
.windows,
);

bun.sys.symlinkat(final_path, index_dir, dest_name).unwrap() catch break :create_index;
bun.sys.sys_uv.symlinkUV(final_path, dest_path, bun.windows.libuv.UV_FS_SYMLINK_JUNCTION).unwrap() catch {
break :create_index;
};
} else {
var index_dir = bun.FD.fromStdDir(bun.MakePath.makeOpenPath(cache_dir, name, .{}) catch break :create_index);
defer index_dir.close();

bun.sys.symlinkat(final_path, index_dir, dest_name).unwrap() catch break :create_index;
}
}
}
@@ -777,6 +777,7 @@ pub const Task = struct {
}

const result = this.request.extract.tarball.run(
&this.log,
bytes,
) catch |err| {
bun.handleErrorReturnTrace(err, @errorReturnTrace());
@@ -798,7 +799,7 @@ pub const Task = struct {
if (Repository.tryHTTPS(url)) |https| break :brk Repository.download(
manager.allocator,
this.request.git_clone.env,
manager.log,
&this.log,
manager.getCacheDirectory(),
this.id,
name,
@@ -822,7 +823,7 @@ pub const Task = struct {
} orelse if (Repository.trySSH(url)) |ssh| Repository.download(
manager.allocator,
this.request.git_clone.env,
manager.log,
&this.log,
manager.getCacheDirectory(),
this.id,
name,
@@ -846,7 +847,7 @@ pub const Task = struct {
const data = Repository.checkout(
manager.allocator,
this.request.git_checkout.env,
manager.log,
&this.log,
manager.getCacheDirectory(),
git_checkout.repo_dir.stdDir(),
git_checkout.name.slice(),
@@ -897,6 +898,7 @@ pub const Task = struct {
&this.request.local_tarball.tarball,
tarball_path,
normalize,
&this.log,
) catch |err| {
bun.handleErrorReturnTrace(err, @errorReturnTrace());

@@ -918,13 +920,14 @@ pub const Task = struct {
tarball: *const ExtractTarball,
tarball_path: string,
normalize: bool,
log: *logger.Log,
) !ExtractData {
const bytes = if (normalize)
try File.readFromUserInput(std.fs.cwd(), tarball_path, allocator).unwrap()
else
try File.readFrom(bun.FD.cwd(), tarball_path, allocator).unwrap();
defer allocator.free(bytes);
return tarball.run(bytes);
return tarball.run(log, bytes);
}

pub const Tag = enum(u3) {
@@ -2164,7 +2167,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type {
const basename = std.fs.path.basename(unintall_task.absolute_path);

var dir = bun.openDirA(std.fs.cwd(), dirname) catch |err| {
if (comptime Environment.isDebug) {
if (comptime Environment.isDebug or Environment.enable_asan) {
Output.debugWarn("Failed to delete {s}: {s}", .{ unintall_task.absolute_path, @errorName(err) });
}
return;
@@ -2172,7 +2175,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type {
defer bun.FD.fromStdDir(dir).close();

dir.deleteTree(basename) catch |err| {
if (comptime Environment.isDebug) {
if (comptime Environment.isDebug or Environment.enable_asan) {
Output.debugWarn("Failed to delete {s} in {s}: {s}", .{ basename, dirname, @errorName(err) });
}
};
@@ -3335,7 +3338,7 @@ pub const PackageManager = struct {
};

if (PackageManager.verbose_install and manager.pendingTaskCount() > 0) {
if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks\n", .{closure.manager.pendingTaskCount()});
if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks\n{}", .{ closure.manager.pendingTaskCount(), bun.http.Stats.fmt() });
}
}

@@ -6565,12 +6568,14 @@ pub const PackageManager = struct {

entry.value_ptr.manifest.pkg.public_max_age = timestamp_this_tick.?;

Npm.PackageManifest.Serializer.saveAsync(
&entry.value_ptr.manifest,
manager.scopeForPackageName(name.slice()),
manager.getTemporaryDirectory(),
manager.getCacheDirectory(),
);
if (manager.options.enable.manifest_cache) {
Npm.PackageManifest.Serializer.saveAsync(
&entry.value_ptr.manifest,
manager.scopeForPackageName(name.slice()),
manager.getTemporaryDirectory(),
manager.getCacheDirectory(),
);
}

if (@hasField(@TypeOf(callbacks), "manifests_only") and callbacks.manifests_only) {
continue;
@@ -6780,6 +6785,10 @@ pub const PackageManager = struct {

if (task.log.msgs.items.len > 0) {
try task.log.print(Output.errorWriter());
if (task.log.errors > 0) {
manager.any_failed_to_install = true;
}
task.log.deinit();
}

switch (task.tag) {
@@ -14377,7 +14386,7 @@ pub const PackageManager = struct {
if (PackageManager.verbose_install and closure.manager.pendingTaskCount() > 0) {
const pending_task_count = closure.manager.pendingTaskCount();
if (pending_task_count > 0 and PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) {
Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks\n", .{pending_task_count});
Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks\n{}", .{ pending_task_count, bun.http.Stats.fmt() });
}
}
@@ -14532,12 +14541,16 @@ pub const PackageManager = struct {
const log_level = manager.options.log_level;

// Start resolving DNS for the default registry immediately.
if (manager.options.scope.url.hostname.len > 0) {
var hostname_stack = std.heap.stackFallback(512, ctx.allocator);
const allocator = hostname_stack.get();
const hostname = try allocator.dupeZ(u8, manager.options.scope.url.hostname);
defer allocator.free(hostname);
bun.dns.internal.prefetch(manager.event_loop.loop(), hostname);
// Unless you're behind a proxy.
if (!manager.env.hasHTTPProxy()) {
// And don't try to resolve DNS if it's an IP address.
if (manager.options.scope.url.hostname.len > 0 and !manager.options.scope.url.isIPAddress()) {
var hostname_stack = std.heap.stackFallback(512, ctx.allocator);
const allocator = hostname_stack.get();
const hostname = try allocator.dupeZ(u8, manager.options.scope.url.hostname);
defer allocator.free(hostname);
bun.dns.internal.prefetch(manager.event_loop.loop(), hostname, manager.options.scope.url.getPortAuto());
}
}

var load_result: Lockfile.LoadResult = if (manager.options.do.load_lockfile)
@@ -14999,7 +15012,7 @@ pub const PackageManager = struct {
const pending_tasks = this.pendingTaskCount();

if (PackageManager.verbose_install and pending_tasks > 0) {
if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks\n", .{pending_tasks});
if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks{}\n", .{ pending_tasks, bun.http.Stats.fmt() });
}

return pending_tasks == 0;
@@ -3189,6 +3189,7 @@ pub const Parser = struct {
};
var buffered_writer = std.io.bufferedWriter(writer);
const actual = buffered_writer.writer();

for (self.log.msgs.items) |msg| {
var m: logger.Msg = msg;
m.writeFormat(actual, true) catch {};
@@ -632,12 +632,16 @@ pub const Log = struct {
errors: u32 = 0,
msgs: std.ArrayList(Msg),
level: Level = if (Environment.isDebug) Level.info else Level.warn,

clone_line_text: bool = false,

/// This data structure is not thread-safe, but in nearly all cases we already clone the logs
/// Unless you're in `bun install`, there are a couple spots.
/// Generally, you want to write to a local logger.Log and then clone it into the parent's list at the end.
lock: bun.Mutex = .{},

pub fn memoryCost(this: *const Log) usize {
var cost: usize = 0;
for (this.msgs.items) |msg| {
for (this.msgs.items) |*msg| {
cost += msg.memoryCost();
}
return cost;
@@ -648,6 +652,8 @@ pub const Log = struct {
}

pub fn reset(this: *Log) void {
this.lock.lock();
defer this.lock.unlock();
this.msgs.clearRetainingCapacity();
this.warnings = 0;
this.errors = 0;
@@ -821,11 +827,27 @@ pub const Log = struct {
}

pub fn appendTo(self: *Log, other: *Log) OOM!void {
try self.cloneTo(other);
self.msgs.clearAndFree();
{
self.lock.lock();
other.lock.lock();
defer self.lock.unlock();
defer other.lock.unlock();
try self.cloneTo(other);
}

{
self.lock.lock();
defer self.lock.unlock();
self.msgs.clearAndFree();
}
}

pub fn cloneToWithRecycled(self: *Log, other: *Log, recycled: bool) OOM!void {
self.lock.lock();
other.lock.lock();
defer self.lock.unlock();
defer other.lock.unlock();

try other.msgs.appendSlice(self.msgs.items);
other.warnings += self.warnings;
other.errors += self.errors;
@@ -856,7 +878,12 @@ pub const Log = struct {

pub fn appendToWithRecycled(self: *Log, other: *Log, recycled: bool) OOM!void {
try self.cloneToWithRecycled(other, recycled);
self.msgs.clearAndFree();

{
self.lock.lock();
defer self.lock.unlock();
self.msgs.clearAndFree();
}
}

pub fn appendToMaybeRecycled(self: *Log, other: *Log, source: *const Source) OOM!void {
@@ -864,6 +891,17 @@ pub const Log = struct {
}

pub fn deinit(log: *Log) void {
if (Environment.isDebug or Environment.enable_asan) {
if (!log.lock.tryLock()) {
@panic("Cannot deinit the log while a thread is active");
}
}
defer {
if (Environment.isDebug or Environment.enable_asan) {
log.lock.unlock();
}
}

log.msgs.clearAndFree();
// log.warnings = 0;
// log.errors = 0;
@@ -1240,7 +1278,7 @@ pub const Log = struct {
pub fn addRangeDebugWithNotes(log: *Log, source: ?*const Source, r: Range, text: string, notes: []Data) OOM!void {
@branchHint(.cold);
if (!Kind.shouldPrint(.debug, log.level)) return;
// log.de += 1;

try log.addMsg(.{
.kind = Kind.debug,
.data = rangeData(source, r, text),
@@ -1270,6 +1308,8 @@ pub const Log = struct {
}

pub fn addMsg(self: *Log, msg: Msg) OOM!void {
self.lock.lock();
defer self.lock.unlock();
try self.msgs.append(msg);
}

@@ -1315,14 +1355,17 @@ pub const Log = struct {
);
}

pub fn print(self: *const Log, to: anytype) !void {
pub fn print(self: *Log, to: anytype) !void {
return switch (Output.enable_ansi_colors) {
inline else => |enable_ansi_colors| self.printWithEnableAnsiColors(to, enable_ansi_colors),
};
}

pub fn printWithEnableAnsiColors(self: *const Log, to: anytype, comptime enable_ansi_colors: bool) !void {
pub fn printWithEnableAnsiColors(self: *Log, to: anytype, comptime enable_ansi_colors: bool) !void {
var needs_newline = false;
self.lock.lock();
defer self.lock.unlock();

if (self.warnings > 0 and self.errors > 0) {
// Print warnings at the top
// errors at the bottom
18
src/sys.zig
@@ -2390,7 +2390,7 @@ pub fn renameatConcurrently(
from: [:0]const u8,
to_dir_fd: bun.FileDescriptor,
to: [:0]const u8,
comptime opts: struct { move_fallback: bool = false },
opts: struct { move_fallback: bool = false },
) Maybe(void) {
switch (renameatConcurrentlyWithoutFallback(from_dir_fd, from, to_dir_fd, to)) {
.result => return Maybe(void).success,
@@ -2413,7 +2413,7 @@ pub fn renameatConcurrentlyWithoutFallback(
var did_atomically_replace = false;

attempt_atomic_rename_and_fallback_to_racy_delete: {
{
if (!bun.getRuntimeFeatureFlag("BUN_FEATURE_FLAG_DISABLE_ATOMIC_RENAME")) {
// Happy path: the folder doesn't exist in the cache dir, so we can
// just rename it. We don't need to delete anything.
var err = switch (renameat2(from_dir_fd, from, to_dir_fd, to, .{
@@ -2467,6 +2467,16 @@ pub fn renameat2(from_dir: bun.FileDescriptor, from: [:0]const u8, to_dir: bun.F
return renameat(from_dir, from, to_dir, to);
}

if (bun.getRuntimeFeatureFlag("BUN_FEATURE_FLAG_DISABLE_RENAMEAT2")) {
@branchHint(.unlikely);
return .{
.err = .{
.errno = @intFromEnum(E.NOSYS),
.syscall = .rename,
},
};
}

while (true) {
const rc = switch (comptime Environment.os) {
.linux => std.os.linux.renameat2(@intCast(from_dir.cast()), from.ptr, @intCast(to_dir.cast()), to.ptr, flags.int()),
@@ -4779,8 +4789,8 @@ pub fn copyFileZSlowWithHandle(in_handle: bun.FileDescriptor, to_dir: bun.FileDe
}

if (comptime Environment.isPosix) {
_ = bun.c.fchmod(out_handle.cast(), stat_.mode);
_ = bun.c.fchown(out_handle.cast(), stat_.uid, stat_.gid);
_ = bun.sys.fchmod(out_handle, @intCast(stat_.mode));
_ = bun.sys.fchown(out_handle, @intCast(stat_.uid), @intCast(stat_.gid));
}

return Maybe(void).success;
@@ -153,6 +153,10 @@ pub const URL = struct {
return if (this.isHTTPS()) @as(u16, 443) else @as(u16, 80);
}

pub fn isIPAddress(this: *const URL) bool {
return bun.strings.isIPAddress(this.hostname);
}

pub fn hasValidPort(this: *const URL) bool {
return (this.getPort() orelse 0) > 0;
}
@@ -34,14 +34,14 @@ const words: Record<string, { reason: string; limit?: number; regex?: boolean }>

[String.raw`: [a-zA-Z0-9_\.\*\?\[\]\(\)]+ = undefined,`]: { reason: "Do not default a struct field to undefined", limit: 240, regex: true },
"usingnamespace": { reason: "Zig 0.15 will remove `usingnamespace`" },
"catch unreachable": { reason: "For out-of-memory, prefer 'catch bun.outOfMemory()'", limit: 1849 },
"catch unreachable": { reason: "For out-of-memory, prefer 'catch bun.outOfMemory()'", limit: 1852 },

"std.fs.Dir": { reason: "Prefer bun.sys + bun.FD instead of std.fs", limit: 180 },
"std.fs.cwd": { reason: "Prefer bun.FD.cwd()", limit: 103 },
"std.fs.File": { reason: "Prefer bun.sys + bun.FD instead of std.fs", limit: 64 },
".stdFile()": { reason: "Prefer bun.sys + bun.FD instead of std.fs.File. Zig hides 'errno' when Bun wants to match libuv", limit: 18 },
".stdDir()": { reason: "Prefer bun.sys + bun.FD instead of std.fs.File. Zig hides 'errno' when Bun wants to match libuv", limit: 48 },
".arguments_old(": { reason: "Please migrate to .argumentsAsArray() or another argument API", limit: 286 },
".arguments_old(": { reason: "Please migrate to .argumentsAsArray() or another argument API", limit: 285 },
"// autofix": { reason: "Evaluate if this variable should be deleted entirely or explicitly discarded.", limit: 176 },
};
const words_keys = [...Object.keys(words)];
@@ -4,7 +4,7 @@ import { describe, expect, it } from "bun:test";
describe("dns.prefetch", () => {
it("should prefetch", async () => {
const currentStats = dns.getCacheStats();
dns.prefetch("example.com");
dns.prefetch("example.com", 80);
await Bun.sleep(32);

// Must set keepalive: false to ensure it doesn't reuse the socket.
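The hunks above extend DNS prefetching to take a port: `bun install` now passes the registry's port to `bun.dns.internal.prefetch`, and the JavaScript-facing `dns.prefetch` accepts an optional port as well, as the test change shows. A rough sketch of the JS-level usage (not part of this diff; the registry hostname here is only an example):

```ts
// Sketch: warm the DNS cache for a host/port pair before making requests.
// "registry.npmjs.org" is an arbitrary example hostname, not hard-coded by this diff.
import { dns } from "bun";

dns.prefetch("registry.npmjs.org", 443);

// Later requests to the same host can reuse the cached lookup.
const res = await fetch("https://registry.npmjs.org/bun-types");
console.log(res.status, dns.getCacheStats());
```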
109
test/js/web/fetch/fetch-stats.test.ts
Normal file
@@ -0,0 +1,109 @@
import { describe, expect, it } from "bun:test";
import "harness";

describe("fetch.stats", () => {
it("tracks request statistics", async () => {
// Save initial stats
const initialStats = {
requests: fetch.stats.requests,
bytesWritten: fetch.stats.bytesWritten,
bytesRead: fetch.stats.bytesRead,
success: fetch.stats.success,
active: fetch.stats.active,
fail: fetch.stats.fail,
redirect: fetch.stats.redirect,
timeout: fetch.stats.timeout,
refused: fetch.stats.refused,
};

// Start a server
const responseBody = "Hello, World!";
const requestBody = "Test request body";

using server = Bun.serve({
port: 0, // Use any available port
fetch(req) {
return new Response(responseBody, {
headers: { "Content-Type": "text/plain" },
});
},
});

// Make a fetch request with a body
const response = await fetch(server.url, {
method: "POST",
body: requestBody,
});

const responseText = await response.text();
expect(responseText).toBe(responseBody);

// Verify stats were updated
expect(fetch.stats.requests).toBe(initialStats.requests + 1);
expect(fetch.stats.success).toBe(initialStats.success + 1);
expect(fetch.stats.bytesWritten).toBeGreaterThan(initialStats.bytesWritten);
expect(fetch.stats.bytesRead).toBeGreaterThan(initialStats.bytesRead);

// Active should return to the same value after request completes
expect(fetch.stats.active).toBe(initialStats.active);
});

it("tracks multiple concurrent requests", async () => {
const initialActive = fetch.stats.active;
const initialRequests = fetch.stats.requests;

// Start a server that delays responses
using server = Bun.serve({
port: 0,
async fetch(req) {
await Bun.sleep(50); // Small delay to ensure concurrent requests
return new Response("OK");
},
});

// Start multiple requests without awaiting them
const requests = Array.from({ length: 5 }, () => fetch(server.url).then(r => r.blob()));

// Check active requests increased
expect(fetch.stats.active).toBeGreaterThan(initialActive);
expect(fetch.stats.requests).toBe(initialRequests + 5);

// Wait for all requests to complete
await Promise.all(requests);

// Active should return to initial value
expect(fetch.stats.active).toBe(initialActive);
});

it("tracks failed requests", async () => {
const initialFail = fetch.stats.fail;

// Try to connect to a non-existent server
try {
await fetch("http://localhost:54321");
} catch (error) {
// Expected to fail
}

expect(fetch.stats.fail).toBe(initialFail + 1);
});

it("has all expected properties", () => {
const expectedProperties = [
"requests",
"bytesWritten",
"bytesRead",
"fail",
"redirect",
"success",
"timeout",
"refused",
"active",
] as const;

for (const prop of expectedProperties) {
expect(fetch.stats).toHaveProperty(prop);
expect(fetch.stats[prop]).toBeTypeOf("number");
}
});
});
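For context, the `fetch.stats` counters exercised by the test above are cumulative for the running process, which is why the test records initial values and asserts on deltas. A minimal sketch of reading them outside a test (the URL is only a placeholder):

```ts
// Sketch: print a one-line summary of fetch() statistics after a request.
const res = await fetch("https://example.com");
await res.arrayBuffer();

const { requests, success, fail, active, bytesRead, bytesWritten } = fetch.stats;
console.log(`requests=${requests} success=${success} fail=${fail} active=${active}`);
console.log(`bytesRead=${bytesRead} bytesWritten=${bytesWritten}`);
```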