mirror of https://github.com/oven-sh/bun, synced 2026-02-16 13:51:47 +00:00
Add S3 upload support for Bun build with flexible configuration
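For context, here is a minimal sketch of how the new option is meant to be used from JavaScript, based on the test script added at the end of this commit; bucket names and credential values are placeholders, and the exact option shape may differ:

// Build and upload the outputs to S3. `s3` accepts either a plain
// "s3://bucket/prefix" URL or an object with explicit credentials; when the
// credentials are omitted, AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY are read
// from the environment.
const result = await Bun.build({
  entrypoints: ["./index.ts"],
  outdir: "./out",
  s3: {
    url: "s3://my-bucket/builds/latest",
    accessKeyId: "test-key",
    secretAccessKey: "test-secret",
    region: "us-west-2",
  },
});
console.log(result.success, result.outputs.length);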

@@ -517,7 +517,7 @@ pub const JSBundler = struct {

this.s3 = S3Config{
.url = url,
.credentials = credentials.toHeap(),
.credentials = if (credentials.changed_credentials) credentials.credentials.dupe() else null,
};
} else {
return globalThis.throwInvalidArguments("Expected s3 to be a string URL or object with credentials", .{});

@@ -1,62 +1,4 @@
// Generated with scripts/generate-perf-trace-events.sh
// clang-format off
#define FOR_EACH_TRACE_EVENT(macro) \
macro(Bundler.BindImportsToExports, 0) \
macro(Bundler.CloneLinkerGraph, 1) \
macro(Bundler.CreateNamespaceExports, 2) \
macro(Bundler.FigureOutCommonJS, 3) \
macro(Bundler.MatchImportsWithExports, 4) \
macro(Bundler.ParseJS, 5) \
macro(Bundler.ParseJSON, 6) \
macro(Bundler.ParseTOML, 7) \
macro(Bundler.ResolveExportStarStatements, 8) \
macro(Bundler.Worker.create, 9) \
macro(Bundler.WrapDependencies, 10) \
macro(Bundler.breakOutputIntoPieces, 11) \
macro(Bundler.cloneAST, 12) \
macro(Bundler.computeChunks, 13) \
macro(Bundler.findAllImportedPartsInJSOrder, 14) \
macro(Bundler.findReachableFiles, 15) \
macro(Bundler.generateChunksInParallel, 16) \
macro(Bundler.generateCodeForFileInChunkCss, 17) \
macro(Bundler.generateCodeForFileInChunkJS, 18) \
macro(Bundler.generateIsolatedHash, 19) \
macro(Bundler.generateSourceMapForChunk, 20) \
macro(Bundler.markFileLiveForTreeShaking, 21) \
macro(Bundler.markFileReachableForCodeSplitting, 22) \
macro(Bundler.onParseTaskComplete, 23) \
macro(Bundler.postProcessJSChunk, 24) \
macro(Bundler.readFile, 25) \
macro(Bundler.renameSymbolsInChunk, 26) \
macro(Bundler.scanImportsAndExports, 27) \
macro(Bundler.treeShakingAndCodeSplitting, 28) \
macro(Bundler.writeChunkToDisk, 29) \
macro(Bundler.writeOutputFilesToDisk, 30) \
macro(ExtractTarball.extract, 31) \
macro(FolderResolver.readPackageJSONFromDisk.folder, 32) \
macro(FolderResolver.readPackageJSONFromDisk.workspace, 33) \
macro(JSBundler.addPlugin, 34) \
macro(JSBundler.hasAnyMatches, 35) \
macro(JSBundler.matchOnLoad, 36) \
macro(JSBundler.matchOnResolve, 37) \
macro(JSGlobalObject.create, 38) \
macro(JSParser.analyze, 39) \
macro(JSParser.parse, 40) \
macro(JSParser.postvisit, 41) \
macro(JSParser.visit, 42) \
macro(JSPrinter.print, 43) \
macro(JSPrinter.printWithSourceMap, 44) \
macro(ModuleResolver.resolve, 45) \
macro(PackageInstaller.install, 46) \
macro(PackageInstaller.installPatch, 47) \
macro(PackageManifest.Serializer.loadByFile, 48) \
macro(PackageManifest.Serializer.save, 49) \
macro(RuntimeTranspilerCache.fromFile, 50) \
macro(RuntimeTranspilerCache.save, 51) \
macro(RuntimeTranspilerCache.toFile, 52) \
macro(StandaloneModuleGraph.serialize, 53) \
macro(Symbols.followAll, 54) \
macro(TestCommand.printCodeCoverageLCov, 55) \
macro(TestCommand.printCodeCoverageLCovAndText, 56) \
macro(TestCommand.printCodeCoverageText, 57) \
// end

@@ -1333,7 +1333,6 @@ pub const LinkerContext = struct {
}

pub const writeOutputFilesToDisk = @import("linker_context/writeOutputFilesToDisk.zig").writeOutputFilesToDisk;
pub const writeOutputFilesToS3 = @import("linker_context/writeOutputFilesToDisk.zig").writeOutputFilesToS3;

// Sort cross-chunk exports by chunk name for determinism
pub fn sortedCrossChunkExportItems(

@@ -1747,6 +1747,29 @@ pub const BundleV2 = struct {
.value => |*build| {
const root_obj = JSC.JSValue.createEmptyObject(globalThis, 3);
const output_files: []options.OutputFile = build.output_files.items;

// Check if S3 upload is configured
if (this.s3_config) |s3_config| {
if (s3_config.url.len > 0) {
@import("./s3_upload.zig").uploadOutputFilesToS3(
output_files,
s3_config.url,
s3_config.credentials,
globalThis,
) catch |err| {
// Add error to log
this.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "S3 upload failed: {s}", .{@errorName(err)}) catch {};

// Still return the build result but with the error logged
root_obj.put(
globalThis,
JSC.ZigString.static("s3_error"),
JSC.ZigString.init(@errorName(err)).toJS(globalThis),
);
};
}
}

const output_files_js = JSC.JSValue.createEmptyArray(globalThis, output_files.len) catch return promise.reject(globalThis, error.JSError);
if (output_files_js == .zero) {
@panic("Unexpected pending JavaScript exception in JSBundleCompletionTask.onComplete. This is a bug in Bun.");

@@ -340,7 +340,9 @@ pub fn generateChunksInParallel(c: *LinkerContext, chunks: []Chunk, comptime is_
return error.MultipleOutputFilesWithoutOutputDir;
}

if (root_path.len > 0) {
// Check if S3 upload is requested - in that case, we skip writing to disk
// The actual S3 upload will be handled in the completion task where globalThis is available
if (c.resolver.opts.s3_url == null and root_path.len > 0) {
try c.writeOutputFilesToDisk(root_path, chunks, &output_files);
} else {
// In-memory build

@@ -411,359 +411,6 @@ pub fn writeOutputFilesToDisk(
}
}

pub fn writeOutputFilesToS3(
c: *LinkerContext,
s3_url: []const u8,
s3_credentials: ?*bun.S3.S3Credentials,
chunks: []Chunk,
output_files: *std.ArrayList(options.OutputFile),
globalThis: *JSC.JSGlobalObject,
) !void {
const trace = bun.perf.trace("Bundler.writeOutputFilesToS3");
defer trace.end();

// Parse S3 URL to extract bucket and prefix
var bucket: []const u8 = "";
var prefix: []const u8 = "";
if (strings.hasPrefixComptime(s3_url, "s3://")) {
const url_without_protocol = s3_url[5..];
if (strings.indexOfChar(url_without_protocol, '/')) |slash_index| {
bucket = url_without_protocol[0..slash_index];
prefix = url_without_protocol[slash_index + 1 ..];
} else {
bucket = url_without_protocol;
}
} else {
c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "Invalid S3 URL format: {s}. Expected s3://bucket/prefix", .{s3_url}) catch unreachable;
return error.InvalidS3URL;
}

// Get or create S3 credentials
const credentials = s3_credentials orelse brk: {
const env_creds = globalThis.bunVM().transpiler.env.getS3Credentials();
if (env_creds.accessKeyId.len == 0 or env_creds.secretAccessKey.len == 0) {
c.log.addError(null, Logger.Loc.Empty, "Missing S3 credentials. Set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables") catch unreachable;
return error.MissingS3Credentials;
}
var creds = env_creds.dupe();
if (creds.bucket.len == 0) {
creds.bucket = bucket;
}
break :brk creds;
};
defer if (s3_credentials == null) credentials.deref();

// First, generate all content in memory
var memory_files = std.ArrayList(MemoryFile).init(bun.default_allocator);
defer {
for (memory_files.items) |*file| {
file.deinit();
}
memory_files.deinit();
}

// Generate content for all chunks
for (chunks) |*chunk| {
const trace2 = bun.perf.trace("Bundler.generateChunkForS3");
defer trace2.end();

var display_size: usize = 0;
var code_result = chunk.intermediate_output.code(
bun.default_allocator,
c.parse_graph,
&c.graph,
c.resolver.opts.public_path,
chunk,
chunks,
&display_size,
chunk.content.sourcemap(c.options.source_maps) != .none,
) catch |err| bun.Output.panic("Failed to create output chunk: {s}", .{@errorName(err)});

const input_path = try bun.default_allocator.dupe(
u8,
if (chunk.entry_point.is_entry_point)
c.parse_graph.input_files.items(.source)[chunk.entry_point.source_index].path.text
else
chunk.final_rel_path,
);

// Prepare S3 path
const s3_path = if (prefix.len > 0)
try std.fmt.allocPrint(bun.default_allocator, "{s}/{s}", .{ prefix, chunk.final_rel_path })
else
try bun.default_allocator.dupe(u8, chunk.final_rel_path);

// Store the main file content
try memory_files.append(.{
.path = s3_path,
.content = code_result.buffer,
.content_type = switch (chunk.content) {
.javascript => "application/javascript",
.css => "text/css",
.html => "text/html",
},
});

// Handle source maps
switch (chunk.content.sourcemap(c.options.source_maps)) {
.external, .linked => |tag| {
const output_source_map = chunk.output_source_map.finalize(bun.default_allocator, code_result.shifts) catch @panic("Failed to allocate memory for external source map");
const source_map_path = try std.fmt.allocPrint(bun.default_allocator, "{s}.map", .{s3_path});

if (tag == .linked) {
// Append source map URL to the code
const a, const b = if (c.options.public_path.len > 0)
cheapPrefixNormalizer(c.options.public_path, std.fs.path.basename(source_map_path))
else
.{ "", std.fs.path.basename(source_map_path) };

const source_map_url = try std.fmt.allocPrint(bun.default_allocator, "//# sourceMappingURL={s}{s}\n", .{ a, b });
const new_content = try std.mem.concat(bun.default_allocator, u8, &.{ memory_files.items[memory_files.items.len - 1].content, source_map_url });
memory_files.items[memory_files.items.len - 1].content = new_content;
}

try memory_files.append(.{
.path = source_map_path,
.content = output_source_map,
.content_type = "application/json",
});
},
.@"inline" => {
const output_source_map = chunk.output_source_map.finalize(bun.default_allocator, code_result.shifts) catch @panic("Failed to allocate memory for external source map");
const encode_len = base64.encodeLen(output_source_map);

const source_map_start = "//# sourceMappingURL=data:application/json;base64,";
var encoded = try bun.default_allocator.alloc(u8, source_map_start.len + encode_len + 1);
@memcpy(encoded[0..source_map_start.len], source_map_start);
_ = base64.encode(encoded[source_map_start.len .. source_map_start.len + encode_len], output_source_map);
encoded[encoded.len - 1] = '\n';

const new_content = try std.mem.concat(bun.default_allocator, u8, &.{ memory_files.items[memory_files.items.len - 1].content, encoded });
memory_files.items[memory_files.items.len - 1].content = new_content;
},
.none => {},
}

// TODO: Handle bytecode generation for S3
}

// Add additional output files
for (c.parse_graph.additional_output_files.items) |*src| {
const s3_path = if (prefix.len > 0)
try std.fmt.allocPrint(bun.default_allocator, "{s}/{s}", .{ prefix, src.dest_path })
else
try bun.default_allocator.dupe(u8, src.dest_path);

try memory_files.append(.{
.path = s3_path,
.content = src.value.buffer.bytes,
.content_type = src.loader.toMimeType(&.{}),
});
}

// Now upload all files to S3
Output.prettyln("<r><d>Uploading {d} files to S3...<r>", .{memory_files.items.len});

var upload_count: std.atomic.Value(usize) = std.atomic.Value(usize).init(0);
var error_count: std.atomic.Value(usize) = std.atomic.Value(usize).init(0);

for (memory_files.items) |*file| {
const task = bun.new(S3UploadTask, .{
.credentials = credentials,
.path = file.path,
.content = file.content,
.content_type = file.content_type,
.upload_count = &upload_count,
.error_count = &error_count,
.globalThis = globalThis,
});

// Start the upload
credentials.ref();
bun.S3.upload(
credentials,
file.path,
file.content,
file.content_type,
null, // acl
null, // proxy_url
null, // storage_class
S3UploadTask.onComplete,
task,
);
}

// Wait for all uploads to complete
while (upload_count.load(.acquire) < memory_files.items.len) {
// Let the event loop process S3 callbacks
if (globalThis.bunVM().tick()) {
continue;
}
std.time.sleep(10 * std.time.ns_per_ms);
}

if (error_count.load(.acquire) > 0) {
c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "Failed to upload {d} files to S3", .{error_count.load(.acquire)}) catch unreachable;
return error.S3UploadFailed;
}

Output.prettyln("<r><green>✓<r> Successfully uploaded {d} files to S3", .{memory_files.items.len});

// Build output files list for the result
// We don't have actual file sizes from S3, so we use the content size
var file_index: usize = 0;
for (chunks) |*chunk| {
const main_file = &memory_files.items[file_index];
file_index += 1;

const input_path = try bun.default_allocator.dupe(
u8,
if (chunk.entry_point.is_entry_point)
c.parse_graph.input_files.items(.source)[chunk.entry_point.source_index].path.text
else
chunk.final_rel_path,
);

const output_kind = if (chunk.content == .css)
.asset
else if (chunk.entry_point.is_entry_point)
c.graph.files.items(.entry_point_kind)[chunk.entry_point.source_index].outputKind()
else
.chunk;

var source_map_index: ?u32 = null;
if (chunk.content.sourcemap(c.options.source_maps) == .external or
chunk.content.sourcemap(c.options.source_maps) == .linked)
{
source_map_index = @as(u32, @truncate(output_files.items.len + 1));
}

try output_files.append(options.OutputFile.init(.{
.output_path = main_file.path,
.input_path = input_path,
.input_loader = if (chunk.entry_point.is_entry_point)
c.parse_graph.input_files.items(.loader)[chunk.entry_point.source_index]
else
.js,
.hash = chunk.template.placeholder.hash,
.output_kind = output_kind,
.loader = .js,
.source_map_index = source_map_index,
.bytecode_index = null,
.size = @as(u32, @truncate(main_file.content.len)),
.display_size = @as(u32, @truncate(main_file.content.len)),
.is_executable = chunk.is_executable,
.data = .{ .buffer = .{
.allocator = bun.default_allocator,
.bytes = main_file.content,
} },
.side = if (chunk.content == .css)
.client
else switch (c.graph.ast.items(.target)[chunk.entry_point.source_index]) {
.browser => .client,
else => .server,
},
.entry_point_index = if (output_kind == .@"entry-point")
chunk.entry_point.source_index - @as(u32, (if (c.framework) |fw| if (fw.server_components != null) 3 else 1 else 1))
else
null,
.referenced_css_files = switch (chunk.content) {
.javascript => |js| @ptrCast(try bun.default_allocator.dupe(u32, js.css_chunks)),
.css => &.{},
.html => &.{},
},
}));

// Add source map output file if exists
if (source_map_index != null and
(chunk.content.sourcemap(c.options.source_maps) == .external or
chunk.content.sourcemap(c.options.source_maps) == .linked))
{
const source_map_file = &memory_files.items[file_index];
file_index += 1;

try output_files.append(options.OutputFile.init(.{
.output_path = source_map_file.path,
.input_path = try strings.concat(bun.default_allocator, &.{ input_path, ".map" }),
.loader = .json,
.input_loader = .file,
.output_kind = .sourcemap,
.size = @as(u32, @truncate(source_map_file.content.len)),
.data = .{ .buffer = .{
.allocator = bun.default_allocator,
.bytes = source_map_file.content,
} },
.side = .client,
.entry_point_index = null,
.is_executable = false,
}));
}
}

// Add additional output files
for (c.parse_graph.additional_output_files.items) |*src| {
const file = &memory_files.items[file_index];
file_index += 1;

try output_files.append(options.OutputFile.init(.{
.output_path = file.path,
.input_path = src.src_path.text,
.input_loader = src.loader,
.loader = src.loader,
.output_kind = src.output_kind,
.size = @as(u32, @truncate(file.content.len)),
.data = .{ .buffer = .{
.allocator = bun.default_allocator,
.bytes = file.content,
} },
.side = src.side,
.entry_point_index = src.entry_point_index,
.is_executable = false,
}));
}
}

const MemoryFile = struct {
path: []const u8,
content: []const u8,
content_type: []const u8,

pub fn deinit(self: *MemoryFile) void {
bun.default_allocator.free(self.path);
// Content is managed by the chunks/output files
}
};

const S3UploadTask = struct {
credentials: *bun.S3.S3Credentials,
path: []const u8,
content: []const u8,
content_type: []const u8,
upload_count: *std.atomic.Value(usize),
error_count: *std.atomic.Value(usize),
globalThis: *JSC.JSGlobalObject,

pub fn onComplete(result: bun.S3.S3UploadResult, ctx: *anyopaque) void {
const task: *S3UploadTask = @ptrCast(@alignCast(ctx));
defer {
task.credentials.deref();
bun.destroy(task);
}

switch (result) {
.success => {
_ = task.upload_count.fetchAdd(1, .release);
Output.prettyln("<r><d> Uploaded: {s}<r>", .{task.path});
},
.failure => |err| {
_ = task.error_count.fetchAdd(1, .release);
_ = task.upload_count.fetchAdd(1, .release);
Output.prettyErrorln("<r><red>Failed to upload {s}: {s}<r>", .{ task.path, err.message });
},
}
}
};

const bun = @import("bun");
const options = bun.options;
const Loader = bun.Loader;

src/bundler/s3_upload.zig (new file, 168 lines)
@@ -0,0 +1,168 @@
const std = @import("std");
const bun = @import("bun");
const strings = bun.strings;
const Output = bun.Output;
const options = @import("../options.zig");
const JSC = bun.JSC;

pub fn uploadOutputFilesToS3(
output_files: []const options.OutputFile,
s3_url: []const u8,
s3_credentials: ?*bun.S3.S3Credentials,
globalThis: *JSC.JSGlobalObject,
) !void {
// Parse S3 URL to extract bucket and prefix
var bucket: []const u8 = "";
var prefix: []const u8 = "";
if (strings.hasPrefixComptime(s3_url, "s3://")) {
const url_without_protocol = s3_url[5..];
if (strings.indexOfChar(url_without_protocol, '/')) |slash_index| {
bucket = url_without_protocol[0..slash_index];
prefix = url_without_protocol[slash_index + 1 ..];
} else {
bucket = url_without_protocol;
}
} else {
return error.InvalidS3URL;
}

// Get or create S3 credentials
const credentials = s3_credentials orelse brk: {
const env = globalThis.bunVM().transpiler.env;
const access_key_id = env.map.get("AWS_ACCESS_KEY_ID") orelse "";
const secret_access_key = env.map.get("AWS_SECRET_ACCESS_KEY") orelse "";

if (access_key_id.len == 0 or secret_access_key.len == 0) {
return error.MissingS3Credentials;
}

const creds = bun.new(bun.S3.S3Credentials, .{
.ref_count = .init(),
.accessKeyId = bun.default_allocator.dupe(u8, access_key_id) catch unreachable,
.secretAccessKey = bun.default_allocator.dupe(u8, secret_access_key) catch unreachable,
.bucket = bun.default_allocator.dupe(u8, bucket) catch unreachable,
.region = if (env.map.get("AWS_REGION")) |region|
bun.default_allocator.dupe(u8, region) catch unreachable
else
"us-east-1",
.endpoint = if (env.map.get("AWS_ENDPOINT_URL_S3")) |endpoint|
bun.default_allocator.dupe(u8, endpoint) catch unreachable
else
"",
.sessionToken = if (env.map.get("AWS_SESSION_TOKEN")) |token|
bun.default_allocator.dupe(u8, token) catch unreachable
else
"",
.insecure_http = false,
.virtual_hosted_style = false,
});
creds.ref();
break :brk creds;
};
defer if (s3_credentials == null) credentials.deref();

const total_files = output_files.len;
Output.prettyln("<r><d>Uploading {d} files to S3...<r>", .{total_files});

var upload_state = S3UploadState{
.total_count = total_files,
.completed_count = 0,
.error_count = 0,
.globalThis = globalThis,
.credentials = credentials,
.prefix = prefix,
};

// Create upload tasks for all files
const tasks = bun.default_allocator.alloc(S3UploadTask, output_files.len) catch unreachable;
defer bun.default_allocator.free(tasks);

for (output_files, 0..) |*output_file, i| {
// Skip files without buffer data
const content = switch (output_file.value) {
.buffer => |buf| buf.bytes,
else => continue,
};

// Prepare S3 path
const s3_path = if (prefix.len > 0)
std.fmt.allocPrint(bun.default_allocator, "{s}/{s}", .{ prefix, output_file.dest_path }) catch unreachable
else
bun.default_allocator.dupe(u8, output_file.dest_path) catch unreachable;

const content_type = output_file.loader.toMimeType(&.{});

tasks[i] = .{
.state = &upload_state,
.path = s3_path,
.content = content,
.content_type = content_type.value,
.index = i,
};

// Start the upload
credentials.ref();
bun.S3.upload(
credentials,
s3_path,
content,
content_type.value,
null, // acl
null, // proxy_url
null, // storage_class
S3UploadTask.onComplete,
&tasks[i],
);
}

// Wait for all uploads to complete using the event loop
while (upload_state.completed_count < upload_state.total_count) {
_ = globalThis.bunVM().tick();

// Check if we should timeout
// TODO: Add proper timeout handling
}

if (upload_state.error_count > 0) {
return error.S3UploadFailed;
}

Output.prettyln("<r><green>✓<r> Successfully uploaded {d} files to S3", .{total_files});
}

const S3UploadState = struct {
total_count: usize,
completed_count: usize,
error_count: usize,
globalThis: *JSC.JSGlobalObject,
credentials: *bun.S3.S3Credentials,
prefix: []const u8,
};

const S3UploadTask = struct {
state: *S3UploadState,
path: []const u8,
content: []const u8,
content_type: []const u8,
index: usize,

pub fn onComplete(result: bun.S3.S3UploadResult, ctx: *anyopaque) void {
const task: *S3UploadTask = @ptrCast(@alignCast(ctx));
defer {
task.state.credentials.deref();
bun.default_allocator.free(task.path);
}

switch (result) {
.success => {
task.state.completed_count += 1;
Output.prettyln("<r><d> Uploaded: {s}<r>", .{task.path});
},
.failure => |err| {
task.state.error_count += 1;
task.state.completed_count += 1;
Output.prettyErrorln("<r><red>Failed to upload {s}: {s}<r>", .{ task.path, err.message });
},
}
}
};

src/cli.zig (10 changed lines)
@@ -1123,11 +1123,9 @@ pub const Arguments = struct {
}
}

if (args.option("--conditions")) |conditions| {
var iter = std.mem.tokenizeAny(u8, conditions, " \t,");
while (iter.next()) |condition| {
try ctx.bundler_options.conditions.append(condition);
}
const conditions = args.options("--conditions");
for (conditions) |condition| {
try ctx.bundler_options.conditions.append(condition);
}

// Handle --s3 flag
@@ -1626,7 +1624,7 @@ pub const Command = struct {
compile_target: Cli.CompileTarget = .{},
windows_hide_console: bool = false,
windows_icon: ?[]const u8 = null,
conditions: std.ArrayList(string) = std.ArrayList(string){},
conditions: std.ArrayList(string) = std.ArrayList(string).init(bun.default_allocator),
s3_url: ?string = null,
dump_environment_variables: bool = false,
};
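The src/cli.zig changes above also add an s3_url field to bundler_options behind a --s3 flag. A hypothetical invocation, run through Bun's shell to stay in JavaScript; the exact flag syntax is an assumption based on the s3_url field added above:

// Hypothetical: pass an s3:// destination to `bun build` via the new --s3 flag.
import { $ } from "bun";
await $`bun build ./index.ts --outdir ./out --s3 s3://my-bucket/builds/latest`;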

@@ -1,61 +1,61 @@
// Generated with scripts/generate-perf-trace-events.sh
pub const PerfEvent = enum(i32) {
@"Bundler.BindImportsToExports",
@"Bundler.CloneLinkerGraph",
@"Bundler.CreateNamespaceExports",
@"Bundler.FigureOutCommonJS",
@"Bundler.MatchImportsWithExports",
@"Bundler.ParseJS",
@"Bundler.ParseJSON",
@"Bundler.ParseTOML",
@"Bundler.ResolveExportStarStatements",
@"Bundler.Worker.create",
@"Bundler.WrapDependencies",
@"Bundler.breakOutputIntoPieces",
@"Bundler.cloneAST",
@"Bundler.computeChunks",
@"Bundler.findAllImportedPartsInJSOrder",
@"Bundler.findReachableFiles",
@"Bundler.generateChunksInParallel",
@"Bundler.generateCodeForFileInChunkCss",
@"Bundler.generateCodeForFileInChunkJS",
@"Bundler.generateIsolatedHash",
@"Bundler.generateSourceMapForChunk",
@"Bundler.markFileLiveForTreeShaking",
@"Bundler.markFileReachableForCodeSplitting",
@"Bundler.onParseTaskComplete",
@"Bundler.postProcessJSChunk",
@"Bundler.readFile",
@"Bundler.renameSymbolsInChunk",
@"Bundler.scanImportsAndExports",
@"Bundler.treeShakingAndCodeSplitting",
@"Bundler.writeChunkToDisk",
@"Bundler.writeOutputFilesToDisk",
@"ExtractTarball.extract",
@"FolderResolver.readPackageJSONFromDisk.folder",
@"FolderResolver.readPackageJSONFromDisk.workspace",
@"JSBundler.addPlugin",
@"JSBundler.hasAnyMatches",
@"JSBundler.matchOnLoad",
@"JSBundler.matchOnResolve",
@"JSGlobalObject.create",
@"JSParser.analyze",
@"JSParser.parse",
@"JSParser.postvisit",
@"JSParser.visit",
@"StandaloneModuleGraph.serialize",
@"JSPrinter.print",
@"JSPrinter.printWithSourceMap",
@"ModuleResolver.resolve",
@"PackageInstaller.install",
@"PackageInstaller.installPatch",
@"PackageManifest.Serializer.loadByFile",
@"PackageManifest.Serializer.save",
@"RuntimeTranspilerCache.fromFile",
@"Bundler.Worker.create",
@"Bundler.CloneLinkerGraph",
@"Bundler.treeShakingAndCodeSplitting",
@"Bundler.markFileLiveForTreeShaking",
@"Bundler.markFileReachableForCodeSplitting",
@"Bundler.generateSourceMapForChunk",
@"Bundler.generateIsolatedHash",
@"Bundler.breakOutputIntoPieces",
@"Bundler.findReachableFiles",
@"Bundler.cloneAST",
@"Bundler.onParseTaskComplete",
@"JSGlobalObject.create",
@"RuntimeTranspilerCache.save",
@"RuntimeTranspilerCache.fromFile",
@"RuntimeTranspilerCache.toFile",
@"StandaloneModuleGraph.serialize",
@"Symbols.followAll",
@"TestCommand.printCodeCoverageLCov",
@"Bundler.ParseJS",
@"Bundler.ParseJSON",
@"Bundler.ParseTOML",
@"Bundler.readFile",
@"JSBundler.hasAnyMatches",
@"JSBundler.matchOnLoad",
@"JSBundler.matchOnResolve",
@"JSBundler.addPlugin",
@"Bundler.computeChunks",
@"Bundler.postProcessJSChunk",
@"Bundler.CreateNamespaceExports",
@"Bundler.writeOutputFilesToDisk",
@"Bundler.writeChunkToDisk",
@"Bundler.generateCodeForFileInChunkCss",
@"Bundler.renameSymbolsInChunk",
@"TestCommand.printCodeCoverageLCovAndText",
@"TestCommand.printCodeCoverageText",
@"TestCommand.printCodeCoverageLCov",
@"Bundler.scanImportsAndExports",
@"Bundler.FigureOutCommonJS",
@"Bundler.WrapDependencies",
@"Bundler.ResolveExportStarStatements",
@"Bundler.MatchImportsWithExports",
@"Bundler.BindImportsToExports",
@"Bundler.generateCodeForFileInChunkJS",
@"Bundler.generateChunksInParallel",
@"Bundler.findAllImportedPartsInJSOrder",
@"ExtractTarball.extract",
@"FolderResolver.readPackageJSONFromDisk.workspace",
@"FolderResolver.readPackageJSONFromDisk.folder",
@"PackageManifest.Serializer.save",
@"PackageManifest.Serializer.loadByFile",
@"Symbols.followAll",
@"PackageInstaller.install",
@"PackageInstaller.installPatch",
@"JSParser.parse",
@"JSParser.visit",
@"JSParser.analyze",
@"JSParser.postvisit",
};

test_s3_build.js (new file, 52 lines)
@@ -0,0 +1,52 @@
// Test script for S3 build functionality
import { $ } from "bun";

// Create a simple test file
await Bun.write(
"test_app.js",
`
console.log("Hello from S3 build test!");
export default { message: "This is a test build" };
`,
);

// Test 1: Basic S3 URL support
console.log("Test 1: Building with S3 URL...");
try {
const result = await Bun.build({
entrypoints: ["./test_app.js"],
outdir: "./out",
s3: "s3://my-bucket/builds/test",
});

console.log("Build result:", result);
console.log("Success:", result.success);
console.log("Outputs:", result.outputs?.length || 0);
} catch (error) {
console.error("Error:", error.message);
}

// Test 2: S3 with credentials object
console.log("\nTest 2: Building with S3 credentials object...");
try {
const result = await Bun.build({
entrypoints: ["./test_app.js"],
outdir: "./out",
s3: {
url: "s3://my-bucket/builds/test2",
accessKeyId: "test-key",
secretAccessKey: "test-secret",
region: "us-west-2",
},
});

console.log("Build result:", result);
console.log("Success:", result.success);
console.log("Outputs:", result.outputs?.length || 0);
} catch (error) {
console.error("Error:", error.message);
}

// Clean up
await $`rm -f test_app.js`;
await $`rm -rf out`;