mirror of https://github.com/oven-sh/bun
synced 2026-02-08 18:08:50 +00:00

Compare commits: dylan/pyth ... claude/san

3 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 78e434a1e0 |  |
|  | 559771a82e |  |
|  | 9fe79ab119 |  |

src/cli.zig (11 additions)

@@ -92,6 +92,7 @@ pub const AuditCommand = @import("./cli/audit_command.zig").AuditCommand;
 pub const InitCommand = @import("./cli/init_command.zig").InitCommand;
 pub const WhyCommand = @import("./cli/why_command.zig").WhyCommand;
 pub const FuzzilliCommand = @import("./cli/fuzzilli_command.zig").FuzzilliCommand;
+pub const SandboxCommand = @import("./cli/sandbox_command.zig").SandboxCommand;
 
 pub const Arguments = @import("./cli/Arguments.zig");
 
@@ -607,6 +608,7 @@ pub const Command = struct {
         RootCommandMatcher.case("help") => .HelpCommand,
 
         RootCommandMatcher.case("exec") => .ExecCommand,
+        RootCommandMatcher.case("sandbox") => .SandboxCommand,
 
         RootCommandMatcher.case("outdated") => .OutdatedCommand,
         RootCommandMatcher.case("publish") => .PublishCommand,
@@ -949,6 +951,10 @@ pub const Command = struct {
                 return error.UnrecognizedCommand;
             }
         },
+        .SandboxCommand => {
+            const ctx = try Command.init(allocator, log, .SandboxCommand);
+            try SandboxCommand.exec(ctx);
+        },
     }
 }
 
@@ -985,6 +991,7 @@ pub const Command = struct {
     AuditCommand,
     WhyCommand,
     FuzzilliCommand,
+    SandboxCommand,
 
     /// Used by crash reports.
     ///
@@ -1023,6 +1030,7 @@ pub const Command = struct {
         .AuditCommand => 'A',
         .WhyCommand => 'W',
         .FuzzilliCommand => 'F',
+        .SandboxCommand => 'S',
     };
 }
 
@@ -1335,6 +1343,9 @@ pub const Command = struct {
         Output.pretty(intro_text, .{});
         Output.flush();
     },
+    .SandboxCommand => {
+        SandboxCommand.printHelp();
+    },
     else => {
         HelpCommand.printWithReason(.explicit, false);
     },

src/cli/sandbox_command.zig (new file, 291 lines)

/// CLI command for running Sandboxfile-based sandboxes
///
/// Usage:
/// bun sandbox [options] [path]
///
/// Options:
/// --test Run tests only (no dev server)
/// --dry-run Parse and validate without executing
/// --no-isolate Disable namespace isolation (run directly on host)
///
/// Examples:
/// bun sandbox # Run Sandboxfile in current directory
/// bun sandbox ./my-project # Run Sandboxfile in specified directory
/// bun sandbox --test # Run tests only
pub const SandboxCommand = struct {
    pub fn exec(ctx: Command.Context) !void {
        const allocator = ctx.allocator;

        // Parse command line arguments
        var sandboxfile_path: []const u8 = "Sandboxfile";
        var test_only = false;
        var dry_run = false;
        var no_isolate = false;

        // Check all command line arguments for flags
        for (bun.argv) |arg| {
            if (std.mem.eql(u8, arg, "--test")) {
                test_only = true;
            } else if (std.mem.eql(u8, arg, "--dry-run")) {
                dry_run = true;
            } else if (std.mem.eql(u8, arg, "--no-isolate")) {
                no_isolate = true;
            }
        }

        // Skip the first positional (the command name "sandbox")
        const args = if (ctx.positionals.len > 0) ctx.positionals[1..] else ctx.positionals;

        // Check positionals for paths
        for (args) |arg| {
            if (!std.mem.startsWith(u8, arg, "-")) {
                // Path argument
                if (std.fs.path.isAbsolute(arg)) {
                    sandboxfile_path = arg;
                } else {
                    // Construct path to Sandboxfile
                    sandboxfile_path = std.fs.path.join(allocator, &.{ arg, "Sandboxfile" }) catch {
                        Output.prettyErrorln("<r><red>error<r>: Out of memory", .{});
                        Global.exit(1);
                    };
                }
            }
        }

        // Check if Sandboxfile exists
        const file = std.fs.cwd().openFile(sandboxfile_path, .{}) catch |err| {
            if (err == error.FileNotFound) {
                Output.prettyErrorln("<r><red>error<r>: Sandboxfile not found at <b>{s}<r>", .{sandboxfile_path});
                Output.prettyErrorln("", .{});
                Output.prettyErrorln("Create a Sandboxfile to define your sandbox environment:", .{});
                Output.prettyErrorln("", .{});
                Output.prettyErrorln(" <cyan># Sandboxfile<r>", .{});
                Output.prettyErrorln(" <green>FROM<r> host", .{});
                Output.prettyErrorln(" <green>WORKDIR<r> .", .{});
                Output.prettyErrorln(" <green>RUN<r> bun install", .{});
                Output.prettyErrorln(" <green>DEV<r> PORT=3000 bun run dev", .{});
                Output.prettyErrorln(" <green>TEST<r> bun test", .{});
                Output.prettyErrorln("", .{});
                Output.prettyErrorln(" <cyan># Isolation (Linux only)<r>", .{});
                Output.prettyErrorln(" <green>OUTPUT<r> src/ <d># Only these paths are preserved<r>", .{});
                Output.prettyErrorln(" <green>NET<r> registry.npmjs.org <d># Allowed network hosts<r>", .{});
                Output.prettyErrorln(" <green>SECRET<r> API_KEY <d># Masked environment variables<r>", .{});
                Output.prettyErrorln("", .{});
                Global.exit(1);
            }
            Output.prettyErrorln("<r><red>error<r>: Failed to open Sandboxfile: {s}", .{@errorName(err)});
            Global.exit(1);
        };
        defer file.close();

        // Read and parse the Sandboxfile
        const source = file.readToEndAlloc(allocator, 1024 * 1024) catch |err| {
            Output.prettyErrorln("<r><red>error<r>: Failed to read Sandboxfile: {s}", .{@errorName(err)});
            Global.exit(1);
        };
        defer allocator.free(source);

        var parser = sandboxfile.Parser.init(allocator, source);
        const config = parser.parse() catch |err| {
            Output.prettyErrorln("<r><red>error<r>: Failed to parse Sandboxfile: {s}", .{@errorName(err)});
            for (parser.getErrors()) |parse_err| {
                Output.prettyErrorln(" line {d}: {s}", .{ parse_err.line, parse_err.message });
            }
            Global.exit(1);
        };

        // Print parsed configuration
        Output.prettyErrorln("<cyan>sandbox<r>: Parsed Sandboxfile:", .{});
        Output.prettyErrorln(" FROM: {s}", .{switch (config.base_env) {
            .host => "host",
            .image => |img| img,
        }});
        Output.prettyErrorln(" WORKDIR: {s}", .{config.workdir});

        if (config.run_commands.items.len > 0) {
            Output.prettyErrorln(" RUN commands: {d}", .{config.run_commands.items.len});
        }
        if (config.dev) |dev| {
            Output.prettyErrorln(" DEV: {s}", .{dev.command});
        }
        if (config.services.items.len > 0) {
            Output.prettyErrorln(" Services: {d}", .{config.services.items.len});
        }
        if (config.tests.items.len > 0) {
            Output.prettyErrorln(" Tests: {d}", .{config.tests.items.len});
        }
        if (config.outputs.items.len > 0) {
            Output.prettyErrorln(" Outputs: {d}", .{config.outputs.items.len});
        }
        if (config.allowed_hosts.items.len > 0) {
            Output.prettyErrorln(" Allowed hosts: {d}", .{config.allowed_hosts.items.len});
        }
        if (config.secrets.items.len > 0) {
            Output.prettyErrorln(" Secrets: {d}", .{config.secrets.items.len});
        }
        Output.prettyErrorln("", .{});
        Output.flush();

        if (dry_run) {
            Output.prettyErrorln("<green>Sandboxfile is valid<r>", .{});
            Output.flush();
            Global.exit(0);
        }

        // Check for sandbox support
        if (comptime bun.Environment.isLinux) {
            const ns_support = sandbox.linux.checkNamespaceSupport();
            const overlay_support = sandbox.linux.checkOverlaySupport();

            if (ns_support and overlay_support and !no_isolate) {
                Output.prettyErrorln("<cyan>sandbox<r>: <green>Linux namespace isolation available<r>", .{});
            } else {
                if (no_isolate) {
                    Output.prettyErrorln("<cyan>sandbox<r>: <yellow>Isolation disabled by --no-isolate<r>", .{});
                } else {
                    Output.prettyErrorln("<cyan>sandbox<r>: <yellow>Isolation unavailable<r> (namespaces: {s}, overlayfs: {s})", .{
                        if (ns_support) "yes" else "no",
                        if (overlay_support) "yes" else "no",
                    });
                }
            }
        } else {
            Output.prettyErrorln("<cyan>sandbox<r>: <yellow>Isolation only available on Linux<r>", .{});
        }
        Output.flush();

        // Create and run the sandbox
        var sb = sandbox.Sandbox.init(allocator, config) catch |err| {
            Output.prettyErrorln("<r><red>error<r>: Failed to initialize sandbox: {s}", .{@errorName(err)});
            Global.exit(1);
        };
        defer sb.deinit();

        if (test_only) {
            // Only run tests
            Output.prettyErrorln("<cyan>sandbox<r>: Running tests only...", .{});
            Output.flush();

            for (config.tests.items) |test_cmd| {
                Output.prettyErrorln("<cyan>sandbox<r>: TEST <b>{s}<r>", .{test_cmd.command});
                Output.flush();

                const exit_code = sb.run(test_cmd.command) catch |err| {
                    Output.prettyErrorln("<r><red>error<r>: Test execution failed: {s}", .{@errorName(err)});
                    Global.exit(1);
                };

                if (exit_code != 0) {
                    Output.prettyErrorln("<red>Test failed with exit code {d}<r>", .{exit_code});
                    Global.exit(1);
                }
            }

            Output.prettyErrorln("<green>All tests passed<r>", .{});
            Global.exit(0);
        }

        // Run the full sandbox
        var result = sb.execute() catch |err| {
            Output.prettyErrorln("<r><red>error<r>: Sandbox execution failed: {s}", .{@errorName(err)});
            Global.exit(1);
        };
        defer result.deinit();

        // Print results
        if (!result.setup_success) {
            Output.prettyErrorln("<red>Setup failed<r>", .{});
            for (result.errors.items) |err_msg| {
                Output.prettyErrorln(" {s}", .{err_msg});
            }
            Global.exit(1);
        }

        if (!result.tests_success) {
            Output.prettyErrorln("<yellow>Some tests failed<r>", .{});
            for (result.errors.items) |err_msg| {
                Output.prettyErrorln(" {s}", .{err_msg});
            }
        }

        // Extract outputs if sandboxed
        if (result.sandboxed and config.outputs.items.len > 0) {
            Output.prettyErrorln("<cyan>sandbox<r>: Extracting outputs...", .{});
            sb.extractOutputs(".") catch |err| {
                Output.prettyErrorln("<r><yellow>warning<r>: Failed to extract outputs: {s}", .{@errorName(err)});
            };
        }

        Output.prettyErrorln("", .{});
        if (result.sandboxed) {
            Output.prettyErrorln("<green>Sandbox completed successfully<r> (isolated)", .{});
        } else {
            Output.prettyErrorln("<green>Sandbox completed successfully<r> (not isolated)", .{});
        }
        Output.flush();

        Global.exit(if (result.setup_success and result.tests_success) 0 else 1);
    }

    pub fn printHelp() void {
        const help_text =
            \\<b>Usage<r>: <b><green>bun sandbox<r> <cyan>[options]<r> <blue>[path]<r>
            \\ Run a sandbox environment defined by a Sandboxfile.
            \\
            \\<b>Options:<r>
            \\ <cyan>--test<r> Run tests only (no dev server)
            \\ <cyan>--dry-run<r> Parse and validate without executing
            \\ <cyan>--no-isolate<r> Disable namespace isolation (run directly on host)
            \\
            \\<b>Arguments:<r>
            \\ <blue>[path]<r> Path to directory containing Sandboxfile (default: current directory)
            \\
            \\<b>Examples:<r>
            \\ <d>Run sandbox in current directory<r>
            \\ <b><green>bun sandbox<r>
            \\
            \\ <d>Run sandbox in specified directory<r>
            \\ <b><green>bun sandbox<r> <blue>./my-project<r>
            \\
            \\ <d>Run tests only<r>
            \\ <b><green>bun sandbox<r> <cyan>--test<r>
            \\
            \\ <d>Validate Sandboxfile<r>
            \\ <b><green>bun sandbox<r> <cyan>--dry-run<r>
            \\
            \\<b>Isolation (Linux only):<r>
            \\ When running on Linux with user namespace support, the sandbox provides:
            \\ - <b>Ephemeral filesystem<r>: Changes outside OUTPUT paths are discarded
            \\ - <b>Network filtering<r>: Only NET-allowed hosts are accessible
            \\ - <b>Process isolation<r>: Separate PID namespace
            \\ - <b>Secret masking<r>: SECRET vars passed but hidden from inspection
            \\
            \\<b>Sandboxfile Directives:<r>
            \\ <green>FROM<r> Base environment (host or container image)
            \\ <green>WORKDIR<r> Working directory
            \\ <green>RUN<r> Setup command (executed once)
            \\ <green>DEV<r> Dev server command (PORT=, WATCH= options)
            \\ <green>SERVICE<r> Background service (name, PORT=, command)
            \\ <green>TEST<r> Test command
            \\ <green>OUTPUT<r> Output path (preserved from sandbox)
            \\ <green>LOGS<r> Log path pattern
            \\ <green>NET<r> Allowed network host
            \\ <green>SECRET<r> Secret environment variable
            \\ <green>INFER<r> Auto-generate from repo analysis
            \\
            \\Full documentation is available at <magenta>https://bun.com/docs/cli/sandbox<r>
            \\
        ;

        Output.pretty(help_text, .{});
        Output.flush();
    }
};

const std = @import("std");
const bun = @import("bun");
const sandbox = @import("../sandbox.zig");
const sandboxfile = @import("../sandboxfile.zig");
const Output = bun.Output;
const Global = bun.Global;
const Command = bun.cli.Command;
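
As a reference for what the --dry-run path above amounts to, here is a minimal sketch that drives the Sandboxfile parser directly and reports errors the same way the command does. The test harness and the inline source string are illustrative assumptions, and the import path assumes the same layout as the imports above.

const std = @import("std");
const sandboxfile = @import("../sandboxfile.zig");

test "dry-run style validation (sketch)" {
    // Hypothetical Sandboxfile source; the CLI reads this from disk instead.
    const source =
        \\FROM host
        \\WORKDIR .
        \\RUN bun install
        \\TEST bun test
    ;

    var parser = sandboxfile.Parser.init(std.testing.allocator, source);
    defer parser.deinit();

    // parse() surfaces structured errors through getErrors(), as the CLI prints them.
    const config = parser.parse() catch |err| {
        for (parser.getErrors()) |parse_err| {
            std.debug.print("line {d}: {s}\n", .{ parse_err.line, parse_err.message });
        }
        return err;
    };

    try std.testing.expectEqual(@as(usize, 1), config.run_commands.items.len);
    try std.testing.expectEqual(@as(usize, 1), config.tests.items.len);
}
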

src/sandbox.zig (new file, 260 lines)

//! Bun Sandbox Module
//!
//! Provides container-like isolation for running untrusted code.
//! Uses Linux namespaces and overlayfs on Linux, with fallback
//! behavior on other platforms.
//!
//! Features:
//! - Ephemeral filesystem (only OUTPUT paths preserved)
//! - Network access control (NET allowed hosts)
//! - Secret masking (SECRET env vars)
//! - Process isolation (PID namespace)

const std = @import("std");
const bun = @import("bun");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;

pub const linux = if (builtin.os.tag == .linux) @import("sandbox/linux.zig") else struct {};
pub const sandboxfile = @import("sandboxfile.zig");

const Output = bun.Output;

/// Platform-independent sandbox interface
pub const Sandbox = struct {
    allocator: Allocator,
    config: sandboxfile.Sandboxfile,

    /// Platform-specific implementation
    impl: if (builtin.os.tag == .linux) linux.Sandbox else void,

    /// Whether real sandboxing is available
    sandboxed: bool,

    /// Environment map for fallback mode
    env_map: std.process.EnvMap,

    const Self = @This();

    pub fn init(allocator: Allocator, config: sandboxfile.Sandboxfile) !Self {
        // Set up environment (always needed for BUN_SANDBOX marker)
        var env_map = std.process.EnvMap.init(allocator);

        // Inherit environment
        var parent_env = try std.process.getEnvMap(allocator);
        defer parent_env.deinit();

        var env_iter = parent_env.iterator();
        while (env_iter.next()) |entry| {
            try env_map.put(entry.key_ptr.*, entry.value_ptr.*);
        }

        // Add sandbox marker
        try env_map.put("BUN_SANDBOX", "1");

        var self = Self{
            .allocator = allocator,
            .config = config,
            .impl = undefined,
            .sandboxed = false,
            .env_map = env_map,
        };

        if (builtin.os.tag == .linux) {
            // Check if we can use real sandboxing
            if (linux.checkNamespaceSupport() and linux.checkOverlaySupport()) {
                // Get current directory as root
                var cwd_buf: [std.fs.max_path_bytes]u8 = undefined;
                const cwd = std.fs.cwd().realpath(".", &cwd_buf) catch ".";

                self.impl = linux.Sandbox.init(allocator, .{
                    .root_dir = cwd,
                    .workdir = config.workdir,
                    .output_paths = config.outputs.items,
                    .allowed_hosts = config.allowed_hosts.items,
                    .secrets = config.secrets.items,
                    .env = env_map,
                });

                self.impl.setup() catch |err| {
                    Output.prettyErrorln("<r><yellow>warning<r>: Failed to set up sandbox isolation: {s}", .{@errorName(err)});
                    Output.prettyErrorln("<r><yellow>warning<r>: Running without isolation (changes will persist)", .{});
                    self.sandboxed = false;
                    return self;
                };

                self.sandboxed = true;
            } else {
                Output.prettyErrorln("<r><yellow>warning<r>: Linux namespaces or overlayfs not available", .{});
                Output.prettyErrorln("<r><yellow>warning<r>: Running without isolation", .{});
            }
        } else {
            Output.prettyErrorln("<r><yellow>warning<r>: Sandbox isolation only available on Linux", .{});
            Output.prettyErrorln("<r><yellow>warning<r>: Running without isolation", .{});
        }

        return self;
    }

    /// Run a command inside the sandbox
    pub fn run(self: *Self, command: []const u8) !u8 {
        if (self.sandboxed and builtin.os.tag == .linux) {
            // Run inside isolated sandbox
            const argv = &[_][]const u8{ "/bin/sh", "-c", command };
            return self.impl.exec(argv);
        } else {
            // Fallback: run directly (no isolation)
            return self.runDirect(command);
        }
    }

    /// Run a command directly without isolation (fallback)
    fn runDirect(self: *Self, command: []const u8) !u8 {
        const result = try std.process.Child.run(.{
            .allocator = self.allocator,
            .argv = &.{ "/bin/sh", "-c", command },
            .cwd = self.config.workdir,
            .env_map = &self.env_map,
        });

        if (result.stdout.len > 0) {
            Output.prettyError("{s}", .{result.stdout});
            Output.flush();
        }
        if (result.stderr.len > 0) {
            Output.prettyError("{s}", .{result.stderr});
            Output.flush();
        }

        self.allocator.free(result.stdout);
        self.allocator.free(result.stderr);

        return switch (result.term) {
            .Exited => |code| code,
            .Signal => 128,
            .Stopped => 128,
            .Unknown => 128,
        };
    }

    /// Execute the full Sandboxfile
    pub fn execute(self: *Self) !SandboxResult {
        var result = SandboxResult{
            .setup_success = true,
            .tests_success = true,
            .sandboxed = self.sandboxed,
            .errors = .{},
            .allocator = self.allocator,
        };

        if (self.sandboxed) {
            Output.prettyErrorln("<cyan>sandbox<r>: Running in isolated sandbox", .{});
        } else {
            Output.prettyErrorln("<cyan>sandbox<r>: Running without isolation (changes will persist)", .{});
        }
        Output.flush();

        // Run setup commands
        for (self.config.run_commands.items) |run_cmd| {
            Output.prettyErrorln("<cyan>sandbox<r>: RUN <b>{s}<r>", .{run_cmd.command});
            Output.flush();

            const exit_code = self.run(run_cmd.command) catch |err| {
                result.setup_success = false;
                try result.errors.append(self.allocator, try std.fmt.allocPrint(
                    self.allocator,
                    "RUN command failed: {s} ({s})",
                    .{ run_cmd.command, @errorName(err) },
                ));
                return result;
            };

            if (exit_code != 0) {
                result.setup_success = false;
                try result.errors.append(self.allocator, try std.fmt.allocPrint(
                    self.allocator,
                    "RUN command failed: {s} (exit code {d})",
                    .{ run_cmd.command, exit_code },
                ));
                return result;
            }
        }

        // Run tests
        for (self.config.tests.items) |test_cmd| {
            Output.prettyErrorln("<cyan>sandbox<r>: TEST <b>{s}<r>", .{test_cmd.command});
            Output.flush();

            const exit_code = self.run(test_cmd.command) catch |err| {
                result.tests_success = false;
                try result.errors.append(self.allocator, try std.fmt.allocPrint(
                    self.allocator,
                    "TEST command failed: {s} ({s})",
                    .{ test_cmd.command, @errorName(err) },
                ));
                continue;
            };

            if (exit_code != 0) {
                result.tests_success = false;
                try result.errors.append(self.allocator, try std.fmt.allocPrint(
                    self.allocator,
                    "TEST command failed: {s} (exit code {d})",
                    .{ test_cmd.command, exit_code },
                ));
            }
        }

        return result;
    }

    /// Extract OUTPUT paths from sandbox to destination
    pub fn extractOutputs(self: *Self, dest_dir: []const u8) !void {
        if (self.sandboxed and builtin.os.tag == .linux) {
            try self.impl.extractOutputs(dest_dir);
        }
        // No-op if not sandboxed - files are already in place
    }

    pub fn deinit(self: *Self) void {
        if (self.sandboxed and builtin.os.tag == .linux) {
            self.impl.deinit();
        }
    }
};

pub const SandboxResult = struct {
    setup_success: bool,
    tests_success: bool,
    sandboxed: bool,
    errors: std.ArrayListUnmanaged([]const u8),
    allocator: Allocator,

    pub fn deinit(self: *SandboxResult) void {
        for (self.errors.items) |err| {
            self.allocator.free(err);
        }
        self.errors.deinit(self.allocator);
    }
};

/// Run a Sandboxfile
pub fn runSandboxfile(allocator: Allocator, config: sandboxfile.Sandboxfile) !SandboxResult {
    var sandbox = try Sandbox.init(allocator, config);
    defer sandbox.deinit();

    return sandbox.execute();
}

test "sandbox initialization" {
    const allocator = std.testing.allocator;

    var config = sandboxfile.Sandboxfile{};
    config.workdir = ".";

    var sandbox = try Sandbox.init(allocator, config);
    defer sandbox.deinit();

    // Should at least initialize without crashing
    // Actual sandboxing may not be available in test environment
}
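
For orientation, a hedged end-to-end sketch of this module's public surface: parse an in-memory Sandboxfile through the re-exported sandboxfile module, hand the config to runSandboxfile, and inspect the SandboxResult. The main() wrapper, allocator choice, and inline source string are illustrative assumptions, not part of the change itself.

const std = @import("std");
const sandbox = @import("sandbox.zig");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    // Parse an in-memory Sandboxfile (runSandboxfile takes an already-parsed config).
    var config = try sandbox.sandboxfile.parse(allocator,
        \\FROM host
        \\WORKDIR .
        \\RUN bun install
        \\TEST bun test
    );
    defer config.deinit(allocator);

    // Runs RUN then TEST commands; falls back to direct execution when
    // namespace/overlayfs isolation is unavailable.
    var result = try sandbox.runSandboxfile(allocator, config);
    defer result.deinit();

    if (!result.setup_success or !result.tests_success) {
        for (result.errors.items) |msg| std.debug.print("{s}\n", .{msg});
        std.process.exit(1);
    }
}
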

src/sandbox/linux.zig (new file, 511 lines)

|
||||
//! Linux Sandbox Implementation
|
||||
//!
|
||||
//! Provides true process isolation using Linux namespaces and overlayfs.
|
||||
//! This creates a container-like environment where:
|
||||
//!
|
||||
//! - The filesystem is ephemeral (overlayfs) - only OUTPUT paths are preserved
|
||||
//! - Network access is restricted to NET-allowed hosts
|
||||
//! - Processes run in isolated PID/mount/user namespaces
|
||||
//! - Secrets are available but masked from inspection
|
||||
//!
|
||||
//! Architecture:
|
||||
//! 1. Create user namespace (for unprivileged operation)
|
||||
//! 2. Create mount namespace
|
||||
//! 3. Set up overlayfs with:
|
||||
//! - lowerdir: original filesystem (read-only)
|
||||
//! - upperdir: ephemeral changes (tmpfs)
|
||||
//! - workdir: overlay work directory
|
||||
//! 4. Create network namespace with firewall rules
|
||||
//! 5. Create PID namespace for process isolation
|
||||
//! 6. Run commands inside the sandbox
|
||||
//! 7. Extract OUTPUT paths from upperdir
|
||||
|
||||
const std = @import("std");
|
||||
const bun = @import("bun");
|
||||
const linux = std.os.linux;
|
||||
const posix = std.posix;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
const Output = bun.Output;
|
||||
|
||||
/// Errors that can occur during sandbox operations
|
||||
pub const SandboxError = error{
|
||||
NamespaceCreationFailed,
|
||||
MountFailed,
|
||||
OverlaySetupFailed,
|
||||
NetworkSetupFailed,
|
||||
ForkFailed,
|
||||
ExecFailed,
|
||||
PermissionDenied,
|
||||
OutOfMemory,
|
||||
PathTooLong,
|
||||
InvalidConfiguration,
|
||||
};
|
||||
|
||||
/// Configuration for the sandbox
|
||||
pub const SandboxConfig = struct {
|
||||
/// Root directory to sandbox (will be overlayfs lowerdir)
|
||||
root_dir: []const u8,
|
||||
|
||||
/// Working directory inside the sandbox
|
||||
workdir: []const u8,
|
||||
|
||||
/// Paths that should be extracted after sandbox exits (relative to root)
|
||||
output_paths: []const []const u8,
|
||||
|
||||
/// Allowed network hosts (empty = deny all)
|
||||
allowed_hosts: []const []const u8,
|
||||
|
||||
/// Secret environment variable names (passed but masked)
|
||||
secrets: []const []const u8,
|
||||
|
||||
/// Environment variables to set
|
||||
env: std.process.EnvMap,
|
||||
};
|
||||
|
||||
/// Represents a running sandbox
|
||||
pub const Sandbox = struct {
|
||||
allocator: Allocator,
|
||||
config: SandboxConfig,
|
||||
|
||||
/// Temporary directory for overlay layers
|
||||
overlay_tmpdir: ?[]const u8,
|
||||
|
||||
/// Upper directory path (where changes are written)
|
||||
upperdir: ?[]const u8,
|
||||
|
||||
/// Work directory for overlayfs
|
||||
overlay_workdir: ?[]const u8,
|
||||
|
||||
/// Merged mount point
|
||||
merged_dir: ?[]const u8,
|
||||
|
||||
/// PID of the sandboxed process (in parent namespace)
|
||||
child_pid: ?posix.pid_t,
|
||||
|
||||
/// File descriptor for user namespace
|
||||
userns_fd: ?posix.fd_t,
|
||||
|
||||
/// File descriptor for mount namespace
|
||||
mntns_fd: ?posix.fd_t,
|
||||
|
||||
/// File descriptor for network namespace
|
||||
netns_fd: ?posix.fd_t,
|
||||
|
||||
/// File descriptor for PID namespace
|
||||
pidns_fd: ?posix.fd_t,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn init(allocator: Allocator, config: SandboxConfig) Self {
|
||||
return .{
|
||||
.allocator = allocator,
|
||||
.config = config,
|
||||
.overlay_tmpdir = null,
|
||||
.upperdir = null,
|
||||
.overlay_workdir = null,
|
||||
.merged_dir = null,
|
||||
.child_pid = null,
|
||||
.userns_fd = null,
|
||||
.mntns_fd = null,
|
||||
.netns_fd = null,
|
||||
.pidns_fd = null,
|
||||
};
|
||||
}
|
||||
|
||||
/// Create the sandbox namespaces and filesystem
|
||||
pub fn setup(self: *Self) SandboxError!void {
|
||||
// Step 1: Create temporary directory structure for overlayfs
|
||||
try self.setupOverlayDirs();
|
||||
|
||||
// Step 2: Create namespaces
|
||||
try self.createNamespaces();
|
||||
|
||||
// Step 3: Set up overlayfs mount
|
||||
try self.mountOverlay();
|
||||
|
||||
// Step 4: Set up network filtering (if allowed_hosts specified)
|
||||
if (self.config.allowed_hosts.len > 0) {
|
||||
try self.setupNetworkFilter();
|
||||
}
|
||||
}
|
||||
|
||||
/// Set up directory structure for overlayfs
|
||||
fn setupOverlayDirs(self: *Self) SandboxError!void {
|
||||
// Create a temporary directory for overlay layers
|
||||
// Generate a unique name using pid and random number
|
||||
var tmpdir_buf: [128]u8 = undefined;
|
||||
const pid = linux.getpid();
|
||||
|
||||
// Use /tmp/bun-sandbox-<pid> as the base
|
||||
const tmpdir = std.fmt.bufPrint(&tmpdir_buf, "/tmp/bun-sandbox-{d}", .{pid}) catch {
|
||||
return SandboxError.PathTooLong;
|
||||
};
|
||||
|
||||
// Create the directory
|
||||
std.fs.makeDirAbsolute(tmpdir) catch |err| {
|
||||
if (err != error.PathAlreadyExists) {
|
||||
return SandboxError.OverlaySetupFailed;
|
||||
}
|
||||
};
|
||||
|
||||
self.overlay_tmpdir = self.allocator.dupe(u8, tmpdir) catch {
|
||||
return SandboxError.OutOfMemory;
|
||||
};
|
||||
|
||||
// Create subdirectories: upper, work, merged
|
||||
const dirs = [_][]const u8{ "upper", "work", "merged" };
|
||||
for (dirs) |subdir| {
|
||||
const path = std.fs.path.join(self.allocator, &.{ self.overlay_tmpdir.?, subdir }) catch {
|
||||
return SandboxError.OutOfMemory;
|
||||
};
|
||||
|
||||
std.fs.makeDirAbsolute(path) catch {
|
||||
return SandboxError.OverlaySetupFailed;
|
||||
};
|
||||
|
||||
if (std.mem.eql(u8, subdir, "upper")) {
|
||||
self.upperdir = path;
|
||||
} else if (std.mem.eql(u8, subdir, "work")) {
|
||||
self.overlay_workdir = path;
|
||||
} else if (std.mem.eql(u8, subdir, "merged")) {
|
||||
self.merged_dir = path;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create Linux namespaces for isolation
|
||||
fn createNamespaces(self: *Self) SandboxError!void {
|
||||
// Use unshare to create new namespaces
|
||||
// CLONE_NEWUSER: New user namespace (allows unprivileged namespace creation)
|
||||
// CLONE_NEWNS: New mount namespace
|
||||
// CLONE_NEWPID: New PID namespace
|
||||
// CLONE_NEWNET: New network namespace
|
||||
|
||||
const flags: usize = linux.CLONE.NEWUSER |
|
||||
linux.CLONE.NEWNS |
|
||||
linux.CLONE.NEWPID |
|
||||
linux.CLONE.NEWNET;
|
||||
|
||||
const result = linux.unshare(flags);
|
||||
if (result != 0) {
|
||||
const err = posix.errno(result);
|
||||
Output.prettyErrorln("<r><red>error<r>: Failed to create namespaces: {s}", .{@tagName(err)});
|
||||
return SandboxError.NamespaceCreationFailed;
|
||||
}
|
||||
|
||||
// Write uid_map and gid_map to allow the current user to be root in the namespace
|
||||
try self.setupUserNamespace();
|
||||
}
|
||||
|
||||
/// Set up user namespace mappings
|
||||
fn setupUserNamespace(self: *Self) SandboxError!void {
|
||||
_ = self;
|
||||
|
||||
const uid = linux.getuid();
|
||||
const gid = linux.getgid();
|
||||
|
||||
// Write to /proc/self/uid_map: map uid 0 (root) in namespace to our uid outside
|
||||
{
|
||||
const uid_map_path = "/proc/self/uid_map";
|
||||
var buf: [64]u8 = undefined;
|
||||
const content = std.fmt.bufPrint(&buf, "0 {d} 1\n", .{uid}) catch {
|
||||
return SandboxError.NamespaceCreationFailed;
|
||||
};
|
||||
|
||||
const file = std.fs.openFileAbsolute(uid_map_path, .{ .mode = .write_only }) catch {
|
||||
return SandboxError.NamespaceCreationFailed;
|
||||
};
|
||||
defer file.close();
|
||||
|
||||
file.writeAll(content) catch {
|
||||
return SandboxError.NamespaceCreationFailed;
|
||||
};
|
||||
}
|
||||
|
||||
// Disable setgroups (required before writing gid_map for unprivileged users)
|
||||
{
|
||||
const setgroups_path = "/proc/self/setgroups";
|
||||
const file = std.fs.openFileAbsolute(setgroups_path, .{ .mode = .write_only }) catch {
|
||||
// May not exist on older kernels, continue
|
||||
return;
|
||||
};
|
||||
defer file.close();
|
||||
|
||||
file.writeAll("deny\n") catch {
|
||||
return SandboxError.NamespaceCreationFailed;
|
||||
};
|
||||
}
|
||||
|
||||
// Write to /proc/self/gid_map
|
||||
{
|
||||
const gid_map_path = "/proc/self/gid_map";
|
||||
var buf: [64]u8 = undefined;
|
||||
const content = std.fmt.bufPrint(&buf, "0 {d} 1\n", .{gid}) catch {
|
||||
return SandboxError.NamespaceCreationFailed;
|
||||
};
|
||||
|
||||
const file = std.fs.openFileAbsolute(gid_map_path, .{ .mode = .write_only }) catch {
|
||||
return SandboxError.NamespaceCreationFailed;
|
||||
};
|
||||
defer file.close();
|
||||
|
||||
file.writeAll(content) catch {
|
||||
return SandboxError.NamespaceCreationFailed;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Mount overlayfs combining the original root with ephemeral layer
|
||||
fn mountOverlay(self: *Self) SandboxError!void {
|
||||
// Build mount options string:
|
||||
// lowerdir=<root>,upperdir=<upper>,workdir=<work>
|
||||
var options_buf: [4096]u8 = undefined;
|
||||
const options = std.fmt.bufPrintZ(&options_buf, "lowerdir={s},upperdir={s},workdir={s}", .{
|
||||
self.config.root_dir,
|
||||
self.upperdir.?,
|
||||
self.overlay_workdir.?,
|
||||
}) catch {
|
||||
return SandboxError.PathTooLong;
|
||||
};
|
||||
|
||||
// Mount overlayfs
|
||||
const mount_result = linux.mount(
|
||||
@ptrCast("overlay"),
|
||||
@ptrCast(self.merged_dir.?.ptr),
|
||||
@ptrCast("overlay"),
|
||||
0,
|
||||
@intFromPtr(options.ptr),
|
||||
);
|
||||
|
||||
if (mount_result != 0) {
|
||||
const err = posix.errno(mount_result);
|
||||
Output.prettyErrorln("<r><red>error<r>: Failed to mount overlayfs: {s}", .{@tagName(err)});
|
||||
return SandboxError.MountFailed;
|
||||
}
|
||||
|
||||
// Make the mount private to prevent propagation
|
||||
const private_result = linux.mount(
|
||||
null,
|
||||
@ptrCast(self.merged_dir.?.ptr),
|
||||
null,
|
||||
linux.MS.PRIVATE,
|
||||
0,
|
||||
);
|
||||
|
||||
if (private_result != 0) {
|
||||
return SandboxError.MountFailed;
|
||||
}
|
||||
}
|
||||
|
||||
/// Set up network namespace filtering
|
||||
fn setupNetworkFilter(self: *Self) SandboxError!void {
|
||||
// In the network namespace, we need to:
|
||||
// 1. Set up a loopback interface
|
||||
// 2. Configure iptables/nftables rules to only allow traffic to allowed_hosts
|
||||
|
||||
// For now, the network namespace starts with no connectivity.
|
||||
// We'd need to set up a veth pair or use slirp4netns for proper networking.
|
||||
// This is a simplified implementation that blocks all external network access.
|
||||
|
||||
_ = self;
|
||||
// TODO: Implement proper network filtering with veth/slirp4netns
|
||||
// For MVP, network namespace isolation means no network access at all
|
||||
}
|
||||
|
||||
/// Run a command inside the sandbox
|
||||
pub fn exec(self: *Self, argv: []const []const u8) SandboxError!u8 {
|
||||
// Fork a child process
|
||||
const pid = linux.fork();
|
||||
|
||||
if (pid < 0) {
|
||||
return SandboxError.ForkFailed;
|
||||
}
|
||||
|
||||
if (pid == 0) {
|
||||
// Child process - we're inside the sandbox
|
||||
self.childExec(argv) catch {
|
||||
std.process.exit(127);
|
||||
};
|
||||
unreachable;
|
||||
}
|
||||
|
||||
// Parent process - wait for child
|
||||
self.child_pid = @intCast(pid);
|
||||
|
||||
var status: u32 = 0;
|
||||
_ = linux.waitpid(@intCast(pid), &status, 0);
|
||||
|
||||
// Extract exit code
|
||||
if (linux.W.IFEXITED(status)) {
|
||||
return linux.W.EXITSTATUS(status);
|
||||
}
|
||||
|
||||
return 128; // Killed by signal
|
||||
}
|
||||
|
||||
/// Execute in child process (inside sandbox)
|
||||
fn childExec(self: *Self, argv: []const []const u8) !void {
|
||||
// Change root to the merged overlay directory
|
||||
try std.posix.chdir(self.merged_dir.?);
|
||||
|
||||
// Convert argv to null-terminated format
|
||||
var argv_ptrs: [256]?[*:0]const u8 = undefined;
|
||||
for (argv, 0..) |arg, i| {
|
||||
if (i >= 255) break;
|
||||
argv_ptrs[i] = @ptrCast(arg.ptr);
|
||||
}
|
||||
argv_ptrs[argv.len] = null;
|
||||
|
||||
// Set up environment with secrets
|
||||
var envp_ptrs: [256]?[*:0]const u8 = undefined;
|
||||
var env_idx: usize = 0;
|
||||
|
||||
var env_iter = self.config.env.iterator();
|
||||
while (env_iter.next()) |entry| {
|
||||
if (env_idx >= 255) break;
|
||||
|
||||
// Format as KEY=VALUE
|
||||
var buf: [4096]u8 = undefined;
|
||||
const env_str = std.fmt.bufPrintZ(&buf, "{s}={s}", .{
|
||||
entry.key_ptr.*,
|
||||
entry.value_ptr.*,
|
||||
}) catch continue;
|
||||
|
||||
// Duplicate to persist
|
||||
const duped = self.allocator.dupeZ(u8, env_str) catch continue;
|
||||
envp_ptrs[env_idx] = duped;
|
||||
env_idx += 1;
|
||||
}
|
||||
envp_ptrs[env_idx] = null;
|
||||
|
||||
// Execute the command
|
||||
const err = linux.execve(
|
||||
argv_ptrs[0].?,
|
||||
@ptrCast(&argv_ptrs),
|
||||
@ptrCast(&envp_ptrs),
|
||||
);
|
||||
|
||||
// If we get here, execve failed
|
||||
_ = err;
|
||||
return error.ExecFailed;
|
||||
}
|
||||
|
||||
/// Extract OUTPUT paths from the overlay upper directory
|
||||
pub fn extractOutputs(self: *Self, dest_dir: []const u8) !void {
|
||||
for (self.config.output_paths) |output_path| {
|
||||
const src = std.fs.path.join(self.allocator, &.{ self.upperdir.?, output_path }) catch continue;
|
||||
defer self.allocator.free(src);
|
||||
|
||||
const dst = std.fs.path.join(self.allocator, &.{ dest_dir, output_path }) catch continue;
|
||||
defer self.allocator.free(dst);
|
||||
|
||||
// Check if the path exists in upperdir (was modified)
|
||||
const src_stat = std.fs.cwd().statFile(src) catch continue;
|
||||
|
||||
if (src_stat.kind == .directory) {
|
||||
// Recursively copy directory
|
||||
self.copyDirRecursive(src, dst) catch continue;
|
||||
} else {
|
||||
// Copy file
|
||||
std.fs.copyFileAbsolute(src, dst, .{}) catch continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn copyDirRecursive(self: *Self, src: []const u8, dst: []const u8) !void {
|
||||
// Create destination directory
|
||||
std.fs.makeDirAbsolute(dst) catch |err| {
|
||||
if (err != error.PathAlreadyExists) return err;
|
||||
};
|
||||
|
||||
var src_dir = try std.fs.openDirAbsolute(src, .{ .iterate = true });
|
||||
defer src_dir.close();
|
||||
|
||||
var iter = src_dir.iterate();
|
||||
while (try iter.next()) |entry| {
|
||||
const src_path = try std.fs.path.join(self.allocator, &.{ src, entry.name });
|
||||
defer self.allocator.free(src_path);
|
||||
|
||||
const dst_path = try std.fs.path.join(self.allocator, &.{ dst, entry.name });
|
||||
defer self.allocator.free(dst_path);
|
||||
|
||||
if (entry.kind == .directory) {
|
||||
try self.copyDirRecursive(src_path, dst_path);
|
||||
} else {
|
||||
std.fs.copyFileAbsolute(src_path, dst_path, .{}) catch continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Clean up sandbox resources
|
||||
pub fn cleanup(self: *Self) void {
|
||||
// Unmount overlayfs
|
||||
if (self.merged_dir) |merged| {
|
||||
_ = linux.umount(@ptrCast(merged.ptr));
|
||||
}
|
||||
|
||||
// Remove temporary directories
|
||||
if (self.overlay_tmpdir) |tmpdir| {
|
||||
std.fs.deleteTreeAbsolute(tmpdir) catch {};
|
||||
self.allocator.free(tmpdir);
|
||||
}
|
||||
|
||||
if (self.upperdir) |upper| {
|
||||
self.allocator.free(upper);
|
||||
}
|
||||
if (self.overlay_workdir) |work| {
|
||||
self.allocator.free(work);
|
||||
}
|
||||
if (self.merged_dir) |merged| {
|
||||
self.allocator.free(merged);
|
||||
}
|
||||
|
||||
// Kill child process if still running
|
||||
if (self.child_pid) |pid| {
|
||||
_ = linux.kill(pid, linux.SIG.KILL);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Self) void {
|
||||
self.cleanup();
|
||||
}
|
||||
};
|
||||
|
||||
/// Check if the current system supports unprivileged user namespaces
|
||||
pub fn checkNamespaceSupport() bool {
|
||||
// Try to read /proc/sys/kernel/unprivileged_userns_clone
|
||||
const file = std.fs.openFileAbsolute("/proc/sys/kernel/unprivileged_userns_clone", .{}) catch {
|
||||
// File doesn't exist - assume namespaces are supported (newer kernels)
|
||||
return true;
|
||||
};
|
||||
defer file.close();
|
||||
|
||||
var buf: [2]u8 = undefined;
|
||||
const bytes_read = file.read(&buf) catch return false;
|
||||
|
||||
if (bytes_read > 0 and buf[0] == '1') {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Check if overlayfs is available
|
||||
pub fn checkOverlaySupport() bool {
|
||||
const file = std.fs.openFileAbsolute("/proc/filesystems", .{}) catch {
|
||||
return false;
|
||||
};
|
||||
defer file.close();
|
||||
|
||||
var buf: [4096]u8 = undefined;
|
||||
const bytes_read = file.readAll(&buf) catch return false;
|
||||
|
||||
return std.mem.indexOf(u8, buf[0..bytes_read], "overlay") != null;
|
||||
}
|
||||
|
||||
test "namespace support check" {
|
||||
// This test just verifies the check functions don't crash
|
||||
_ = checkNamespaceSupport();
|
||||
_ = checkOverlaySupport();
|
||||
}
|
||||
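
To make the overlay layering above concrete, here is a small sketch of the capability checks and of the mount option string that mountOverlay() assembles. The layer paths are hypothetical, and nothing in this sketch performs the actual unshare or mount.

const std = @import("std");
const sandbox_linux = @import("sandbox/linux.zig");

pub fn main() !void {
    // Capability probe, as the CLI does before enabling isolation.
    const namespaces_ok = sandbox_linux.checkNamespaceSupport();
    const overlay_ok = sandbox_linux.checkOverlaySupport();
    std.debug.print("namespaces: {}, overlayfs: {}\n", .{ namespaces_ok, overlay_ok });

    // The option string mountOverlay() builds, with hypothetical layer paths:
    // the project root is the read-only lower layer, writes land in the tmpdir's
    // "upper" layer, and "work" is overlayfs bookkeeping space.
    var buf: [4096]u8 = undefined;
    const options = try std.fmt.bufPrintZ(&buf, "lowerdir={s},upperdir={s},workdir={s}", .{
        "/home/user/project",
        "/tmp/bun-sandbox-1234/upper",
        "/tmp/bun-sandbox-1234/work",
    });
    std.debug.print("mount -t overlay overlay -o {s} /tmp/bun-sandbox-1234/merged\n", .{options});
}
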

src/sandboxfile.zig (new file, 628 lines)

|
||||
/// Sandboxfile Parser
|
||||
///
|
||||
/// A declarative spec for agent sandboxes. Sandboxfiles define isolated
|
||||
/// environments for running agents with controlled network access, secrets,
|
||||
/// and file system permissions.
|
||||
///
|
||||
/// Example Sandboxfile:
|
||||
/// ```
|
||||
/// # Sandboxfile
|
||||
///
|
||||
/// FROM host
|
||||
/// WORKDIR .
|
||||
///
|
||||
/// RUN bun install
|
||||
///
|
||||
/// DEV PORT=3000 WATCH=src/** bun run dev
|
||||
/// SERVICE db PORT=5432 docker compose up postgres
|
||||
/// SERVICE redis PORT=6379 redis-server
|
||||
/// TEST bun test
|
||||
///
|
||||
/// OUTPUT src/
|
||||
/// OUTPUT tests/
|
||||
/// OUTPUT package.json
|
||||
///
|
||||
/// LOGS logs/*
|
||||
///
|
||||
/// NET registry.npmjs.org
|
||||
/// NET api.stripe.com
|
||||
///
|
||||
/// SECRET STRIPE_API_KEY
|
||||
/// ```
|
||||
const std = @import("std");
|
||||
const bun = @import("bun");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const ArrayList = std.ArrayListUnmanaged;
|
||||
|
||||
/// A key-value pair for directive options (e.g., PORT=3000)
|
||||
pub const KeyValue = struct {
|
||||
key: []const u8,
|
||||
value: []const u8,
|
||||
};
|
||||
|
||||
/// Base environment specification
|
||||
pub const BaseEnv = union(enum) {
|
||||
/// Use the host environment directly
|
||||
host,
|
||||
/// Use a container image
|
||||
image: []const u8,
|
||||
};
|
||||
|
||||
/// A RUN directive - setup command executed once per agent
|
||||
pub const RunDirective = struct {
|
||||
command: []const u8,
|
||||
};
|
||||
|
||||
/// A DEV directive - primary dev server
|
||||
pub const DevDirective = struct {
|
||||
name: ?[]const u8,
|
||||
port: ?u16,
|
||||
watch: ?[]const u8,
|
||||
command: []const u8,
|
||||
};
|
||||
|
||||
/// A SERVICE directive - background process
|
||||
pub const ServiceDirective = struct {
|
||||
name: []const u8,
|
||||
port: ?u16,
|
||||
watch: ?[]const u8,
|
||||
command: []const u8,
|
||||
};
|
||||
|
||||
/// A TEST directive - verification command
|
||||
pub const TestDirective = struct {
|
||||
name: ?[]const u8,
|
||||
command: []const u8,
|
||||
};
|
||||
|
||||
/// Parsed Sandboxfile configuration
|
||||
pub const Sandboxfile = struct {
|
||||
/// Base environment (FROM directive)
|
||||
base_env: BaseEnv = .host,
|
||||
|
||||
/// Working directory (WORKDIR directive)
|
||||
workdir: []const u8 = ".",
|
||||
|
||||
/// Setup commands (RUN directives)
|
||||
run_commands: ArrayList(RunDirective) = .{},
|
||||
|
||||
/// Primary dev server (DEV directive)
|
||||
dev: ?DevDirective = null,
|
||||
|
||||
/// Background services (SERVICE directives)
|
||||
services: ArrayList(ServiceDirective) = .{},
|
||||
|
||||
/// Test commands (TEST directives)
|
||||
tests: ArrayList(TestDirective) = .{},
|
||||
|
||||
/// Output paths - files extracted from agent (OUTPUT directives)
|
||||
outputs: ArrayList([]const u8) = .{},
|
||||
|
||||
/// Log paths - streams agent can tail (LOGS directives)
|
||||
logs: ArrayList([]const u8) = .{},
|
||||
|
||||
/// Allowed network hosts (NET directives)
|
||||
allowed_hosts: ArrayList([]const u8) = .{},
|
||||
|
||||
/// Secret environment variable names (SECRET directives)
|
||||
secrets: ArrayList([]const u8) = .{},
|
||||
|
||||
/// Whether INFER mode is enabled
|
||||
infer_enabled: bool = false,
|
||||
|
||||
/// INFER pattern (default "*" when enabled)
|
||||
infer_pattern: ?[]const u8 = null,
|
||||
|
||||
pub fn deinit(self: *Sandboxfile, allocator: Allocator) void {
|
||||
self.run_commands.deinit(allocator);
|
||||
self.services.deinit(allocator);
|
||||
self.tests.deinit(allocator);
|
||||
self.outputs.deinit(allocator);
|
||||
self.logs.deinit(allocator);
|
||||
self.allowed_hosts.deinit(allocator);
|
||||
self.secrets.deinit(allocator);
|
||||
}
|
||||
};
|
||||
|
||||
/// Sandboxfile parsing errors
|
||||
pub const ParseError = error{
|
||||
InvalidDirective,
|
||||
MissingArgument,
|
||||
InvalidPort,
|
||||
DuplicateFrom,
|
||||
DuplicateWorkdir,
|
||||
DuplicateDev,
|
||||
MissingServiceName,
|
||||
UnexpectedToken,
|
||||
OutOfMemory,
|
||||
};
|
||||
|
||||
/// Parser for Sandboxfile format
|
||||
pub const Parser = struct {
|
||||
allocator: Allocator,
|
||||
source: []const u8,
|
||||
line_number: usize,
|
||||
result: Sandboxfile,
|
||||
errors: ArrayList(ParseErrorInfo),
|
||||
|
||||
pub const ParseErrorInfo = struct {
|
||||
line: usize,
|
||||
message: []const u8,
|
||||
};
|
||||
|
||||
pub fn init(allocator: Allocator, source: []const u8) Parser {
|
||||
return .{
|
||||
.allocator = allocator,
|
||||
.source = source,
|
||||
.line_number = 0,
|
||||
.result = .{},
|
||||
.errors = .{},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Parser) void {
|
||||
self.result.deinit(self.allocator);
|
||||
self.errors.deinit(self.allocator);
|
||||
}
|
||||
|
||||
/// Parse the Sandboxfile and return the result
|
||||
pub fn parse(self: *Parser) ParseError!Sandboxfile {
|
||||
var lines = std.mem.splitScalar(u8, self.source, '\n');
|
||||
|
||||
while (lines.next()) |raw_line| {
|
||||
self.line_number += 1;
|
||||
|
||||
// Handle CRLF line endings
|
||||
const line = if (raw_line.len > 0 and raw_line[raw_line.len - 1] == '\r')
|
||||
raw_line[0 .. raw_line.len - 1]
|
||||
else
|
||||
raw_line;
|
||||
|
||||
// Skip empty lines and comments
|
||||
const trimmed = std.mem.trim(u8, line, " \t");
|
||||
if (trimmed.len == 0 or trimmed[0] == '#') continue;
|
||||
|
||||
try self.parseLine(trimmed);
|
||||
}
|
||||
|
||||
return self.result;
|
||||
}
|
||||
|
||||
fn parseLine(self: *Parser, line: []const u8) ParseError!void {
|
||||
// Find the directive (first word)
|
||||
const space_idx = std.mem.indexOfAny(u8, line, " \t");
|
||||
const directive = if (space_idx) |idx| line[0..idx] else line;
|
||||
const rest = if (space_idx) |idx| std.mem.trimLeft(u8, line[idx..], " \t") else "";
|
||||
|
||||
if (std.mem.eql(u8, directive, "FROM")) {
|
||||
try self.parseFrom(rest);
|
||||
} else if (std.mem.eql(u8, directive, "WORKDIR")) {
|
||||
try self.parseWorkdir(rest);
|
||||
} else if (std.mem.eql(u8, directive, "RUN")) {
|
||||
try self.parseRun(rest);
|
||||
} else if (std.mem.eql(u8, directive, "DEV")) {
|
||||
try self.parseDev(rest);
|
||||
} else if (std.mem.eql(u8, directive, "SERVICE")) {
|
||||
try self.parseService(rest);
|
||||
} else if (std.mem.eql(u8, directive, "TEST")) {
|
||||
try self.parseTest(rest);
|
||||
} else if (std.mem.eql(u8, directive, "OUTPUT")) {
|
||||
try self.parseOutput(rest);
|
||||
} else if (std.mem.eql(u8, directive, "LOGS")) {
|
||||
try self.parseLogs(rest);
|
||||
} else if (std.mem.eql(u8, directive, "NET")) {
|
||||
try self.parseNet(rest);
|
||||
} else if (std.mem.eql(u8, directive, "SECRET")) {
|
||||
try self.parseSecret(rest);
|
||||
} else if (std.mem.eql(u8, directive, "INFER")) {
|
||||
try self.parseInfer(rest);
|
||||
} else {
|
||||
try self.addError("Unknown directive");
|
||||
return ParseError.InvalidDirective;
|
||||
}
|
||||
}
|
||||
|
||||
fn parseFrom(self: *Parser, rest: []const u8) ParseError!void {
|
||||
if (rest.len == 0) {
|
||||
try self.addError("FROM requires an argument (e.g., 'host' or an image name)");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
|
||||
if (std.mem.eql(u8, rest, "host")) {
|
||||
self.result.base_env = .host;
|
||||
} else {
|
||||
self.result.base_env = .{ .image = rest };
|
||||
}
|
||||
}
|
||||
|
||||
fn parseWorkdir(self: *Parser, rest: []const u8) ParseError!void {
|
||||
if (rest.len == 0) {
|
||||
try self.addError("WORKDIR requires a path argument");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
self.result.workdir = rest;
|
||||
}
|
||||
|
||||
fn parseRun(self: *Parser, rest: []const u8) ParseError!void {
|
||||
if (rest.len == 0) {
|
||||
try self.addError("RUN requires a command");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
try self.result.run_commands.append(self.allocator, .{ .command = rest });
|
||||
}
|
||||
|
||||
fn parseDev(self: *Parser, rest: []const u8) ParseError!void {
|
||||
var dev = DevDirective{
|
||||
.name = null,
|
||||
.port = null,
|
||||
.watch = null,
|
||||
.command = "",
|
||||
};
|
||||
|
||||
var remaining = rest;
|
||||
|
||||
// Parse optional key=value pairs before the command
|
||||
while (remaining.len > 0) {
|
||||
const parsed = self.parseKeyValueOrToken(remaining);
|
||||
if (parsed.key_value) |kv| {
|
||||
if (std.mem.eql(u8, kv.key, "PORT")) {
|
||||
dev.port = std.fmt.parseInt(u16, kv.value, 10) catch {
|
||||
try self.addError("Invalid port number");
|
||||
return ParseError.InvalidPort;
|
||||
};
|
||||
} else if (std.mem.eql(u8, kv.key, "WATCH")) {
|
||||
dev.watch = kv.value;
|
||||
} else {
|
||||
// Not a recognized key=value, treat as start of command
|
||||
break;
|
||||
}
|
||||
remaining = parsed.rest;
|
||||
} else {
|
||||
// Not a key=value, must be the command
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
remaining = std.mem.trimLeft(u8, remaining, " \t");
|
||||
if (remaining.len == 0) {
|
||||
try self.addError("DEV requires a command");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
dev.command = remaining;
|
||||
self.result.dev = dev;
|
||||
}
|
||||
|
||||
fn parseService(self: *Parser, rest: []const u8) ParseError!void {
|
||||
if (rest.len == 0) {
|
||||
try self.addError("SERVICE requires a name and command");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
|
||||
// First token is the service name
|
||||
const space_idx = std.mem.indexOfAny(u8, rest, " \t");
|
||||
const name = if (space_idx) |idx| rest[0..idx] else {
|
||||
try self.addError("SERVICE requires a command after the name");
|
||||
return ParseError.MissingArgument;
|
||||
};
|
||||
|
||||
var service = ServiceDirective{
|
||||
.name = name,
|
||||
.port = null,
|
||||
.watch = null,
|
||||
.command = "",
|
||||
};
|
||||
|
||||
var remaining = std.mem.trimLeft(u8, rest[space_idx.?..], " \t");
|
||||
|
||||
// Parse optional key=value pairs before the command
|
||||
while (remaining.len > 0) {
|
||||
const parsed = self.parseKeyValueOrToken(remaining);
|
||||
if (parsed.key_value) |kv| {
|
||||
if (std.mem.eql(u8, kv.key, "PORT")) {
|
||||
service.port = std.fmt.parseInt(u16, kv.value, 10) catch {
|
||||
try self.addError("Invalid port number");
|
||||
return ParseError.InvalidPort;
|
||||
};
|
||||
} else if (std.mem.eql(u8, kv.key, "WATCH")) {
|
||||
service.watch = kv.value;
|
||||
} else {
|
||||
// Not a recognized key=value, treat as start of command
|
||||
break;
|
||||
}
|
||||
remaining = parsed.rest;
|
||||
} else {
|
||||
// Not a key=value, must be the command
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
remaining = std.mem.trimLeft(u8, remaining, " \t");
|
||||
if (remaining.len == 0) {
|
||||
try self.addError("SERVICE requires a command");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
service.command = remaining;
|
||||
try self.result.services.append(self.allocator, service);
|
||||
}
|
||||
|
||||
fn parseTest(self: *Parser, rest: []const u8) ParseError!void {
|
||||
if (rest.len == 0) {
|
||||
try self.addError("TEST requires a command");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
|
||||
try self.result.tests.append(self.allocator, .{
|
||||
.name = null,
|
||||
.command = rest,
|
||||
});
|
||||
}
|
||||
|
||||
fn parseOutput(self: *Parser, rest: []const u8) ParseError!void {
|
||||
if (rest.len == 0) {
|
||||
try self.addError("OUTPUT requires a path");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
try self.result.outputs.append(self.allocator, rest);
|
||||
}
|
||||
|
||||
fn parseLogs(self: *Parser, rest: []const u8) ParseError!void {
|
||||
if (rest.len == 0) {
|
||||
try self.addError("LOGS requires a path pattern");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
try self.result.logs.append(self.allocator, rest);
|
||||
}
|
||||
|
||||
fn parseNet(self: *Parser, rest: []const u8) ParseError!void {
|
||||
if (rest.len == 0) {
|
||||
try self.addError("NET requires a host");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
try self.result.allowed_hosts.append(self.allocator, rest);
|
||||
}
|
||||
|
||||
fn parseSecret(self: *Parser, rest: []const u8) ParseError!void {
|
||||
if (rest.len == 0) {
|
||||
try self.addError("SECRET requires an environment variable name");
|
||||
return ParseError.MissingArgument;
|
||||
}
|
||||
try self.result.secrets.append(self.allocator, rest);
|
||||
}
|
||||
|
||||
fn parseInfer(self: *Parser, rest: []const u8) ParseError!void {
|
||||
self.result.infer_enabled = true;
|
||||
if (rest.len > 0) {
|
||||
self.result.infer_pattern = rest;
|
||||
} else {
|
||||
self.result.infer_pattern = "*";
|
||||
}
|
||||
}
|
||||
|
||||
const ParsedKeyValue = struct {
|
||||
key_value: ?KeyValue,
|
||||
rest: []const u8,
|
||||
};
|
||||
|
||||
/// Try to parse a KEY=VALUE token from the beginning of the string.
|
||||
/// Returns the key-value pair if found, and the remaining string.
|
||||
fn parseKeyValueOrToken(self: *Parser, input: []const u8) ParsedKeyValue {
|
||||
_ = self;
|
||||
const trimmed = std.mem.trimLeft(u8, input, " \t");
|
||||
if (trimmed.len == 0) return .{ .key_value = null, .rest = "" };
|
||||
|
||||
// Find the end of this token (space or tab)
|
||||
const token_end = std.mem.indexOfAny(u8, trimmed, " \t") orelse trimmed.len;
|
||||
const token = trimmed[0..token_end];
|
||||
|
||||
// Check if this token contains '='
|
||||
if (std.mem.indexOfScalar(u8, token, '=')) |eq_idx| {
|
||||
const key = token[0..eq_idx];
|
||||
const value = token[eq_idx + 1 ..];
|
||||
|
||||
// Only treat as key=value if key is uppercase (like PORT, WATCH)
|
||||
var is_uppercase_key = true;
|
||||
for (key) |c| {
|
||||
if (c < 'A' or c > 'Z') {
|
||||
if (c != '_') {
|
||||
is_uppercase_key = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (is_uppercase_key and key.len > 0) {
|
||||
return .{
|
||||
.key_value = .{ .key = key, .value = value },
|
||||
.rest = if (token_end < trimmed.len) trimmed[token_end..] else "",
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return .{ .key_value = null, .rest = trimmed };
|
||||
}
|
||||
|
||||
fn addError(self: *Parser, message: []const u8) ParseError!void {
|
||||
try self.errors.append(self.allocator, .{
|
||||
.line = self.line_number,
|
||||
.message = message,
|
||||
});
|
||||
}
|
||||
|
||||
/// Get all parsing errors
|
||||
pub fn getErrors(self: *const Parser) []const ParseErrorInfo {
|
||||
return self.errors.items;
|
||||
}
|
||||
};
|
||||
|
||||
/// Parse a Sandboxfile from source text
|
||||
pub fn parse(allocator: Allocator, source: []const u8) ParseError!Sandboxfile {
|
||||
var parser = Parser.init(allocator, source);
|
||||
return parser.parse();
|
||||
}
|
||||
|
||||
/// Parse a Sandboxfile from a file path
|
||||
pub fn parseFile(allocator: Allocator, path: []const u8) !Sandboxfile {
|
||||
const file = try std.fs.cwd().openFile(path, .{});
|
||||
defer file.close();
|
||||
|
||||
const source = try file.readToEndAlloc(allocator, 1024 * 1024); // 1MB max
|
||||
defer allocator.free(source);
|
||||
|
||||
return parse(allocator, source);
|
||||
}
|
||||
|
||||
// Tests
test "parse basic Sandboxfile" {
    const source =
        \\# Sandboxfile
        \\
        \\FROM host
        \\WORKDIR .
        \\
        \\RUN bun install
        \\
        \\DEV PORT=3000 WATCH=src/** bun run dev
        \\SERVICE db PORT=5432 docker compose up postgres
        \\SERVICE redis PORT=6379 redis-server
        \\TEST bun test
        \\
        \\OUTPUT src/
        \\OUTPUT tests/
        \\OUTPUT package.json
        \\
        \\LOGS logs/*
        \\
        \\NET registry.npmjs.org
        \\NET api.stripe.com
        \\
        \\SECRET STRIPE_API_KEY
    ;

    var result = try parse(std.testing.allocator, source);
    defer result.deinit(std.testing.allocator);

    try std.testing.expectEqual(BaseEnv.host, result.base_env);
    try std.testing.expectEqualStrings(".", result.workdir);

    try std.testing.expectEqual(@as(usize, 1), result.run_commands.items.len);
    try std.testing.expectEqualStrings("bun install", result.run_commands.items[0].command);

    try std.testing.expect(result.dev != null);
    try std.testing.expectEqual(@as(u16, 3000), result.dev.?.port.?);
    try std.testing.expectEqualStrings("src/**", result.dev.?.watch.?);
    try std.testing.expectEqualStrings("bun run dev", result.dev.?.command);

    try std.testing.expectEqual(@as(usize, 2), result.services.items.len);
    try std.testing.expectEqualStrings("db", result.services.items[0].name);
    try std.testing.expectEqual(@as(u16, 5432), result.services.items[0].port.?);
    try std.testing.expectEqualStrings("docker compose up postgres", result.services.items[0].command);

    try std.testing.expectEqualStrings("redis", result.services.items[1].name);
    try std.testing.expectEqual(@as(u16, 6379), result.services.items[1].port.?);
    try std.testing.expectEqualStrings("redis-server", result.services.items[1].command);

    try std.testing.expectEqual(@as(usize, 1), result.tests.items.len);
    try std.testing.expectEqualStrings("bun test", result.tests.items[0].command);

    try std.testing.expectEqual(@as(usize, 3), result.outputs.items.len);
    try std.testing.expectEqualStrings("src/", result.outputs.items[0]);
    try std.testing.expectEqualStrings("tests/", result.outputs.items[1]);
    try std.testing.expectEqualStrings("package.json", result.outputs.items[2]);

    try std.testing.expectEqual(@as(usize, 1), result.logs.items.len);
    try std.testing.expectEqualStrings("logs/*", result.logs.items[0]);

    try std.testing.expectEqual(@as(usize, 2), result.allowed_hosts.items.len);
    try std.testing.expectEqualStrings("registry.npmjs.org", result.allowed_hosts.items[0]);
    try std.testing.expectEqualStrings("api.stripe.com", result.allowed_hosts.items[1]);

    try std.testing.expectEqual(@as(usize, 1), result.secrets.items.len);
    try std.testing.expectEqualStrings("STRIPE_API_KEY", result.secrets.items[0]);
}

test "parse INFER shorthand" {
    const source =
        \\FROM host
        \\WORKDIR .
        \\INFER *
    ;

    var result = try parse(std.testing.allocator, source);
    defer result.deinit(std.testing.allocator);

    try std.testing.expect(result.infer_enabled);
    try std.testing.expectEqualStrings("*", result.infer_pattern.?);
}

test "parse FROM with image" {
    const source =
        \\FROM node:18-alpine
        \\WORKDIR /app
    ;

    var result = try parse(std.testing.allocator, source);
    defer result.deinit(std.testing.allocator);

    try std.testing.expectEqualStrings("node:18-alpine", result.base_env.image);
    try std.testing.expectEqualStrings("/app", result.workdir);
}

test "parse multiple RUN commands" {
    const source =
        \\FROM host
        \\RUN apt-get update
        \\RUN apt-get install -y curl
        \\RUN bun install
    ;

    var result = try parse(std.testing.allocator, source);
    defer result.deinit(std.testing.allocator);

    try std.testing.expectEqual(@as(usize, 3), result.run_commands.items.len);
    try std.testing.expectEqualStrings("apt-get update", result.run_commands.items[0].command);
    try std.testing.expectEqualStrings("apt-get install -y curl", result.run_commands.items[1].command);
    try std.testing.expectEqualStrings("bun install", result.run_commands.items[2].command);
}

test "parse DEV without options" {
    const source =
        \\FROM host
        \\DEV npm start
    ;

    var result = try parse(std.testing.allocator, source);
    defer result.deinit(std.testing.allocator);

    try std.testing.expect(result.dev != null);
    try std.testing.expect(result.dev.?.port == null);
    try std.testing.expect(result.dev.?.watch == null);
    try std.testing.expectEqualStrings("npm start", result.dev.?.command);
}

test "handles CRLF line endings" {
    const source = "FROM host\r\nWORKDIR .\r\nRUN bun install\r\n";

    var result = try parse(std.testing.allocator, source);
    defer result.deinit(std.testing.allocator);

    try std.testing.expectEqual(BaseEnv.host, result.base_env);
    try std.testing.expectEqualStrings(".", result.workdir);
    try std.testing.expectEqual(@as(usize, 1), result.run_commands.items.len);
}

test "skips comments and empty lines" {
    const source =
        \\# This is a comment
        \\FROM host
        \\
        \\# Another comment
        \\WORKDIR .
        \\
    ;

    var result = try parse(std.testing.allocator, source);
    defer result.deinit(std.testing.allocator);

    try std.testing.expectEqual(BaseEnv.host, result.base_env);
    try std.testing.expectEqualStrings(".", result.workdir);
}
257
test/js/bun/sandboxfile/sandboxfile.test.ts
Normal file
@@ -0,0 +1,257 @@
import { describe, expect, test } from "bun:test";
import { bunEnv, bunExe, tempDir } from "harness";

describe("Sandboxfile", () => {
  describe("parsing", () => {
    test("parses basic Sandboxfile with all directives", async () => {
      using dir = tempDir("sandboxfile-test", {
        Sandboxfile: `# Sandboxfile

FROM host
WORKDIR .

RUN bun install

DEV PORT=3000 WATCH=src/** bun run dev
SERVICE db PORT=5432 docker compose up postgres
SERVICE redis PORT=6379 redis-server
TEST bun test

OUTPUT src/
OUTPUT tests/
OUTPUT package.json

LOGS logs/*

NET registry.npmjs.org
NET api.stripe.com

SECRET STRIPE_API_KEY
`,
      });

      const file = Bun.file(`${String(dir)}/Sandboxfile`);
      const content = await file.text();

      expect(content).toContain("FROM host");
      expect(content).toContain("WORKDIR .");
      expect(content).toContain("RUN bun install");
      expect(content).toContain("DEV PORT=3000");
      expect(content).toContain("SERVICE db PORT=5432");
      expect(content).toContain("OUTPUT src/");
      expect(content).toContain("NET registry.npmjs.org");
      expect(content).toContain("SECRET STRIPE_API_KEY");
    });

    test("parses INFER shorthand", async () => {
      using dir = tempDir("sandboxfile-infer", {
        Sandboxfile: `FROM host
WORKDIR .
INFER *
`,
      });

      const file = Bun.file(`${String(dir)}/Sandboxfile`);
      const content = await file.text();

      expect(content).toContain("FROM host");
      expect(content).toContain("INFER *");
    });

    test("parses FROM with container image", async () => {
      using dir = tempDir("sandboxfile-image", {
        Sandboxfile: `FROM node:18-alpine
WORKDIR /app
RUN npm install
`,
      });

      const file = Bun.file(`${String(dir)}/Sandboxfile`);
      const content = await file.text();

      expect(content).toContain("FROM node:18-alpine");
      expect(content).toContain("WORKDIR /app");
    });

    test("handles comments and empty lines", async () => {
      using dir = tempDir("sandboxfile-comments", {
        Sandboxfile: `# This is a Sandboxfile for a web application
# Author: Test

FROM host
WORKDIR .

# Install dependencies
RUN bun install

# Development server configuration
DEV PORT=3000 bun run dev
`,
      });

      const file = Bun.file(`${String(dir)}/Sandboxfile`);
      const content = await file.text();

      expect(content).toContain("# This is a Sandboxfile");
      expect(content).toContain("FROM host");
      expect(content).toContain("RUN bun install");
    });
  });

  describe("CLI", () => {
    test("sandbox --help shows usage", async () => {
      await using proc = Bun.spawn({
        cmd: [bunExe(), "sandbox", "--help"],
        env: bunEnv,
        stderr: "pipe",
        stdout: "pipe",
      });

      const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

      expect(stdout + stderr).toContain("Usage: bun sandbox");
      expect(stdout + stderr).toContain("--dry-run");
      expect(stdout + stderr).toContain("--test");
    });

    test("sandbox without Sandboxfile shows error", async () => {
      using dir = tempDir("sandboxfile-missing", {});

      await using proc = Bun.spawn({
        cmd: [bunExe(), "sandbox"],
        env: bunEnv,
        cwd: String(dir),
        stderr: "pipe",
        stdout: "pipe",
      });

      const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

      expect(stderr).toContain("Sandboxfile not found");
      expect(exitCode).toBe(1);
    });

    test("sandbox --dry-run validates Sandboxfile", async () => {
      using dir = tempDir("sandboxfile-dryrun", {
        Sandboxfile: `FROM host
WORKDIR .
RUN echo "setup"
TEST echo "test passed"
`,
      });

      await using proc = Bun.spawn({
        cmd: [bunExe(), "sandbox", "--dry-run"],
        env: bunEnv,
        cwd: String(dir),
        stderr: "pipe",
        stdout: "pipe",
      });

      const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

      expect(stderr).toContain("Parsed Sandboxfile");
      expect(stderr).toContain("FROM: host");
      expect(stderr).toContain("Sandboxfile is valid");
      expect(exitCode).toBe(0);
    });

    test("sandbox runs RUN commands", async () => {
      using dir = tempDir("sandboxfile-run", {
        Sandboxfile: `FROM host
WORKDIR .
RUN echo "hello from sandbox" > output.txt
`,
      });

      await using proc = Bun.spawn({
        cmd: [bunExe(), "sandbox"],
        env: bunEnv,
        cwd: String(dir),
        stderr: "pipe",
        stdout: "pipe",
      });

      const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

      expect(stderr).toContain("RUN echo");
      expect(exitCode).toBe(0);

      // Verify the file was created
      const outputFile = Bun.file(`${String(dir)}/output.txt`);
      const outputContent = await outputFile.text();
      expect(outputContent.trim()).toBe("hello from sandbox");
    });

    test("sandbox runs TEST commands", async () => {
      using dir = tempDir("sandboxfile-test-cmd", {
        Sandboxfile: `FROM host
WORKDIR .
TEST echo "tests passed"
`,
      });

      await using proc = Bun.spawn({
        cmd: [bunExe(), "sandbox", "--test"],
        env: bunEnv,
        cwd: String(dir),
        stderr: "pipe",
        stdout: "pipe",
      });

      const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

      expect(stderr).toContain("TEST echo");
      expect(stderr).toContain("tests passed");
      expect(exitCode).toBe(0);
    });

    test("sandbox fails on RUN command failure", async () => {
      using dir = tempDir("sandboxfile-fail", {
        Sandboxfile: `FROM host
WORKDIR .
RUN exit 1
`,
      });

      await using proc = Bun.spawn({
        cmd: [bunExe(), "sandbox"],
        env: bunEnv,
        cwd: String(dir),
        stderr: "pipe",
        stdout: "pipe",
      });

      const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

      expect(stderr).toContain("Setup failed");
      expect(exitCode).toBe(1);
    });

    test("sandbox sets BUN_SANDBOX environment variable", async () => {
      using dir = tempDir("sandboxfile-env", {
        Sandboxfile: `FROM host
WORKDIR .
RUN echo $BUN_SANDBOX > sandbox_env.txt
`,
      });

      await using proc = Bun.spawn({
        cmd: [bunExe(), "sandbox"],
        env: bunEnv,
        cwd: String(dir),
        stderr: "pipe",
        stdout: "pipe",
      });

      const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

      expect(exitCode).toBe(0);

      // Verify the environment variable was set
      const envFile = Bun.file(`${String(dir)}/sandbox_env.txt`);
      const envContent = await envFile.text();
      expect(envContent.trim()).toBe("1");
    });
  });
});