Compare commits

...

6 Commits

Author SHA1 Message Date
Claude Bot
0ec9349f4a fix: simplify sandbox exec CLI and fix argument parsing
- Use -- to separate sandbox command from arguments
- Clean stdout output (no "Running in sandbox" prefix)
- Fix exit code propagation

Usage: bun sandbox exec -- <command> [args...]

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-07 20:29:19 +00:00
Claude Bot
b742800e66 feat: complete pure-Zig Linux sandbox with seccomp-BPF
Implements a full container-like sandbox in pure Zig without any
external dependencies (no bwrap, bubblewrap, or firejail).

Features:
- User namespaces: UID/GID mapping (appear as root inside sandbox)
- Mount namespaces: Filesystem isolation
- Network namespaces: Network isolation (only loopback visible)
- Overlayfs: Copy-on-write filesystem with automatic tmpfs backing
- Seccomp-BPF: Syscall filtering with architecture-aware BPF programs
  - Blocks dangerous syscalls: ptrace, mount, umount, pivot_root,
    kexec_load, reboot
  - Validates architecture to prevent syscall confusion attacks
- Proper parent-child synchronization via pipes

Usage:
  bun sandbox exec [options] -- <command> [args...]

Options:
  --share-net     Share network with host (default: isolated)
  --no-mount      Disable mount namespace
  --no-seccomp    Disable seccomp syscall filtering
  --overlayfs     Enable overlayfs copy-on-write filesystem
  -C, --workdir   Set working directory inside sandbox

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-06 10:04:50 +00:00
Claude Bot
5e0d0d604a feat: implement native Linux sandbox with user namespaces
Adds real Linux sandboxing using:
- User namespaces for privilege isolation (UID/GID mapping)
- Mount namespaces for filesystem isolation
- Network namespaces for network isolation
- Support for overlayfs (copy-on-write filesystem)
- Seccomp preparation (no_new_privs)

New commands:
  bun sandbox exec -- <cmd>     Execute command in isolated sandbox
  bun sandbox features          Show available sandbox features

The sandboxed process runs as root inside the sandbox but maps to the
current user outside, providing unprivileged container-like isolation.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-06 09:51:26 +00:00
Claude Bot
f01896d857 feat: add native bun sandbox CLI command
Integrates the TypeScript sandbox CLI as a native bun command.
The command delegates to packages/bun-sandbox/src/cli.ts.

Usage:
  bun sandbox init       Create a new Sandboxfile
  bun sandbox run        Run the sandbox
  bun sandbox test       Run tests in the sandbox
  bun sandbox validate   Validate a Sandboxfile

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-06 09:26:37 +00:00
Claude Bot
37c5b41542 feat: add sandbox runtime for executing Sandboxfiles
Implements a complete TypeScript-based sandbox runtime:

- SandboxRunner class for executing Sandboxfile configurations
- CLI tool (bun packages/bun-sandbox/src/cli.ts) with commands:
  - run: Start sandbox (setup + services + dev server)
  - test: Run sandbox and execute tests
  - validate: Validate Sandboxfile syntax
  - init: Create new Sandboxfile template

Features:
- Process spawning with stdout/stderr logging
- Port readiness detection for services
- Dry-run mode for testing
- Output file collection via glob patterns
- Network host allow-list checking
- Secret environment variable passthrough

Includes 27 comprehensive tests for parser, runner, and CLI.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-06 09:03:44 +00:00
Claude Bot
ec2632e260 feat: implement Sandboxfile parser for agent sandboxes
Add a declarative specification format for configuring agent sandboxes with:

- FROM: base environment (host or container image)
- WORKDIR: project root directory
- RUN: setup commands run once per agent
- DEV: primary dev server with PORT/WATCH support
- SERVICE: background processes (name required)
- TEST: verification commands
- OUTPUT: files/directories to extract from agent
- LOGS: log file patterns agent can tail
- NET: allowed external network hosts (default deny-all)
- SECRET: env vars agent can use but not inspect
- INFER: auto-generate lockfile from repo analysis

Includes both Zig and TypeScript implementations with comprehensive tests.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-06 08:51:06 +00:00
10 changed files with 3210 additions and 0 deletions

View File

@@ -0,0 +1,26 @@
{
"name": "bun-sandbox",
"version": "0.1.0",
"description": "Sandboxfile: A declarative spec for agent sandboxes",
"type": "module",
"main": "src/index.ts",
"bin": {
"bun-sandbox": "src/cli.ts"
},
"exports": {
".": "./src/index.ts",
"./parser": "./src/parser.ts",
"./runner": "./src/runner.ts"
},
"scripts": {
"test": "bun test"
},
"keywords": [
"bun",
"sandbox",
"agent",
"declarative"
],
"author": "Oven",
"license": "MIT"
}

View File

@@ -0,0 +1,316 @@
#!/usr/bin/env bun
/**
* Sandboxfile CLI
*
* Usage:
* bun sandbox [options] [Sandboxfile]
* bun sandbox run [options] [Sandboxfile]
* bun sandbox test [options] [Sandboxfile]
* bun sandbox validate [Sandboxfile]
* bun sandbox init
*/
import { loadSandboxfile } from "./parser";
import { SandboxRunner } from "./runner";
/** Options shared by every CLI subcommand, populated by `parseArgs`. */
interface CliOptions {
  /** Verbose logging (`-v` / `--verbose`). */
  verbose: boolean;
  /** Describe actions without executing them (`-n` / `--dry-run`). */
  dryRun: boolean;
  /** Path to the Sandboxfile (`-f` / `--file`; default "Sandboxfile"). */
  file: string;
  /** Working directory for the sandbox (`-C` / `--cwd`; default `process.cwd()`). */
  cwd: string;
}
/**
 * Print the top-level CLI help text (commands, options, examples) to stdout.
 */
function printUsage(): void {
  const usage = `
Sandboxfile - Declarative agent sandbox configuration
Usage:
bun sandbox [command] [options]
Commands:
run Run the sandbox (setup + services + dev)
test Run the sandbox and execute tests
validate Validate a Sandboxfile without running
init Create a new Sandboxfile in the current directory
Options:
-f, --file <path> Path to Sandboxfile (default: ./Sandboxfile)
-C, --cwd <dir> Working directory (default: current directory)
-v, --verbose Enable verbose output
-n, --dry-run Show what would be done without executing
-h, --help Show this help message
Examples:
bun sandbox run # Run using ./Sandboxfile
bun sandbox test -f sandbox.conf # Run tests using custom file
bun sandbox validate # Validate ./Sandboxfile
bun sandbox init # Create a new Sandboxfile
`;
  console.log(usage);
}
/**
 * Parse raw CLI arguments into a subcommand name plus a `CliOptions` bag.
 *
 * The first token selects the subcommand when it is one of the known
 * commands; otherwise the default command is "run". Any bare (non-flag)
 * token afterwards is treated as the Sandboxfile path. `-f`/`-C` consume
 * the following token as their value; a missing value is silently ignored.
 */
function parseArgs(args: string[]): { command: string; options: CliOptions } {
  const knownCommands = ["run", "test", "validate", "init", "help"];
  const options: CliOptions = {
    verbose: false,
    dryRun: false,
    file: "Sandboxfile",
    cwd: process.cwd(),
  };
  let command = "run"; // default when no subcommand is given
  let index = 0;
  // A leading non-flag token selects the subcommand, if recognized.
  if (args.length > 0 && !args[0].startsWith("-") && knownCommands.includes(args[0])) {
    command = args[0];
    index = 1;
  }
  for (; index < args.length; index++) {
    const arg = args[index];
    switch (arg) {
      case "-h":
      case "--help":
        command = "help";
        break;
      case "-v":
      case "--verbose":
        options.verbose = true;
        break;
      case "-n":
      case "--dry-run":
        options.dryRun = true;
        break;
      case "-f":
      case "--file":
        if (index + 1 < args.length) options.file = args[++index];
        break;
      case "-C":
      case "--cwd":
        if (index + 1 < args.length) options.cwd = args[++index];
        break;
      default:
        // Bare positional argument: treat it as the Sandboxfile path.
        if (!arg.startsWith("-")) options.file = arg;
        break;
    }
  }
  return { command, options };
}
/**
 * `bun sandbox run` — load a Sandboxfile, run the setup phase, start
 * background services and the dev server, then block until Ctrl+C.
 *
 * @param options Parsed CLI options (file path, cwd, verbosity, dry-run).
 * @returns Exit code: 0 on clean shutdown, 1 if setup or services fail.
 */
async function cmdRun(options: CliOptions): Promise<number> {
  console.log(`Loading Sandboxfile: ${options.file}`);
  const runner = await SandboxRunner.fromFile(options.file, {
    cwd: options.cwd,
    verbose: options.verbose,
    dryRun: options.dryRun,
    // Timestamped, color-coded log line: "HH:MM:SS [source] message".
    onLog: (source, message) => {
      const timestamp = new Date().toISOString().split("T")[1].slice(0, 8);
      console.log(`\x1b[90m${timestamp}\x1b[0m [\x1b[36m${source}\x1b[0m] ${message}`);
    },
  });
  const config = runner.getConfig();
  // Summarize the parsed configuration before executing anything.
  console.log(`\nSandbox configuration:`);
  console.log(` FROM: ${config.from || "host"}`);
  console.log(` WORKDIR: ${config.workdir || "."}`);
  console.log(` RUN commands: ${config.runCommands.length}`);
  console.log(` Services: ${config.services.length}`);
  console.log(` DEV server: ${config.dev ? "yes" : "no"}`);
  console.log(` Tests: ${config.tests.length}`);
  console.log(` Outputs: ${config.outputs.length}`);
  console.log(` Network rules: ${config.netHosts.length}`);
  console.log(` Secrets: ${config.secrets.length}`);
  console.log();
  // Run setup and start services
  const setupOk = await runner.runSetup();
  if (!setupOk) {
    console.error("\x1b[31mSetup failed\x1b[0m");
    return 1;
  }
  const servicesOk = await runner.startServices();
  if (!servicesOk) {
    console.error("\x1b[31mServices failed to start\x1b[0m");
    await runner.stopAll();
    return 1;
  }
  const devHandle = await runner.startDev();
  if (devHandle) {
    console.log(`\n\x1b[32mDev server running\x1b[0m`);
    if (devHandle.port) {
      console.log(` URL: http://localhost:${devHandle.port}`);
    }
  }
  console.log("\n\x1b[32mSandbox is running.\x1b[0m Press Ctrl+C to stop.\n");
  // Wait for interrupt
  // This promise resolves only when SIGINT arrives; stopAll() runs inside the
  // handler so child processes are torn down before the promise settles.
  await new Promise<void>(resolve => {
    process.on("SIGINT", async () => {
      console.log("\n\x1b[33mShutting down...\x1b[0m");
      await runner.stopAll();
      resolve();
    });
  });
  return 0;
}
/**
 * `bun sandbox test` — run the full sandbox lifecycle (setup, services,
 * dev server) and then execute the TEST commands.
 *
 * @param options Parsed CLI options (file path, cwd, verbosity, dry-run).
 * @returns Exit code: 0 when everything ran and all tests passed, else 1.
 */
async function cmdTest(options: CliOptions): Promise<number> {
  console.log(`Loading Sandboxfile: ${options.file}`);
  // Timestamped, color-coded log line: "HH:MM:SS [source] message".
  const logLine = (source: string, message: string): void => {
    const timestamp = new Date().toISOString().split("T")[1].slice(0, 8);
    console.log(`\x1b[90m${timestamp}\x1b[0m [\x1b[36m${source}\x1b[0m] ${message}`);
  };
  const runner = await SandboxRunner.fromFile(options.file, {
    cwd: options.cwd,
    verbose: options.verbose,
    dryRun: options.dryRun,
    onLog: logLine,
  });
  const { success, testsPassed } = await runner.run();
  if (!success) {
    console.error("\n\x1b[31mSandbox execution failed\x1b[0m");
    return 1;
  }
  if (!testsPassed) {
    console.error("\n\x1b[31mTests failed\x1b[0m");
    return 1;
  }
  console.log("\n\x1b[32mAll tests passed!\x1b[0m");
  return 0;
}
/**
 * `bun sandbox validate` — parse the Sandboxfile and print a summary of
 * the resulting configuration without executing anything.
 *
 * @param options Parsed CLI options (only `file` is used).
 * @returns Exit code: 0 when the file parses, 1 on any load/parse error.
 */
async function cmdValidate(options: CliOptions): Promise<number> {
  console.log(`Validating Sandboxfile: ${options.file}`);
  // Format a string list, falling back to a placeholder when empty.
  const joined = (items: string[], empty: string): string => items.join(", ") || empty;
  // Print the shared name/PORT/WATCH/COMMAND shape used by DEV and SERVICE.
  const printProcess = (
    tag: string,
    label: string | undefined,
    proc: { port?: number; watch?: string; command: string },
  ): void => {
    console.log(` ${tag}: ${label}`);
    if (proc.port) console.log(` PORT: ${proc.port}`);
    if (proc.watch) console.log(` WATCH: ${proc.watch}`);
    console.log(` COMMAND: ${proc.command}`);
  };
  try {
    const cfg = await loadSandboxfile(options.file);
    console.log("\n\x1b[32mSandboxfile is valid\x1b[0m\n");
    console.log("Configuration:");
    console.log(` FROM: ${cfg.from || "(not set)"}`);
    console.log(` WORKDIR: ${cfg.workdir || "(not set)"}`);
    console.log(` RUN commands: ${cfg.runCommands.length}`);
    if (cfg.dev) {
      printProcess("DEV", cfg.dev.name || "(unnamed)", cfg.dev);
    }
    for (const svc of cfg.services) {
      printProcess("SERVICE", svc.name, svc);
    }
    // TEST entries only show name and command (no PORT/WATCH lines).
    for (const test of cfg.tests) {
      console.log(` TEST: ${test.name || "(unnamed)"}`);
      console.log(` COMMAND: ${test.command}`);
    }
    console.log(` OUTPUT patterns: ${joined(cfg.outputs, "(none)")}`);
    console.log(` LOG patterns: ${joined(cfg.logs, "(none)")}`);
    console.log(` NET hosts: ${joined(cfg.netHosts, "(deny all)")}`);
    console.log(` SECRETS: ${joined(cfg.secrets, "(none)")}`);
    console.log(` INFER patterns: ${joined(cfg.inferPatterns, "(none)")}`);
    return 0;
  } catch (err) {
    console.error(`\n\x1b[31mError:\x1b[0m ${err}`);
    return 1;
  }
}
/**
 * `bun sandbox init` — write a starter Sandboxfile into the current
 * directory, refusing to overwrite an existing one.
 *
 * @returns Exit code: 0 when the file was created, 1 if it already exists.
 */
async function cmdInit(_options: CliOptions): Promise<number> {
  const template = `# Sandboxfile
FROM host
WORKDIR .
# Setup commands (run once)
RUN bun install
# Development server
DEV PORT=3000 bun run dev
# Background services
# SERVICE db PORT=5432 docker compose up postgres
# Test commands
TEST bun test
# Files to extract from sandbox
OUTPUT src/
OUTPUT package.json
# Allowed network hosts
NET registry.npmjs.org
NET api.github.com
# Secret environment variables (values from host env)
# SECRET API_KEY
`;
  const target = "Sandboxfile";
  // Never clobber an existing Sandboxfile.
  if (await Bun.file(target).exists()) {
    console.error(`\x1b[31mError:\x1b[0m Sandboxfile already exists`);
    return 1;
  }
  await Bun.write(target, template);
  console.log(`\x1b[32mCreated Sandboxfile\x1b[0m`);
  console.log("\nEdit the file to configure your sandbox, then run:");
  console.log(" bun sandbox run # Start the sandbox");
  console.log(" bun sandbox test # Run tests in the sandbox");
  return 0;
}
export async function main(args: string[]): Promise<number> {
const { command, options } = parseArgs(args);
switch (command) {
case "help":
printUsage();
return 0;
case "run":
return cmdRun(options);
case "test":
return cmdTest(options);
case "validate":
return cmdValidate(options);
case "init":
return cmdInit(options);
default:
console.error(`Unknown command: ${command}`);
printUsage();
return 1;
}
}
// Run if executed directly
// `import.meta.main` is true only when this file is the process entry point
// (Bun-specific), so importing this module elsewhere does not start the CLI.
if (import.meta.main) {
  const args = process.argv.slice(2);
  const exitCode = await main(args);
  process.exit(exitCode);
}

View File

@@ -0,0 +1,10 @@
/**
 * Sandboxfile: A declarative spec for agent sandboxes
 *
 * This module provides parsing and execution of Sandboxfile configurations.
 */
// Parsing: turn Sandboxfile text into a typed configuration object.
export { loadSandboxfile, parseSandboxfile } from "./parser";
export type { SandboxProcess, Sandboxfile } from "./parser";
// Execution: run setup commands, services, dev server, and tests.
export { SandboxRunner } from "./runner";
export type { ProcessHandle, RunResult, RunnerOptions } from "./runner";

View File

@@ -0,0 +1,231 @@
/**
* Sandboxfile Parser
*
* Parses Sandboxfile format for agent sandbox configuration.
*/
/**
 * Represents a process (DEV, SERVICE, or TEST)
 */
export interface SandboxProcess {
  /** Process name (required for SERVICE, optional for DEV/TEST) */
  name?: string;
  /** Port number from a PORT=<n> pair, when present and numeric */
  port?: number;
  /** File watch pattern from a WATCH=<value> pair (single whitespace-free token) */
  watch?: string;
  /** Command to execute — the remainder of the line after name/KEY=VALUE pairs (may be empty) */
  command: string;
}
/**
 * Represents a parsed Sandboxfile
 */
export interface Sandboxfile {
  /** Base environment (e.g., "host" or a container image) */
  from?: string;
  /** Project root directory */
  workdir?: string;
  /** Setup commands to run once per agent, in file order */
  runCommands: string[];
  /** Primary dev server configuration (last DEV directive wins) */
  dev?: SandboxProcess;
  /** Background services, in file order */
  services: SandboxProcess[];
  /** Test commands, in file order */
  tests: SandboxProcess[];
  /** Files/directories to extract from agent */
  outputs: string[];
  /** Log file patterns agent can tail */
  logs: string[];
  /** Allowed external network hosts (empty list means deny all) */
  netHosts: string[];
  /** Environment variables agent can use but not inspect */
  secrets: string[];
  /** INFER directive patterns (for auto-generation) */
  inferPatterns: string[];
}
/**
 * All directive keywords recognized in a Sandboxfile, in documentation order.
 *
 * Single source of truth: both the `Directive` union type and the runtime
 * lookup set are derived from this tuple, so adding a keyword here updates
 * the type checker and the parser together (previously the union and the
 * Set duplicated the same eleven names by hand and could drift apart).
 */
const DIRECTIVE_NAMES = [
  "FROM",
  "WORKDIR",
  "RUN",
  "DEV",
  "SERVICE",
  "TEST",
  "OUTPUT",
  "LOGS",
  "NET",
  "SECRET",
  "INFER",
] as const;

/** Union of all valid directive keywords. */
type Directive = (typeof DIRECTIVE_NAMES)[number];

/** Fast membership test used by the parser to reject unknown directives. */
const DIRECTIVES = new Set<string>(DIRECTIVE_NAMES);
/** True when `s` is a non-empty run of letters, digits, '_' or '-'. */
function isIdentifier(s: string): boolean {
  return s.length > 0 && /^[a-zA-Z0-9_-]+$/.test(s);
}

/**
 * Parse the body of a DEV/SERVICE/TEST directive into a SandboxProcess.
 *
 * Token layout: `[name] [KEY=VALUE ...] command...`
 * - SERVICE (`requireName` true): the first token is always the name.
 * - DEV/TEST (`requireName` false): the first token is taken as a name only
 *   when it is a plain identifier without '=' AND the second token is a
 *   KEY=VALUE pair; otherwise it belongs to the command.
 * - Recognized keys are PORT (numeric) and WATCH; other KEY=VALUE tokens
 *   before the command are consumed and dropped.
 * - Everything from the first non-KEY=VALUE token onward is the command.
 */
function parseProcess(line: string, requireName: boolean): SandboxProcess {
  const tokens = line.split(/\s+/).filter(Boolean);
  const proc: SandboxProcess = { command: "" };
  let cursor = 0;
  if (requireName) {
    // SERVICE: the leading token is unconditionally the name.
    if (tokens.length > 0) {
      proc.name = tokens[0];
      cursor = 1;
    }
  } else if (tokens.length > 1) {
    const head = tokens[0];
    const next = tokens[1];
    if (!head.includes("=") && isIdentifier(head) && next.includes("=")) {
      // DEV/TEST: optional name precedes the KEY=VALUE pairs.
      proc.name = head;
      cursor = 1;
    }
  }
  // Consume KEY=VALUE pairs until the first plain token, which starts the command.
  for (; cursor < tokens.length; cursor++) {
    const token = tokens[cursor];
    const eq = token.indexOf("=");
    if (eq === -1) break;
    const key = token.slice(0, eq);
    const value = token.slice(eq + 1);
    if (key === "PORT") {
      const port = parseInt(value, 10);
      if (!isNaN(port)) proc.port = port;
    } else if (key === "WATCH") {
      proc.watch = value;
    }
  }
  proc.command = tokens.slice(cursor).join(" ");
  return proc;
}
/**
 * Parse a Sandboxfile from a string.
 *
 * Each non-blank, non-comment line is `DIRECTIVE rest-of-line`. Unknown
 * directives are skipped with a console warning. List-valued directives
 * accumulate in file order; FROM/WORKDIR/DEV keep the last occurrence.
 */
export function parseSandboxfile(content: string): Sandboxfile {
  const config: Sandboxfile = {
    runCommands: [],
    services: [],
    tests: [],
    outputs: [],
    logs: [],
    netHosts: [],
    secrets: [],
    inferPatterns: [],
  };
  // Directives whose argument is simply appended to a string list.
  const listTargets: Partial<Record<Directive, string[]>> = {
    RUN: config.runCommands,
    OUTPUT: config.outputs,
    LOGS: config.logs,
    NET: config.netHosts,
    SECRET: config.secrets,
    INFER: config.inferPatterns,
  };
  for (const rawLine of content.split("\n")) {
    // Strip a trailing CR (Windows line endings) and surrounding whitespace.
    const line = rawLine.replace(/\r$/, "").trim();
    // Skip blank lines and comments.
    if (line.length === 0 || line.startsWith("#")) continue;
    // Split "DIRECTIVE rest" on the first whitespace run.
    const wsAt = line.search(/\s/);
    const keyword = wsAt === -1 ? line : line.slice(0, wsAt);
    const rest = wsAt === -1 ? "" : line.slice(wsAt).trim();
    if (!DIRECTIVES.has(keyword)) {
      // Unknown directive - skip with warning
      console.warn(`Unknown directive: ${keyword}`);
      continue;
    }
    const directive = keyword as Directive;
    const target = listTargets[directive];
    if (target) {
      target.push(rest);
    } else if (directive === "FROM") {
      config.from = rest;
    } else if (directive === "WORKDIR") {
      config.workdir = rest;
    } else if (directive === "DEV") {
      config.dev = parseProcess(rest, false);
    } else if (directive === "SERVICE") {
      config.services.push(parseProcess(rest, true));
    } else if (directive === "TEST") {
      config.tests.push(parseProcess(rest, false));
    }
  }
  return config;
}
/**
 * Load and parse a Sandboxfile from a file path.
 *
 * @param path Filesystem path to the Sandboxfile.
 * @returns The parsed configuration.
 * @throws If the file cannot be read.
 */
export async function loadSandboxfile(path: string): Promise<Sandboxfile> {
  const text = await Bun.file(path).text();
  return parseSandboxfile(text);
}

View File

@@ -0,0 +1,458 @@
/**
* Sandboxfile Runner
*
* Executes sandboxes based on Sandboxfile configuration.
*/
import type { Sandboxfile, SandboxProcess } from "./parser";
import { loadSandboxfile, parseSandboxfile } from "./parser";
export interface RunnerOptions {
  /** Working directory for the sandbox */
  cwd?: string;
  /** Environment variables to pass to processes */
  env?: Record<string, string>;
  /** Whether to run in verbose mode */
  verbose?: boolean;
  /** Whether to run in dry-run mode (don't actually execute) */
  dryRun?: boolean;
  /** Timeout for RUN commands in milliseconds */
  // NOTE(review): declared but not yet enforced by runCommand — confirm intent.
  runTimeout?: number;
  /** Callback for log output */
  onLog?: (source: string, message: string) => void;
}

export interface ProcessHandle {
  name: string;
  type: "dev" | "service" | "test";
  /** The spawned subprocess; a null stub in dry-run mode. */
  process: ReturnType<typeof Bun.spawn>;
  port?: number;
}

/** Result of running a single shell command to completion. */
export interface RunResult {
  success: boolean;
  exitCode: number;
  stdout: string;
  stderr: string;
}

/**
 * Manages sandbox execution
 *
 * Lifecycle: `runSetup()` → `startServices()` → `startDev()` → `runTests()`
 * → `stopAll()`, or `run()` for the whole sequence.
 */
export class SandboxRunner {
  private config: Sandboxfile;
  private options: RunnerOptions;
  private runningProcesses: Map<string, ProcessHandle> = new Map();
  private workdir: string;
  private sandboxEnv: Record<string, string>;

  constructor(config: Sandboxfile, options: RunnerOptions = {}) {
    this.config = config;
    this.options = options;
    this.workdir = options.cwd || process.cwd();
    this.sandboxEnv = this.buildEnvironment();
  }

  /**
   * Load a SandboxRunner from a Sandboxfile path
   */
  static async fromFile(path: string, options: RunnerOptions = {}): Promise<SandboxRunner> {
    const config = await loadSandboxfile(path);
    return new SandboxRunner(config, options);
  }

  /**
   * Load a SandboxRunner from a Sandboxfile string
   */
  static fromString(content: string, options: RunnerOptions = {}): SandboxRunner {
    const config = parseSandboxfile(content);
    return new SandboxRunner(config, options);
  }

  /**
   * Build the environment for sandbox processes.
   *
   * The full host environment is inherited, then overlaid with caller-supplied
   * `options.env`. SECRET variables are re-applied from the host environment
   * last, so a secret's host value wins even if `options.env` overrode it.
   * Note: this does not yet *restrict* the environment to declared secrets.
   */
  private buildEnvironment(): Record<string, string> {
    const env: Record<string, string> = {
      ...process.env,
      ...this.options.env,
    } as Record<string, string>;
    for (const secret of this.config.secrets) {
      const value = process.env[secret];
      if (value !== undefined) {
        env[secret] = value;
      }
    }
    return env;
  }

  /**
   * Check if a host is allowed by NET rules.
   *
   * No NET rules means deny-all. "*" allows everything; "*.example.com"
   * allows any subdomain of example.com as well as bare "example.com".
   */
  isNetworkAllowed(host: string): boolean {
    if (this.config.netHosts.length === 0) {
      // No NET rules = deny all external
      return false;
    }
    for (const pattern of this.config.netHosts) {
      if (pattern === "*") {
        return true;
      }
      if (pattern.startsWith("*.")) {
        // Wildcard subdomain match
        const suffix = pattern.slice(1); // ".example.com"
        if (host.endsWith(suffix) || host === pattern.slice(2)) {
          return true;
        }
      } else if (host === pattern) {
        return true;
      }
    }
    return false;
  }

  /**
   * Log a message via the onLog callback, or to stdout in verbose mode.
   */
  private log(source: string, message: string): void {
    if (this.options.onLog) {
      this.options.onLog(source, message);
    } else if (this.options.verbose) {
      console.log(`[${source}] ${message}`);
    }
  }

  /**
   * Run a shell command to completion and capture its output.
   */
  private async runCommand(command: string, label: string): Promise<RunResult> {
    this.log(label, `Running: ${command}`);
    if (this.options.dryRun) {
      this.log(label, "(dry-run) Would execute command");
      return { success: true, exitCode: 0, stdout: "", stderr: "" };
    }
    const proc = Bun.spawn(["sh", "-c", command], {
      cwd: this.workdir,
      env: this.sandboxEnv,
      stdout: "pipe",
      stderr: "pipe",
    });
    // Drain both pipes while waiting for exit to avoid pipe-buffer deadlock.
    const [stdout, stderr, exitCode] = await Promise.all([
      new Response(proc.stdout).text(),
      new Response(proc.stderr).text(),
      proc.exited,
    ]);
    const success = exitCode === 0;
    if (!success) {
      this.log(label, `Command failed with exit code ${exitCode}`);
      if (stderr) {
        this.log(label, `stderr: ${stderr}`);
      }
    }
    return { success, exitCode, stdout, stderr };
  }

  /**
   * Start a background process and register it for cleanup.
   * In dry-run mode, returns a stub handle with a null process.
   */
  private startProcess(processConfig: SandboxProcess, type: "dev" | "service" | "test"): ProcessHandle {
    const name = processConfig.name || type;
    const command = processConfig.command;
    this.log(name, `Starting ${type}: ${command}`);
    if (this.options.dryRun) {
      this.log(name, "(dry-run) Would start process");
      // Return a dummy handle; waitForReady() knows to skip it.
      return {
        name,
        type,
        process: null as unknown as ReturnType<typeof Bun.spawn>,
        port: processConfig.port,
      };
    }
    const proc = Bun.spawn(["sh", "-c", command], {
      cwd: this.workdir,
      env: this.sandboxEnv,
      stdout: "pipe",
      stderr: "pipe",
    });
    const handle: ProcessHandle = {
      name,
      type,
      process: proc,
      port: processConfig.port,
    };
    this.runningProcesses.set(name, handle);
    // Fire-and-forget: stream the child's output into the log.
    void this.pipeOutput(proc, name);
    return handle;
  }

  /**
   * Pipe process output to logs (runs until the streams close).
   */
  private async pipeOutput(proc: ReturnType<typeof Bun.spawn>, name: string): Promise<void> {
    const readStream = async (stream: ReadableStream<Uint8Array> | null, prefix: string) => {
      if (!stream) return;
      const reader = stream.getReader();
      const decoder = new TextDecoder();
      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
          const text = decoder.decode(value);
          for (const line of text.split("\n")) {
            if (line.trim()) {
              this.log(name, `${prefix}${line}`);
            }
          }
        }
      } catch {
        // Stream closed
      }
    };
    // Don't await - run in background
    void readStream(proc.stdout, "");
    void readStream(proc.stderr, "[err] ");
  }

  /**
   * Wait for a process to be ready (e.g., port is listening).
   *
   * Polls the port every 100ms up to `timeoutMs`. In dry-run mode (or for a
   * stub handle with no real process) returns true immediately — previously
   * this polled a port that would never open and failed after the full 30s
   * timeout, making dry-run service startup hang and then report failure.
   */
  private async waitForReady(handle: ProcessHandle, timeoutMs: number = 30000): Promise<boolean> {
    if (this.options.dryRun || !handle.process) {
      return true;
    }
    if (!handle.port) {
      // No port specified, wait a bit and assume ready
      await Bun.sleep(500);
      return true;
    }
    const startTime = Date.now();
    const port = handle.port;
    this.log(handle.name, `Waiting for port ${port} to be ready...`);
    while (Date.now() - startTime < timeoutMs) {
      try {
        const socket = await Bun.connect({
          hostname: "127.0.0.1",
          port,
          socket: {
            data() {},
            open(socket) {
              socket.end();
            },
            close() {},
            error() {},
          },
        });
        socket.end();
        this.log(handle.name, `Port ${port} is ready`);
        return true;
      } catch {
        // Port not ready yet
        await Bun.sleep(100);
      }
    }
    this.log(handle.name, `Timeout waiting for port ${port}`);
    return false;
  }

  /**
   * Run all RUN commands (setup phase). Stops at the first failure.
   */
  async runSetup(): Promise<boolean> {
    this.log("setup", "Starting setup phase...");
    for (const command of this.config.runCommands) {
      const result = await this.runCommand(command, "setup");
      if (!result.success) {
        this.log("setup", `Setup failed: ${command}`);
        return false;
      }
    }
    this.log("setup", "Setup complete");
    return true;
  }

  /**
   * Start all services, waiting for each PORT-bearing service to listen.
   */
  async startServices(): Promise<boolean> {
    this.log("services", "Starting services...");
    for (const service of this.config.services) {
      const handle = this.startProcess(service, "service");
      if (handle.port) {
        const ready = await this.waitForReady(handle);
        if (!ready) {
          this.log("services", `Service ${handle.name} failed to start`);
          return false;
        }
      }
    }
    this.log("services", "All services started");
    return true;
  }

  /**
   * Start the dev server, if one is configured.
   */
  async startDev(): Promise<ProcessHandle | null> {
    if (!this.config.dev) {
      this.log("dev", "No DEV server configured");
      return null;
    }
    const handle = this.startProcess(this.config.dev, "dev");
    if (handle.port) {
      const ready = await this.waitForReady(handle);
      if (!ready) {
        this.log("dev", "Dev server failed to start");
        return null;
      }
    }
    return handle;
  }

  /**
   * Run all tests. Runs every test even after a failure; returns whether
   * all of them passed.
   */
  async runTests(): Promise<boolean> {
    this.log("tests", "Running tests...");
    let allPassed = true;
    for (const test of this.config.tests) {
      const name = test.name || "test";
      const result = await this.runCommand(test.command, name);
      if (!result.success) {
        allPassed = false;
        this.log(name, `Test failed with exit code ${result.exitCode}`);
      } else {
        this.log(name, "Test passed");
      }
    }
    return allPassed;
  }

  /**
   * Stop all running processes and wait for each to exit.
   */
  async stopAll(): Promise<void> {
    this.log("cleanup", "Stopping all processes...");
    for (const [name, handle] of this.runningProcesses) {
      if (handle.process) {
        this.log("cleanup", `Stopping ${name}...`);
        handle.process.kill();
        await handle.process.exited;
      }
    }
    this.runningProcesses.clear();
    this.log("cleanup", "All processes stopped");
  }

  /**
   * Collect OUTPUT files matching the configured glob patterns into destDir.
   *
   * @returns The workdir-relative paths that were copied.
   */
  async collectOutputs(destDir: string): Promise<string[]> {
    const collectedFiles: string[] = [];
    for (const pattern of this.config.outputs) {
      const glob = new Bun.Glob(pattern);
      for await (const file of glob.scan({ cwd: this.workdir })) {
        const srcPath = `${this.workdir}/${file}`;
        const destPath = `${destDir}/${file}`;
        // Create destination directory
        const destDirPath = destPath.substring(0, destPath.lastIndexOf("/"));
        await Bun.$`mkdir -p ${destDirPath}`.quiet();
        // Copy file
        await Bun.$`cp -r ${srcPath} ${destPath}`.quiet();
        collectedFiles.push(file);
      }
    }
    return collectedFiles;
  }

  /**
   * Run the full sandbox lifecycle: setup → services → dev → tests.
   * Always stops all spawned processes before returning.
   */
  async run(): Promise<{ success: boolean; testsPassed: boolean }> {
    try {
      // Resolve workdir (WORKDIR may be absolute or relative to cwd)
      if (this.config.workdir) {
        if (this.config.workdir.startsWith("/")) {
          this.workdir = this.config.workdir;
        } else {
          this.workdir = `${this.workdir}/${this.config.workdir}`;
        }
      }
      // Setup phase
      const setupOk = await this.runSetup();
      if (!setupOk) {
        return { success: false, testsPassed: false };
      }
      // Start services
      const servicesOk = await this.startServices();
      if (!servicesOk) {
        await this.stopAll();
        return { success: false, testsPassed: false };
      }
      // Start dev server (if configured)
      await this.startDev();
      // Run tests
      const testsPassed = await this.runTests();
      return { success: true, testsPassed };
    } finally {
      await this.stopAll();
    }
  }

  /**
   * Get the parsed configuration
   */
  getConfig(): Sandboxfile {
    return this.config;
  }

  /**
   * Get running processes
   */
  getRunningProcesses(): Map<string, ProcessHandle> {
    return this.runningProcesses;
  }
}

View File

@@ -92,6 +92,7 @@ pub const AuditCommand = @import("./cli/audit_command.zig").AuditCommand;
pub const InitCommand = @import("./cli/init_command.zig").InitCommand;
pub const WhyCommand = @import("./cli/why_command.zig").WhyCommand;
pub const FuzzilliCommand = @import("./cli/fuzzilli_command.zig").FuzzilliCommand;
pub const SandboxCommand = @import("./cli/sandbox_command.zig").SandboxCommand;
pub const Arguments = @import("./cli/Arguments.zig");
@@ -632,6 +633,8 @@ pub const Command = struct {
else
.AutoCommand,
RootCommandMatcher.case("sandbox") => .SandboxCommand,
RootCommandMatcher.case("-e") => .AutoCommand,
else => .AutoCommand,
@@ -656,6 +659,7 @@ pub const Command = struct {
"x",
"repl",
"info",
"sandbox",
};
const reject_list = default_completions_list ++ [_]string{
@@ -949,6 +953,16 @@ pub const Command = struct {
return error.UnrecognizedCommand;
}
},
.SandboxCommand => {
const ctx = try Command.init(allocator, log, .SandboxCommand);
if (ctx.positionals.len > 1) {
try SandboxCommand.exec(ctx);
} else {
SandboxCommand.printHelp();
}
return;
},
}
}
@@ -985,6 +999,7 @@ pub const Command = struct {
AuditCommand,
WhyCommand,
FuzzilliCommand,
SandboxCommand,
/// Used by crash reports.
///
@@ -1023,6 +1038,7 @@ pub const Command = struct {
.AuditCommand => 'A',
.WhyCommand => 'W',
.FuzzilliCommand => 'F',
.SandboxCommand => 'S',
};
}
@@ -1335,6 +1351,9 @@ pub const Command = struct {
Output.pretty(intro_text, .{});
Output.flush();
},
.SandboxCommand => {
SandboxCommand.printHelp();
},
else => {
HelpCommand.printWithReason(.explicit, false);
},

281
src/cli/sandbox_command.zig Normal file
View File

@@ -0,0 +1,281 @@
/// Sandboxfile: A declarative spec for agent sandboxes
///
/// Usage:
/// bun sandbox [command] [options]
///
/// Commands:
/// run Run the sandbox (setup + services + dev)
/// test Run the sandbox and execute tests
/// validate Validate a Sandboxfile without running
/// init Create a new Sandboxfile in the current directory
/// exec Execute a command directly in a Linux namespace sandbox
/// features Show available sandbox features on this system
pub const SandboxCommand = struct {
    /// Entry point for `bun sandbox <subcommand> ...`.
    ///
    /// `exec` and `features` are handled natively in Zig; every other
    /// subcommand (run, test, validate, init, ...) is delegated to the
    /// TypeScript CLI found by `findSandboxCli` and run via `RunCommand`.
    pub fn exec(ctx: Command.Context) !void {
        // Check for native sandbox subcommands first
        if (ctx.positionals.len > 1) {
            const subcmd = ctx.positionals[1];
            if (strings.eqlComptime(subcmd, "exec")) {
                return execNativeSandbox(ctx);
            }
            if (strings.eqlComptime(subcmd, "features")) {
                return showFeatures();
            }
        }
        // Fall back to TypeScript CLI for other commands
        var path_buf: bun.PathBuffer = undefined;
        const cli_script = findSandboxCli(&path_buf) orelse {
            Output.errGeneric("Could not find sandbox CLI. Make sure bun-sandbox package exists at packages/bun-sandbox/src/cli.ts", .{});
            Global.exit(1);
        };
        // Build arguments to pass to RunCommand
        // ctx.positionals = ["sandbox", "init", ...args]
        // We need to run: bun <cli_script> <args after "sandbox">
        var run_ctx = ctx;
        // ctx.positionals[0] is "sandbox", [1:] are the actual sandbox command args
        // These need to be in passthrough to be passed to the script as process.argv
        const sandbox_args = if (ctx.positionals.len > 1) ctx.positionals[1..] else &[_][]const u8{};
        // Set positionals to just the script path
        var new_positionals: [1][]const u8 = .{cli_script};
        run_ctx.positionals = &new_positionals;
        // Set passthrough to the sandbox command arguments (init, test, etc.)
        // This is what gets passed to the script as command line args
        run_ctx.passthrough = sandbox_args;
        // Set entry point for RunCommand
        var entry_points: [1][]const u8 = .{cli_script};
        run_ctx.args.entry_points = &entry_points;
        if (try RunCommand.exec(run_ctx, .{
            .bin_dirs_only = false,
            .log_errors = true,
            .allow_fast_run_for_extensions = true,
        })) {
            return;
        }
        Global.exit(1);
    }

    /// Execute a command directly in a Linux namespace sandbox
    /// Usage: bun sandbox exec [options] -- <command> [args...]
    ///
    /// Recognized options (before `--` or the first non-option argument):
    ///   --share-net   share the host network (skip network namespace)
    ///   --no-mount    skip the mount namespace
    ///   --no-seccomp  skip seccomp syscall filtering
    ///   --overlayfs   enable copy-on-write overlayfs
    ///   --workdir D   working directory inside the sandbox (default /tmp)
    ///
    /// Exits the process with the sandboxed command's exit code.
    fn execNativeSandbox(ctx: Command.Context) !void {
        if (comptime !bun.Environment.isLinux) {
            Output.errGeneric("Native sandbox execution is only available on Linux", .{});
            Global.exit(1);
        }
        // Arguments come from ctx.positionals: ["sandbox", "exec", "--", "cmd", "args..."]
        // Or without --: ["sandbox", "exec", "cmd", "args..."]
        var share_network = false;
        var no_mount = false;
        var enable_seccomp = true;
        var enable_overlayfs = false;
        var workdir: []const u8 = "/tmp";
        var cmd_start: usize = 2; // Start after "sandbox" and "exec"
        // Parse positionals for sandbox options and command
        var i: usize = 2;
        while (i < ctx.positionals.len) : (i += 1) {
            const arg = ctx.positionals[i];
            if (strings.eqlComptime(arg, "--")) {
                // Everything after -- is the command
                cmd_start = i + 1;
                break;
            } else if (strings.eqlComptime(arg, "--share-net")) {
                share_network = true;
            } else if (strings.eqlComptime(arg, "--no-mount")) {
                no_mount = true;
            } else if (strings.eqlComptime(arg, "--no-seccomp")) {
                enable_seccomp = false;
            } else if (strings.eqlComptime(arg, "--overlayfs")) {
                enable_overlayfs = true;
            } else if (strings.eqlComptime(arg, "--workdir")) {
                // NOTE(review): help text elsewhere advertises a `-C` alias,
                // but only the long form `--workdir` is recognized here.
                // A trailing `--workdir` with no value is silently ignored.
                i += 1;
                if (i < ctx.positionals.len) {
                    workdir = ctx.positionals[i];
                }
            } else if (!strings.startsWith(arg, "-")) {
                // First non-option argument is the command
                cmd_start = i;
                break;
            }
        }
        if (cmd_start >= ctx.positionals.len) {
            Output.print("error: No command specified.\n", .{});
            Output.print("Usage: bun sandbox exec -- <command> [args...]\n", .{});
            Output.print("\nExamples:\n", .{});
            Output.print(" bun sandbox exec -- /bin/echo hello\n", .{});
            Output.print(" bun sandbox exec -- /bin/sh -c 'id && pwd'\n", .{});
            Output.print("\nFeatures (enabled by default):\n", .{});
            Output.print(" - User namespace (UID 0 inside sandbox)\n", .{});
            Output.print(" - Network namespace (isolated from host)\n", .{});
            Output.print(" - Mount namespace (isolated filesystem)\n", .{});
            Output.print(" - Seccomp-BPF (syscall filtering)\n", .{});
            Global.exit(1);
        }
        const cmd_args = ctx.positionals[cmd_start..];
        // Build environment
        var env_list = std.ArrayListUnmanaged([]const u8){};
        defer env_list.deinit(ctx.allocator);
        // Pass through common environment variables; variables that fail to
        // allocate are skipped (best-effort).
        const pass_vars = [_][:0]const u8{ "PATH", "HOME", "USER", "SHELL", "TERM", "LANG" };
        for (pass_vars) |var_name| {
            if (bun.getenvZ(var_name)) |value| {
                const env_str = std.fmt.allocPrint(ctx.allocator, "{s}={s}", .{ var_name, value }) catch {
                    continue;
                };
                env_list.append(ctx.allocator, env_str) catch continue;
            }
        }
        // Configure sandbox
        const config = Sandbox.SandboxConfig{
            .workdir = workdir,
            .user_namespace = true,
            .mount_namespace = !no_mount,
            .network_namespace = !share_network,
            .share_network = share_network,
            .pid_namespace = false,
            .overlayfs = enable_overlayfs,
            .seccomp = enable_seccomp,
            .seccomp_mode = .strict,
        };
        // Run the sandbox (output goes directly to stdout/stderr)
        var sandbox = Sandbox.Sandbox.init(ctx.allocator, config);
        defer sandbox.deinit();
        const result = sandbox.exec(cmd_args, env_list.items) catch |err| {
            Output.print("error: Sandbox error: {s}\n", .{@errorName(err)});
            Global.exit(1);
        };
        // Propagate the sandboxed command's exit code to our caller.
        Global.exit(result.exit_code);
    }

    /// Show available sandbox features
    /// Probes the kernel via Sandbox.KernelFeatures.detect() and prints a
    /// human-readable report; on non-Linux platforms prints a notice instead.
    fn showFeatures() void {
        if (comptime !bun.Environment.isLinux) {
            Output.prettyln("<b>Sandbox Features (non-Linux):<r>\n", .{});
            Output.print(" Native sandbox execution is only available on Linux.\n", .{});
            Output.print(" On this platform, sandboxing uses process-level isolation only.\n", .{});
            Output.flush();
            return;
        }
        const features = Sandbox.KernelFeatures.detect();
        Output.prettyln("<b>Sandbox Features:<r>\n", .{});
        Output.print(" User Namespaces: {s}\n", .{if (features.user_namespaces) "\x1b[32menabled\x1b[0m" else "\x1b[31mdisabled\x1b[0m"});
        Output.print(" Overlayfs: {s}\n", .{if (features.overlayfs) "\x1b[32mavailable\x1b[0m" else "\x1b[31mnot available\x1b[0m"});
        Output.print(" Seccomp-BPF: {s}\n", .{if (features.seccomp_bpf) "\x1b[32mavailable\x1b[0m" else "\x1b[31mnot available\x1b[0m"});
        Output.prettyln("\n<b>Capabilities:<r>\n", .{});
        if (features.user_namespaces) {
            Output.print(" - Process isolation via Linux namespaces\n", .{});
            Output.print(" - UID/GID mapping (run as root inside sandbox)\n", .{});
        }
        if (features.overlayfs) {
            Output.print(" - Copy-on-write filesystem isolation\n", .{});
        }
        if (features.seccomp_bpf) {
            Output.print(" - Syscall filtering (seccomp-bpf)\n", .{});
        }
        Output.print("\n", .{});
        Output.flush();
    }

    /// Locate the TypeScript sandbox CLI relative to the current working
    /// directory. Checks the in-repo development path first, then
    /// node_modules. Returns null when neither exists or cwd is unavailable.
    fn findSandboxCli(buf: *bun.PathBuffer) ?[]const u8 {
        // Get current working directory
        const cwd = switch (bun.sys.getcwd(buf)) {
            .result => |p| p,
            .err => return null,
        };
        // Try multiple locations
        const locations = [_][]const u8{
            // Development location (relative to bun repo)
            "packages/bun-sandbox/src/cli.ts",
            // Installed as dependency
            "node_modules/bun-sandbox/src/cli.ts",
        };
        for (locations) |rel_path| {
            const parts: []const []const u8 = &.{ cwd, rel_path };
            const full_path = bun.path.joinZ(parts, .auto);
            // Check if file exists using stat
            switch (bun.sys.stat(full_path)) {
                .result => {
                    return full_path;
                },
                .err => continue,
            }
        }
        return null;
    }

    /// Print usage for the Sandboxfile-driven subcommands (the ones handled
    /// by the TypeScript CLI; `exec`/`features` are documented separately).
    pub fn printHelp() void {
        Output.pretty(
            \\<b>Usage: bun sandbox <r><cyan>\<command\><r> <cyan>[options]<r>
            \\
            \\Sandboxfile - Declarative agent sandbox configuration
            \\
            \\<b>Commands:<r>
            \\ <cyan>run<r> Run the sandbox (setup + services + dev)
            \\ <cyan>test<r> Run the sandbox and execute tests
            \\ <cyan>validate<r> Validate a Sandboxfile without running
            \\ <cyan>init<r> Create a new Sandboxfile in the current directory
            \\
            \\<b>Options:<r>
            \\ <cyan>-f, --file<r> Path to Sandboxfile (default: ./Sandboxfile)
            \\ <cyan>-C, --cwd<r> Working directory
            \\ <cyan>-v, --verbose<r> Enable verbose output
            \\ <cyan>-n, --dry-run<r> Show what would be done without executing
            \\ <cyan>-h, --help<r> Show this help message
            \\
            \\<b>Examples:<r>
            \\ bun sandbox run Run using ./Sandboxfile
            \\ bun sandbox test -f sandbox.conf Run tests using custom file
            \\ bun sandbox validate Validate ./Sandboxfile
            \\ bun sandbox init Create a new Sandboxfile
            \\
            \\<b>Sandboxfile directives:<r>
            \\ FROM Base environment (host or container image)
            \\ WORKDIR Project root directory
            \\ RUN Setup commands (run once)
            \\ DEV Development server (PORT=, WATCH=)
            \\ SERVICE Background service (required name, PORT=, WATCH=)
            \\ TEST Test command
            \\ OUTPUT Files to extract from sandbox
            \\ LOGS Log file patterns
            \\ NET Allowed network hosts
            \\ SECRET Secret environment variables
            \\ INFER Auto-generate from repo analysis
            \\
        , .{});
        Output.flush();
    }
};
const bun = @import("bun");
const std = @import("std");
const Output = bun.Output;
const Global = bun.Global;
const Command = bun.cli.Command;
const RunCommand = bun.RunCommand;
const strings = bun.strings;
const Sandbox = @import("../sandbox.zig");

782
src/sandbox.zig Normal file
View File

@@ -0,0 +1,782 @@
//! Linux Sandbox Implementation - Pure Zig, No External Dependencies
//!
//! Provides complete process isolation using Linux kernel features:
//! - User namespaces for privilege isolation (unprivileged containers)
//! - Mount namespaces with overlayfs for copy-on-write filesystem
//! - Network namespaces for network isolation
//! - PID namespaces for process tree isolation
//! - Seccomp-BPF for syscall filtering
//!
//! This is a complete sandbox implementation without bwrap/bubblewrap/firejail.
const std = @import("std");
const bun = @import("bun");
const linux = std.os.linux;
const Output = bun.Output;
// ============================================================================
// Linux Constants
// ============================================================================
// Hand-mirrored from the kernel uapi headers; values are part of the stable
// kernel ABI and must not be changed.

/// Clone flags for creating namespaces (see clone(2) / unshare(2))
pub const CLONE = struct {
    pub const NEWNS: u32 = 0x00020000; // Mount namespace
    pub const NEWUSER: u32 = 0x10000000; // User namespace
    pub const NEWPID: u32 = 0x20000000; // PID namespace
    pub const NEWNET: u32 = 0x40000000; // Network namespace
    pub const NEWIPC: u32 = 0x08000000; // IPC namespace
    pub const NEWUTS: u32 = 0x04000000; // UTS namespace
    pub const NEWCGROUP: u32 = 0x02000000; // Cgroup namespace
};

/// Mount flags (see mount(2), MS_* in <linux/mount.h>)
pub const MS = struct {
    pub const RDONLY: u32 = 1;
    pub const NOSUID: u32 = 2;
    pub const NODEV: u32 = 4;
    pub const NOEXEC: u32 = 8;
    pub const REMOUNT: u32 = 32;
    pub const BIND: u32 = 4096;
    pub const REC: u32 = 16384;
    pub const PRIVATE: u32 = 1 << 18;
    pub const SLAVE: u32 = 1 << 19;
    pub const STRICTATIME: u32 = 1 << 24;
};

/// Seccomp constants (see seccomp(2); SECCOMP_RET_* are filter verdicts,
/// ordered here from most to least severe)
pub const SECCOMP = struct {
    pub const MODE_FILTER: u32 = 2;
    pub const RET_KILL_PROCESS: u32 = 0x80000000;
    pub const RET_KILL_THREAD: u32 = 0x00000000;
    pub const RET_TRAP: u32 = 0x00030000;
    pub const RET_ERRNO: u32 = 0x00050000; // low 16 bits carry the errno value
    pub const RET_ALLOW: u32 = 0x7fff0000;
    pub const RET_LOG: u32 = 0x7ffc0000; // allow, but audit-log the syscall
};

/// prctl constants (see prctl(2))
pub const PR = struct {
    pub const SET_NO_NEW_PRIVS: u32 = 38;
    pub const SET_SECCOMP: u32 = 22;
    pub const GET_SECCOMP: u32 = 21;
};

/// BPF instruction opcodes (classic BPF, used by seccomp filters)
pub const BPF = struct {
    // Instruction classes
    pub const LD: u16 = 0x00;
    pub const LDX: u16 = 0x01;
    pub const ST: u16 = 0x02;
    pub const STX: u16 = 0x03;
    pub const ALU: u16 = 0x04;
    pub const JMP: u16 = 0x05;
    pub const RET: u16 = 0x06;
    pub const MISC: u16 = 0x07;
    // LD/LDX fields
    pub const W: u16 = 0x00; // 32-bit word
    pub const H: u16 = 0x08; // 16-bit half word
    pub const B: u16 = 0x10; // 8-bit byte
    pub const ABS: u16 = 0x20; // absolute offset
    pub const IND: u16 = 0x40;
    pub const MEM: u16 = 0x60;
    pub const LEN: u16 = 0x80;
    pub const MSH: u16 = 0xa0;
    // JMP fields
    pub const JA: u16 = 0x00;
    pub const JEQ: u16 = 0x10;
    pub const JGT: u16 = 0x20;
    pub const JGE: u16 = 0x30;
    pub const JSET: u16 = 0x40;
    pub const K: u16 = 0x00; // immediate value
    pub const X: u16 = 0x08; // index register
};

/// Byte offsets into `struct seccomp_data`, the buffer a seccomp BPF filter
/// reads from. The layout is fixed kernel ABI and identical on every
/// architecture (the original "(for aarch64)" note was misleading).
pub const SECCOMP_DATA = struct {
    pub const nr: u32 = 0; // syscall number
    pub const arch: u32 = 4; // architecture (AUDIT_ARCH_* value)
    pub const instruction_pointer: u32 = 8;
    pub const args: u32 = 16; // syscall arguments (6 * 8 bytes)
};

/// Architecture audit values (AUDIT_ARCH_*, used for the filter's arch check)
pub const AUDIT_ARCH = struct {
    pub const AARCH64: u32 = 0xC00000B7;
    pub const X86_64: u32 = 0xC000003E;
};
// ============================================================================
// BPF Program Builder
// ============================================================================

/// One classic-BPF instruction; layout matches the kernel's
/// `struct sock_filter` (hence `extern`).
pub const BpfInsn = extern struct {
    code: u16, // opcode (class | size | mode, from the BPF table above)
    jt: u8, // jump offset when the comparison is true
    jf: u8, // jump offset when the comparison is false
    k: u32, // generic operand (immediate / offset)
};

/// A BPF program; layout matches the kernel's `struct sock_fprog`.
pub const BpfProg = extern struct {
    len: u16,
    filter: [*]const BpfInsn,
};

/// Build a non-branching BPF instruction (load/return/etc.)
fn bpfStmt(code: u16, k: u32) BpfInsn {
    return BpfInsn{ .code = code, .jt = 0, .jf = 0, .k = k };
}

/// Build a conditional BPF jump: skip `jt` instructions when the comparison
/// against `k` succeeds, `jf` when it fails.
fn bpfJump(code: u16, k: u32, jt: u8, jf: u8) BpfInsn {
    return BpfInsn{ .code = code, .jt = jt, .jf = jf, .k = k };
}
// ============================================================================
// Sandbox Configuration
// ============================================================================

/// Sandbox configuration. The defaults give an unprivileged, fully isolated
/// sandbox: user + mount + network namespaces on, seccomp strict, no
/// overlayfs, no PID namespace.
pub const SandboxConfig = struct {
    /// Root directory for the sandbox (lower layer for overlayfs)
    root_dir: []const u8 = "/",
    /// Working directory inside the sandbox
    workdir: []const u8 = "/tmp",
    /// Enable user namespace (required for unprivileged operation)
    user_namespace: bool = true,
    /// Enable mount namespace
    mount_namespace: bool = true,
    /// Enable network namespace (isolated by default)
    network_namespace: bool = true,
    /// Share network with host (disables network namespace)
    share_network: bool = false,
    /// Enable PID namespace
    pid_namespace: bool = false,
    /// Enable overlayfs (copy-on-write filesystem)
    /// If true, creates a tmpfs upper layer automatically
    overlayfs: bool = false,
    /// Custom upper directory for overlayfs (optional, uses tmpfs if null)
    /// NOTE(review): not yet consulted by the current overlayfs setup.
    upper_dir: ?[]const u8 = null,
    /// Enable seccomp syscall filtering
    seccomp: bool = true,
    /// Seccomp mode: "strict" blocks dangerous syscalls, "permissive" logs only
    seccomp_mode: SeccompMode = .strict,
    /// Directories to bind mount read-only into sandbox
    readonly_binds: []const []const u8 = &.{},
    /// Directories to bind mount read-write into sandbox
    readwrite_binds: []const []const u8 = &.{},
    /// Hostname inside the sandbox
    hostname: []const u8 = "sandbox",
    /// UID inside the sandbox (0 = root)
    uid: u32 = 0,
    /// GID inside the sandbox (0 = root)
    gid: u32 = 0,

    pub const SeccompMode = enum {
        strict, // Kill process on disallowed syscall
        permissive, // Log but allow (for debugging)
        disabled, // No filtering
    };
};
/// Result of sandbox execution
pub const SandboxResult = struct {
    /// Child exit code; set to 128 + signal when the child was signaled.
    exit_code: u8,
    /// Terminating signal, when the child was killed by a signal.
    signal: ?u8 = null,
    /// Captured output buffers. Sandbox.exec never populates these — the
    /// child inherits the parent's stdout/stderr directly.
    stdout: ?[]const u8 = null,
    stderr: ?[]const u8 = null,
};
/// Sandbox errors. Each variant names the setup stage that failed; syscall
/// errno detail is not preserved.
pub const SandboxError = error{
    NamespaceCreationFailed,
    MountFailed,
    PivotRootFailed,
    UidMapFailed,
    GidMapFailed,
    SeccompFailed,
    ForkFailed,
    ExecFailed,
    PipeFailed,
    WaitFailed,
    OverlayfsSetupFailed,
    OutOfMemory,
    TmpfsCreateFailed,
};
// ============================================================================
// Sandbox Implementation
// ============================================================================

/// Linux Sandbox - Pure Zig implementation
///
/// Lifecycle: `init` -> `exec` -> `deinit`.
///
/// `exec` forks. The child unshares the configured namespaces, sets up the
/// filesystem and a seccomp filter, then execve()s the command; the parent
/// writes the child's uid_map/gid_map and waits for it. Two pipes order the
/// handoff: the maps can only be written by the parent, after the child has
/// unshared CLONE_NEWUSER but before it continues.
pub const Sandbox = struct {
    config: SandboxConfig,
    allocator: std.mem.Allocator,
    /// Pipes for parent-child synchronization
    /// pipe1: child signals parent that unshare is done
    /// pipe2: parent signals child that uid/gid maps are written
    /// A value of -1 marks a closed (or never-opened) end.
    pipe1: [2]i32 = .{ -1, -1 },
    pipe2: [2]i32 = .{ -1, -1 },
    /// Child PID (valid in the parent after a successful fork)
    child_pid: ?i32 = null,
    /// Temporary directories created for overlayfs.
    /// NOTE(review): nothing assigns these yet — setupOverlayfs uses fixed
    /// stack paths — so the frees in deinit are currently dormant.
    tmp_upper: ?[]u8 = null,
    tmp_work: ?[]u8 = null,
    tmp_merged: ?[]u8 = null,

    pub fn init(allocator: std.mem.Allocator, config: SandboxConfig) Sandbox {
        return Sandbox{
            .config = config,
            .allocator = allocator,
        };
    }

    pub fn deinit(self: *Sandbox) void {
        // Close any pipe ends still open (ends closed during exec are -1)
        inline for (.{ &self.pipe1, &self.pipe2 }) |pipe| {
            if (pipe[0] != -1) _ = linux.close(pipe[0]);
            if (pipe[1] != -1) _ = linux.close(pipe[1]);
        }
        // Free temp directory paths
        if (self.tmp_upper) |p| self.allocator.free(p);
        if (self.tmp_work) |p| self.allocator.free(p);
        if (self.tmp_merged) |p| self.allocator.free(p);
    }

    /// Execute a command in the sandbox.
    ///
    /// argv: command and arguments (argv[0] is the executable path).
    /// env:  environment strings in "KEY=value" form.
    ///
    /// Returns the child's exit status; when the child dies from a signal,
    /// exit_code is 128 + signal number (shell convention).
    pub fn exec(
        self: *Sandbox,
        argv: []const []const u8,
        env: []const []const u8,
    ) SandboxError!SandboxResult {
        if (argv.len == 0) {
            return SandboxError.ExecFailed;
        }
        // Create synchronization pipes
        if (@as(isize, @bitCast(linux.pipe2(&self.pipe1, .{}))) < 0) {
            return SandboxError.PipeFailed;
        }
        if (@as(isize, @bitCast(linux.pipe2(&self.pipe2, .{}))) < 0) {
            return SandboxError.PipeFailed;
        }
        // Fork
        const fork_result = linux.fork();
        const fork_pid: isize = @bitCast(fork_result);
        if (fork_pid < 0) {
            return SandboxError.ForkFailed;
        }
        if (fork_pid == 0) {
            // ===== CHILD PROCESS =====
            self.runChild(argv, env) catch |err| {
                // Report the error name on stderr and exit 127 (the
                // conventional "command could not be run" status).
                const msg = @errorName(err);
                _ = linux.write(2, msg.ptr, msg.len);
                _ = linux.write(2, "\n", 1);
                linux.exit(127);
            };
            linux.exit(0);
        }
        // ===== PARENT PROCESS =====
        self.child_pid = @intCast(fork_result);
        // Close unused pipe ends
        _ = linux.close(self.pipe1[1]); // Close write end of pipe1
        self.pipe1[1] = -1;
        _ = linux.close(self.pipe2[0]); // Close read end of pipe2
        self.pipe2[0] = -1;
        // Wait for child to signal it has unshared. If the child died early
        // this read returns 0 on EOF and the map writes below fail cleanly.
        var buf: [1]u8 = undefined;
        _ = linux.read(self.pipe1[0], &buf, 1);
        // Setup UID/GID mappings
        if (self.config.user_namespace) {
            self.setupUidGidMaps() catch |err| {
                _ = self.killChild();
                return err;
            };
        }
        // Signal child to continue
        _ = linux.write(self.pipe2[1], "G", 1);
        // Wait for child
        var status: u32 = 0;
        const wait_result = linux.waitpid(self.child_pid.?, &status, 0);
        if (@as(isize, @bitCast(wait_result)) < 0) {
            return SandboxError.WaitFailed;
        }
        var result = SandboxResult{ .exit_code = 0 };
        if (linux.W.IFEXITED(status)) {
            result.exit_code = linux.W.EXITSTATUS(status);
        } else if (linux.W.IFSIGNALED(status)) {
            // Signals are < 128, so 128 + signal fits in u8.
            result.signal = @truncate(linux.W.TERMSIG(status));
            result.exit_code = 128 + result.signal.?;
        }
        return result;
    }

    /// Best-effort SIGKILL of the forked child (used when setup fails).
    fn killChild(self: *Sandbox) void {
        if (self.child_pid) |pid| {
            _ = linux.kill(pid, linux.SIG.KILL);
        }
    }

    /// Write /proc/<pid>/uid_map, setgroups, and gid_map for the child's new
    /// user namespace, mapping config.uid/gid inside to the real ids outside.
    /// Must run in the parent, after the child unshared CLONE_NEWUSER.
    fn setupUidGidMaps(self: *Sandbox) SandboxError!void {
        const pid = self.child_pid orelse return SandboxError.UidMapFailed;
        const real_uid = linux.getuid();
        const real_gid = linux.getgid();
        // Write uid_map: <inside_uid> <outside_uid> <count>
        var path_buf: [64]u8 = undefined;
        var content_buf: [64]u8 = undefined;
        const uid_path = std.fmt.bufPrintZ(&path_buf, "/proc/{d}/uid_map", .{pid}) catch
            return SandboxError.UidMapFailed;
        const uid_content = std.fmt.bufPrint(&content_buf, "{d} {d} 1\n", .{ self.config.uid, real_uid }) catch
            return SandboxError.UidMapFailed;
        try writeFile(uid_path, uid_content);
        // Deny setgroups (required before writing gid_map on modern kernels).
        // Failure is ignored: the file does not exist on older kernels.
        const setgroups_path = std.fmt.bufPrintZ(&path_buf, "/proc/{d}/setgroups", .{pid}) catch
            return SandboxError.GidMapFailed;
        writeFile(setgroups_path, "deny\n") catch {};
        // Write gid_map
        const gid_path = std.fmt.bufPrintZ(&path_buf, "/proc/{d}/gid_map", .{pid}) catch
            return SandboxError.GidMapFailed;
        const gid_content = std.fmt.bufPrint(&content_buf, "{d} {d} 1\n", .{ self.config.gid, real_gid }) catch
            return SandboxError.GidMapFailed;
        try writeFile(gid_path, gid_content);
    }

    /// Child process entry point: unshare namespaces, sync with the parent,
    /// set up filesystem and seccomp, chdir, then execve the command.
    fn runChild(
        self: *Sandbox,
        argv: []const []const u8,
        env: []const []const u8,
    ) SandboxError!void {
        // Close unused pipe ends
        _ = linux.close(self.pipe1[0]);
        _ = linux.close(self.pipe2[1]);
        // Build unshare flags
        var flags: u32 = 0;
        if (self.config.user_namespace) flags |= CLONE.NEWUSER;
        if (self.config.mount_namespace) flags |= CLONE.NEWNS;
        if (self.config.network_namespace and !self.config.share_network) flags |= CLONE.NEWNET;
        if (self.config.pid_namespace) flags |= CLONE.NEWPID;
        // Unshare namespaces
        if (@as(isize, @bitCast(linux.unshare(flags))) < 0) {
            return SandboxError.NamespaceCreationFailed;
        }
        // Signal parent: unshare done
        _ = linux.write(self.pipe1[1], "U", 1);
        _ = linux.close(self.pipe1[1]);
        // Wait for parent to write uid/gid maps
        var buf: [1]u8 = undefined;
        _ = linux.read(self.pipe2[0], &buf, 1);
        _ = linux.close(self.pipe2[0]);
        // Setup filesystem (must precede seccomp: the filter denies mount
        // and pivot_root, which the filesystem setup needs)
        if (self.config.mount_namespace) {
            try self.setupFilesystem();
        }
        // Setup seccomp
        if (self.config.seccomp and self.config.seccomp_mode != .disabled) {
            try self.setupSeccomp();
        }
        // Change to working directory (best-effort). Bounds-check the copy:
        // previously a workdir path of 256+ bytes triggered an out-of-range
        // @memcpy (undefined behavior); such paths now skip the chdir.
        var workdir_buf: [256]u8 = undefined;
        if (self.config.workdir.len < workdir_buf.len) {
            @memcpy(workdir_buf[0..self.config.workdir.len], self.config.workdir);
            workdir_buf[self.config.workdir.len] = 0;
            _ = linux.chdir(@ptrCast(&workdir_buf));
        }
        // Execute command
        try self.execCommand(argv, env);
    }

    /// Setup the sandboxed filesystem
    fn setupFilesystem(self: *Sandbox) SandboxError!void {
        // Make all mounts private so changes cannot propagate to the host
        _ = linux.mount("none", "/", null, MS.REC | MS.PRIVATE, 0);
        if (self.config.overlayfs) {
            try self.setupOverlayfs();
        } else {
            // Just setup basic mounts without overlayfs
            try self.setupBasicMounts();
        }
    }

    /// Create overlayfs upper/work/merged directories under /tmp, mount the
    /// overlay, and pivot into the merged view. Falls back to basic mounts
    /// when the overlay mount fails (e.g. overlayfs unavailable).
    fn setupOverlayfs(self: *Sandbox) SandboxError!void {
        // Base directory for the overlay layers.
        // NOTE(review): the "XXXXXX" suffix is never randomized (plain
        // mkdir, not mkdtemp), so every sandbox shares this predictable
        // /tmp path — consider randomizing to avoid collisions.
        const tmpdir = "/tmp/.bun-sandbox-XXXXXX";
        var tmpdir_buf: [64]u8 = undefined;
        @memcpy(tmpdir_buf[0..tmpdir.len], tmpdir);
        tmpdir_buf[tmpdir.len] = 0;
        // Create base tmpdir
        if (@as(isize, @bitCast(linux.mkdir(@ptrCast(&tmpdir_buf), 0o700))) < 0) {
            // Directory might exist, try to continue
        }
        // Create upper, work, and merged directories
        var upper_buf: [128]u8 = undefined;
        var work_buf: [128]u8 = undefined;
        var merged_buf: [128]u8 = undefined;
        const upper_path = std.fmt.bufPrintZ(&upper_buf, "{s}/upper", .{tmpdir}) catch
            return SandboxError.OverlayfsSetupFailed;
        const work_path = std.fmt.bufPrintZ(&work_buf, "{s}/work", .{tmpdir}) catch
            return SandboxError.OverlayfsSetupFailed;
        const merged_path = std.fmt.bufPrintZ(&merged_buf, "{s}/merged", .{tmpdir}) catch
            return SandboxError.OverlayfsSetupFailed;
        _ = linux.mkdir(upper_path, 0o755);
        _ = linux.mkdir(work_path, 0o755);
        _ = linux.mkdir(merged_path, 0o755);
        // Mount overlayfs: root_dir is the read-only lower layer; upper/work
        // hold the copy-on-write state
        var opts_buf: [512]u8 = undefined;
        const opts = std.fmt.bufPrintZ(&opts_buf, "lowerdir={s},upperdir={s},workdir={s}", .{
            self.config.root_dir,
            upper_path,
            work_path,
        }) catch return SandboxError.OverlayfsSetupFailed;
        const mount_result = linux.mount("overlay", merged_path, "overlay", 0, @intFromPtr(opts.ptr));
        if (@as(isize, @bitCast(mount_result)) < 0) {
            // Overlayfs might not be available, fall back to basic mounts
            return self.setupBasicMounts();
        }
        // Pivot root to the merged directory
        try self.pivotRoot(merged_path);
    }

    /// Setup basic mounts without overlayfs: a fresh /proc and a tmpfs /tmp.
    fn setupBasicMounts(self: *Sandbox) SandboxError!void {
        _ = self;
        // Mount a new /proc
        _ = linux.mount("proc", "/proc", "proc", MS.NOSUID | MS.NODEV | MS.NOEXEC, 0);
        // Mount tmpfs on /tmp
        _ = linux.mount("tmpfs", "/tmp", "tmpfs", MS.NOSUID | MS.NODEV, 0);
        // Mount /dev/null, /dev/zero, /dev/random, /dev/urandom
        // These are needed for many programs
        // Note: In a full sandbox we'd create device nodes, but that requires CAP_MKNOD
    }

    /// Pivot root into new_root, then lazily detach and remove the old root
    /// so the host filesystem is no longer reachable from inside the sandbox.
    fn pivotRoot(self: *Sandbox, new_root: [:0]const u8) SandboxError!void {
        _ = self;
        // Create directory for old root
        var put_old_buf: [256]u8 = undefined;
        const put_old = std.fmt.bufPrintZ(&put_old_buf, "{s}/.old_root", .{new_root}) catch
            return SandboxError.PivotRootFailed;
        _ = linux.mkdir(put_old, 0o755);
        // Change to new root
        if (@as(isize, @bitCast(linux.chdir(new_root))) < 0) {
            return SandboxError.PivotRootFailed;
        }
        // pivot_root(new_root, put_old)
        const result = linux.syscall2(
            .pivot_root,
            @intFromPtr(new_root.ptr),
            @intFromPtr(put_old.ptr),
        );
        if (@as(isize, @bitCast(result)) < 0) {
            return SandboxError.PivotRootFailed;
        }
        // Change to new root
        _ = linux.chdir("/");
        // Unmount old root (MNT_DETACH = lazy) and remove the mount point
        _ = linux.umount2("/.old_root", linux.MNT.DETACH);
        _ = linux.rmdir("/.old_root");
    }

    /// Setup seccomp-BPF syscall filtering
    fn setupSeccomp(self: *Sandbox) SandboxError!void {
        // Set no_new_privs - required for unprivileged seccomp
        const nnp_result = linux.syscall5(.prctl, PR.SET_NO_NEW_PRIVS, 1, 0, 0, 0);
        if (@as(isize, @bitCast(nnp_result)) < 0) {
            return SandboxError.SeccompFailed;
        }
        // Build BPF filter
        const filter = self.buildSeccompFilter();
        const prog = BpfProg{
            .len = @intCast(filter.len),
            .filter = filter.ptr,
        };
        // Install seccomp filter via the seccomp(2) syscall
        const seccomp_result = linux.syscall3(
            .seccomp,
            SECCOMP.MODE_FILTER,
            0, // flags
            @intFromPtr(&prog),
        );
        if (@as(isize, @bitCast(seccomp_result)) < 0) {
            // Try prctl fallback for older kernels
            const prctl_result = linux.syscall5(.prctl, PR.SET_SECCOMP, SECCOMP.MODE_FILTER, @intFromPtr(&prog), 0, 0);
            if (@as(isize, @bitCast(prctl_result)) < 0) {
                return SandboxError.SeccompFailed;
            }
        }
    }

    /// Select the seccomp BPF filter for the configured mode.
    ///
    /// The filter always kills the process on an architecture mismatch (to
    /// prevent syscall-confusion attacks), then matches a denylist of
    /// dangerous syscalls. The verdict for a denied syscall depends on
    /// seccomp_mode:
    ///   .strict     -> fail the syscall with EPERM
    ///   .permissive -> audit-log the syscall but allow it (debugging)
    ///   .disabled   -> allow everything (setupSeccomp is skipped in this
    ///                  mode anyway, so this arm is defensive)
    ///
    /// Fix: the previous version computed a per-mode action but never used
    /// it, so .permissive behaved exactly like .strict.
    fn buildSeccompFilter(self: *Sandbox) []const BpfInsn {
        return switch (self.config.seccomp_mode) {
            .strict => comptime buildFilterForAction(SECCOMP.RET_ERRNO | 1), // errno 1 = EPERM
            .permissive => comptime buildFilterForAction(SECCOMP.RET_LOG),
            .disabled => comptime buildFilterForAction(SECCOMP.RET_ALLOW),
        };
    }

    /// Comptime helper: build the denylist BPF program with `deny_action`
    /// as the verdict for blocked syscalls. Returns a slice into static
    /// (comptime-materialized) memory.
    fn buildFilterForAction(comptime deny_action: u32) []const BpfInsn {
        const arch = @import("builtin").cpu.arch;
        // AUDIT_ARCH value the kernel reports for this build target
        const arch_value: u32 = comptime if (arch == .aarch64)
            AUDIT_ARCH.AARCH64
        else if (arch == .x86_64)
            AUDIT_ARCH.X86_64
        else
            @compileError("Unsupported architecture for seccomp");
        const filter = comptime blk: {
            var f: [32]BpfInsn = undefined;
            var i: usize = 0;
            // Load architecture field of struct seccomp_data
            f[i] = bpfStmt(BPF.LD | BPF.W | BPF.ABS, SECCOMP_DATA.arch);
            i += 1;
            // Check architecture; kill outright on mismatch
            f[i] = bpfJump(BPF.JMP | BPF.JEQ | BPF.K, arch_value, 1, 0);
            i += 1;
            f[i] = bpfStmt(BPF.RET | BPF.K, SECCOMP.RET_KILL_PROCESS);
            i += 1;
            // Load syscall number
            f[i] = bpfStmt(BPF.LD | BPF.W | BPF.ABS, SECCOMP_DATA.nr);
            i += 1;
            // Denylist; syscall numbers differ per architecture.
            const denied = [_]u32{
                if (arch == .aarch64) 117 else 101, // ptrace (sandbox escape)
                if (arch == .aarch64) 40 else 165, // mount
                if (arch == .aarch64) 39 else 166, // umount2
                if (arch == .aarch64) 41 else 155, // pivot_root
                if (arch == .aarch64) 104 else 246, // kexec_load
                if (arch == .aarch64) 142 else 169, // reboot
            };
            for (denied) |nr| {
                f[i] = bpfJump(BPF.JMP | BPF.JEQ | BPF.K, nr, 0, 1);
                i += 1;
                f[i] = bpfStmt(BPF.RET | BPF.K, deny_action);
                i += 1;
            }
            // Allow everything else
            f[i] = bpfStmt(BPF.RET | BPF.K, SECCOMP.RET_ALLOW);
            i += 1;
            break :blk f[0..i].*;
        };
        return &filter;
    }

    /// Build NUL-terminated argv/envp arrays and execve the command. On
    /// success this never returns; the allocations are intentionally not
    /// freed because execve replaces the process image (and on failure the
    /// child process exits immediately anyway).
    fn execCommand(self: *Sandbox, argv: []const []const u8, env: []const []const u8) SandboxError!void {
        // Build null-terminated argv
        const argv_ptrs = self.allocator.alloc(?[*:0]const u8, argv.len + 1) catch
            return SandboxError.OutOfMemory;
        for (argv, 0..) |arg, i| {
            const arg_z = self.allocator.dupeZ(u8, arg) catch
                return SandboxError.OutOfMemory;
            argv_ptrs[i] = arg_z.ptr;
        }
        argv_ptrs[argv.len] = null;
        // Build null-terminated env
        const env_ptrs = self.allocator.alloc(?[*:0]const u8, env.len + 1) catch
            return SandboxError.OutOfMemory;
        for (env, 0..) |e, i| {
            const env_z = self.allocator.dupeZ(u8, e) catch
                return SandboxError.OutOfMemory;
            env_ptrs[i] = env_z.ptr;
        }
        env_ptrs[env.len] = null;
        // execve
        _ = linux.execve(
            argv_ptrs[0].?,
            @ptrCast(argv_ptrs.ptr),
            @ptrCast(env_ptrs.ptr),
        );
        // If we get here, execve failed
        return SandboxError.ExecFailed;
    }
};
// ============================================================================
// Utility Functions
// ============================================================================

/// Write `content` to `path` using raw syscalls (no libc, safe after fork).
///
/// Errors surface as UidMapFailed because the only callers write the
/// /proc/<pid>/{uid_map,setgroups,gid_map} files.
///
/// Fix: the previous version issued a single write and treated a short
/// write as success; now it loops until every byte is written and fails on
/// error or zero progress.
fn writeFile(path: [:0]const u8, content: []const u8) SandboxError!void {
    const fd = linux.open(path, .{ .ACCMODE = .WRONLY }, 0);
    if (@as(isize, @bitCast(fd)) < 0) {
        return SandboxError.UidMapFailed;
    }
    defer _ = linux.close(@intCast(fd));
    var written: usize = 0;
    while (written < content.len) {
        const rc = linux.write(@intCast(fd), content.ptr + written, content.len - written);
        const n = @as(isize, @bitCast(rc));
        if (n <= 0) {
            return SandboxError.UidMapFailed;
        }
        written += @intCast(n);
    }
}
// ============================================================================
// Public API
// ============================================================================

/// Convenience wrapper: construct a Sandbox for `config`, execute `argv`
/// with environment `env`, and tear the sandbox down again. Returns the
/// child's exit status.
pub fn run(
    allocator: std.mem.Allocator,
    config: SandboxConfig,
    argv: []const []const u8,
    env: []const []const u8,
) SandboxError!SandboxResult {
    var box = Sandbox.init(allocator, config);
    defer box.deinit();
    return box.exec(argv, env);
}
/// Check available kernel features
/// Each probe reads a kernel interface with raw syscalls, so detection is
/// safe to call early and without an allocator.
pub const KernelFeatures = struct {
    user_namespaces: bool = false,
    overlayfs: bool = false,
    seccomp_bpf: bool = false,

    /// Probe the running kernel for all features at once.
    pub fn detect() KernelFeatures {
        return KernelFeatures{
            .user_namespaces = checkUserNamespaces(),
            .overlayfs = checkOverlayfs(),
            .seccomp_bpf = checkSeccompBpf(),
        };
    }

    /// Reads /proc/sys/kernel/unprivileged_userns_clone, a sysctl present
    /// only on some distros (notably Debian derivatives). When the file is
    /// absent, unprivileged user namespaces are assumed to be enabled.
    fn checkUserNamespaces() bool {
        var buf: [16]u8 = undefined;
        const fd = linux.open("/proc/sys/kernel/unprivileged_userns_clone\x00", .{ .ACCMODE = .RDONLY }, 0);
        if (@as(isize, @bitCast(fd)) < 0) return true; // File doesn't exist = enabled
        defer _ = linux.close(@intCast(fd));
        const n = linux.read(@intCast(fd), &buf, buf.len);
        if (@as(isize, @bitCast(n)) <= 0) return false;
        return buf[0] == '1';
    }

    /// True when "overlay" is listed in /proc/filesystems.
    fn checkOverlayfs() bool {
        var buf: [4096]u8 = undefined;
        const fd = linux.open("/proc/filesystems\x00", .{ .ACCMODE = .RDONLY }, 0);
        if (@as(isize, @bitCast(fd)) < 0) return false;
        defer _ = linux.close(@intCast(fd));
        const n = linux.read(@intCast(fd), &buf, buf.len);
        if (@as(isize, @bitCast(n)) <= 0) return false;
        const len: usize = @intCast(@as(isize, @bitCast(n)));
        return std.mem.indexOf(u8, buf[0..len], "overlay") != null;
    }

    /// True when prctl(PR_GET_SECCOMP) succeeds, i.e. the kernel exposes
    /// seccomp at all.
    fn checkSeccompBpf() bool {
        const result = linux.syscall5(.prctl, PR.GET_SECCOMP, 0, 0, 0, 0);
        return @as(isize, @bitCast(result)) >= 0;
    }
};

580
src/sandboxfile.zig Normal file
View File

@@ -0,0 +1,580 @@
// Sandboxfile: A declarative spec for agent sandboxes
//
// Example:
// ```
// # Sandboxfile
//
// FROM host
// WORKDIR .
//
// RUN bun install
//
// DEV PORT=3000 WATCH=src/** bun run dev
// SERVICE db PORT=5432 docker compose up postgres
// SERVICE redis PORT=6379 redis-server
// TEST bun test
//
// OUTPUT src/
// OUTPUT tests/
// OUTPUT package.json
//
// LOGS logs/*
//
// NET registry.npmjs.org
// NET api.stripe.com
//
// SECRET STRIPE_API_KEY
// ```
//
// Directives:
// - FROM — base environment (host or an image)
// - WORKDIR — project root
// - RUN — setup commands (once per agent)
// - DEV — primary dev server (optional name, supports PORT, WATCH)
// - SERVICE — background process (required name, supports PORT, WATCH)
// - TEST — verification command (optional name, same syntax)
// - OUTPUT — files extracted from agent (everything else is ephemeral)
// - LOGS — log streams agent can tail
// - NET — allowed external hosts (default deny-all, services implicitly allowed)
// - SECRET — env vars agent can use but not inspect
// - INFER — auto-generate lockfile from repo analysis
const std = @import("std");
const bun = @import("root").bun;
const logger = bun.logger;
const strings = bun.strings;
const string = []const u8;
const Allocator = std.mem.Allocator;
const OOM = bun.OOM;
/// Represents a parsed Sandboxfile.
///
/// Ownership note: every string field and list element is a slice into the
/// source buffer that was handed to the Parser — this struct copies nothing.
/// The source buffer must therefore outlive the Sandboxfile, and `deinit`
/// frees only the ArrayList backing storage, never the string contents.
pub const Sandboxfile = struct {
    /// Base environment (e.g., "host" or a container image)
    from: ?[]const u8 = null,
    /// Project root directory
    workdir: ?[]const u8 = null,
    /// Setup commands to run once per agent
    run_commands: std.ArrayListUnmanaged([]const u8) = .{},
    /// Primary dev server configuration
    dev: ?Process = null,
    /// Background services
    services: std.ArrayListUnmanaged(Process) = .{},
    /// Test commands
    tests: std.ArrayListUnmanaged(Process) = .{},
    /// Files/directories to extract from agent
    outputs: std.ArrayListUnmanaged([]const u8) = .{},
    /// Log file patterns agent can tail
    logs: std.ArrayListUnmanaged([]const u8) = .{},
    /// Allowed external network hosts
    net_hosts: std.ArrayListUnmanaged([]const u8) = .{},
    /// Environment variables agent can use but not inspect
    secrets: std.ArrayListUnmanaged([]const u8) = .{},
    /// INFER directive patterns (for auto-generation)
    infer_patterns: std.ArrayListUnmanaged([]const u8) = .{},

    /// Represents a process (DEV, SERVICE, or TEST).
    /// Holds no allocations of its own — all slices point into the parsed
    /// source, so there is no Process.deinit.
    pub const Process = struct {
        /// Process name (required for SERVICE, optional for DEV/TEST)
        name: ?[]const u8 = null,
        /// Port number if specified
        port: ?u16 = null,
        /// File watch patterns
        watch: ?[]const u8 = null,
        /// Command to execute
        command: []const u8,

        /// std.fmt integration. Renders as
        /// `Process{ name="...", port=..., watch="...", command="..." }`,
        /// omitting optional fields that are unset.
        pub fn format(self: Process, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
            _ = fmt; // format specifier is ignored
            _ = options; // formatting options are ignored
            try writer.writeAll("Process{ ");
            if (self.name) |n| {
                try writer.print("name=\"{s}\", ", .{n});
            }
            if (self.port) |p| {
                try writer.print("port={d}, ", .{p});
            }
            if (self.watch) |w| {
                try writer.print("watch=\"{s}\", ", .{w});
            }
            try writer.print("command=\"{s}\" }}", .{self.command});
        }
    };

    /// Release the ArrayList backing storage. The string slices themselves
    /// are NOT freed here — they belong to the source buffer the parser read.
    pub fn deinit(self: *Sandboxfile, allocator: Allocator) void {
        self.run_commands.deinit(allocator);
        self.services.deinit(allocator);
        self.tests.deinit(allocator);
        self.outputs.deinit(allocator);
        self.logs.deinit(allocator);
        self.net_hosts.deinit(allocator);
        self.secrets.deinit(allocator);
        self.infer_patterns.deinit(allocator);
    }

    /// std.fmt integration: multi-line human-readable dump of the whole
    /// configuration. List-valued fields are printed only when non-empty.
    pub fn format(self: Sandboxfile, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
        _ = fmt; // format specifier is ignored
        _ = options; // formatting options are ignored
        try writer.writeAll("Sandboxfile {\n");
        if (self.from) |f| try writer.print("  from: \"{s}\"\n", .{f});
        if (self.workdir) |w| try writer.print("  workdir: \"{s}\"\n", .{w});
        if (self.run_commands.items.len > 0) {
            try writer.writeAll("  run_commands: [\n");
            for (self.run_commands.items) |cmd| {
                try writer.print("    \"{s}\"\n", .{cmd});
            }
            try writer.writeAll("  ]\n");
        }
        if (self.dev) |dev| {
            try writer.print("  dev: {}\n", .{dev});
        }
        if (self.services.items.len > 0) {
            try writer.writeAll("  services: [\n");
            for (self.services.items) |svc| {
                try writer.print("    {}\n", .{svc});
            }
            try writer.writeAll("  ]\n");
        }
        if (self.tests.items.len > 0) {
            try writer.writeAll("  tests: [\n");
            for (self.tests.items) |t| {
                try writer.print("    {}\n", .{t});
            }
            try writer.writeAll("  ]\n");
        }
        if (self.outputs.items.len > 0) {
            try writer.writeAll("  outputs: [\n");
            for (self.outputs.items) |o| {
                try writer.print("    \"{s}\"\n", .{o});
            }
            try writer.writeAll("  ]\n");
        }
        if (self.logs.items.len > 0) {
            try writer.writeAll("  logs: [\n");
            for (self.logs.items) |l| {
                try writer.print("    \"{s}\"\n", .{l});
            }
            try writer.writeAll("  ]\n");
        }
        if (self.net_hosts.items.len > 0) {
            try writer.writeAll("  net_hosts: [\n");
            for (self.net_hosts.items) |n| {
                try writer.print("    \"{s}\"\n", .{n});
            }
            try writer.writeAll("  ]\n");
        }
        if (self.secrets.items.len > 0) {
            try writer.writeAll("  secrets: [\n");
            for (self.secrets.items) |s| {
                try writer.print("    \"{s}\"\n", .{s});
            }
            try writer.writeAll("  ]\n");
        }
        if (self.infer_patterns.items.len > 0) {
            try writer.writeAll("  infer: [\n");
            for (self.infer_patterns.items) |i| {
                try writer.print("    \"{s}\"\n", .{i});
            }
            try writer.writeAll("  ]\n");
        }
        try writer.writeAll("}\n");
    }
};
/// Parser for Sandboxfile format.
///
/// Line-oriented: each non-empty, non-comment line is `DIRECTIVE rest...`.
/// All strings stored into `result` are slices of `src`, so `src` must
/// outlive the returned Sandboxfile.
pub const Parser = struct {
    source: logger.Source, // wraps `src` with a path, used for diagnostics
    src: []const u8, // raw file contents being parsed (not owned)
    log: logger.Log, // collects warnings (e.g., unknown directives)
    allocator: Allocator,
    result: Sandboxfile = .{}, // accumulated output; moved to caller by parse()

    const whitespace_chars = " \t";
    // NOTE(review): currently unused — '\r' is stripped inline in parse().
    const line_terminators = "\n\r";

    /// Directive keywords recognized at the start of a line (case-sensitive).
    pub const Directive = enum {
        FROM,
        WORKDIR,
        RUN,
        DEV,
        SERVICE,
        TEST,
        OUTPUT,
        LOGS,
        NET,
        SECRET,
        INFER,
    };

    /// Create a parser over `src`. `path` is only used for diagnostics.
    /// Neither `path` nor `src` is copied.
    pub fn init(allocator: Allocator, path: []const u8, src: []const u8) Parser {
        return .{
            .log = logger.Log.init(allocator),
            .src = src,
            .source = logger.Source.initPathString(path, src),
            .allocator = allocator,
        };
    }

    /// Free the diagnostics log AND the accumulated result. Do not call this
    /// after ownership of `result` has been handed to a caller.
    pub fn deinit(self: *Parser) void {
        self.log.deinit();
        self.result.deinit(self.allocator);
    }

    /// Parse the Sandboxfile and return the result.
    /// Blank lines and lines starting with '#' are skipped. The returned
    /// Sandboxfile is a copy of `self.result` (ownership moves to caller).
    pub fn parse(self: *Parser) OOM!Sandboxfile {
        var iter = std.mem.splitScalar(u8, self.src, '\n');
        while (iter.next()) |line_raw| {
            // Handle Windows line endings: drop a trailing '\r' left by the
            // '\n' split.
            const line_trimmed = if (line_raw.len > 0 and line_raw[line_raw.len - 1] == '\r')
                line_raw[0 .. line_raw.len - 1]
            else
                line_raw;
            // Trim leading/trailing whitespace
            const line = std.mem.trim(u8, line_trimmed, whitespace_chars);
            // Skip empty lines and comments
            if (line.len == 0 or line[0] == '#') continue;
            try self.parseLine(line);
        }
        return self.result;
    }

    /// Dispatch a single non-empty line on its leading directive keyword.
    /// `line` must already be trimmed. Unknown directives produce a warning
    /// (not an error) and the line is otherwise ignored.
    fn parseLine(self: *Parser, line: []const u8) OOM!void {
        // Find the directive (first word)
        const first_space = std.mem.indexOfAny(u8, line, whitespace_chars);
        const directive_str = if (first_space) |idx| line[0..idx] else line;
        const rest = if (first_space) |idx|
            std.mem.trimLeft(u8, line[idx..], whitespace_chars)
        else
            "";
        // Parse directive
        const directive = std.meta.stringToEnum(Directive, directive_str) orelse {
            // Unknown directive — warn and skip. Loc.start is always 0: the
            // parser does not track per-line byte offsets. An OOM while
            // logging is deliberately ignored (best-effort diagnostics).
            self.log.addWarningFmt(
                self.allocator,
                self.source,
                logger.Loc{ .start = 0 },
                "Unknown directive: {s}",
                .{directive_str},
            ) catch {};
            return;
        };
        switch (directive) {
            // FROM/WORKDIR are last-one-wins scalars; the rest accumulate.
            .FROM => {
                self.result.from = rest;
            },
            .WORKDIR => {
                self.result.workdir = rest;
            },
            .RUN => {
                try self.result.run_commands.append(self.allocator, rest);
            },
            .DEV => {
                self.result.dev = try self.parseProcess(rest, false);
            },
            .SERVICE => {
                const process = try self.parseProcess(rest, true);
                try self.result.services.append(self.allocator, process);
            },
            .TEST => {
                const process = try self.parseProcess(rest, false);
                try self.result.tests.append(self.allocator, process);
            },
            .OUTPUT => {
                try self.result.outputs.append(self.allocator, rest);
            },
            .LOGS => {
                try self.result.logs.append(self.allocator, rest);
            },
            .NET => {
                try self.result.net_hosts.append(self.allocator, rest);
            },
            .SECRET => {
                try self.result.secrets.append(self.allocator, rest);
            },
            .INFER => {
                try self.result.infer_patterns.append(self.allocator, rest);
            },
        }
    }

    /// Parse a process line (DEV, SERVICE, or TEST)
    /// Format: [name] [KEY=VALUE]... command
    ///
    /// For SERVICE: name is always required (first token)
    /// For DEV/TEST: name is optional, detected when first token is followed by KEY=VALUE
    ///
    /// If the line is exhausted before a non-KEY=VALUE token is seen,
    /// `command` stays "".
    fn parseProcess(self: *Parser, line: []const u8, require_name: bool) OOM!Sandboxfile.Process {
        var result: Sandboxfile.Process = .{ .command = "" };
        var remaining = line;
        // Tracked while scanning but never read afterwards — kept for
        // potential diagnostics.
        var token_index: usize = 0;
        // For optional names (DEV/TEST): name must come BEFORE any KEY=VALUE pairs
        // For required names (SERVICE): name is always the first token
        // First, check if the first token is a name
        if (remaining.len > 0) {
            const first_space = std.mem.indexOfAny(u8, remaining, whitespace_chars);
            const first_token = if (first_space) |idx| remaining[0..idx] else remaining;
            const after_first = if (first_space) |idx|
                std.mem.trimLeft(u8, remaining[idx..], whitespace_chars)
            else
                "";
            const first_has_eq = std.mem.indexOfScalar(u8, first_token, '=') != null;
            if (require_name) {
                // For SERVICE, the first token is always the name
                result.name = first_token;
                remaining = after_first;
                token_index = 1;
            } else if (!first_has_eq and isIdentifier(first_token) and after_first.len > 0) {
                // For DEV/TEST, check if first token could be a name
                // It's a name only if:
                // 1. It's an identifier (no special chars)
                // 2. It's NOT a KEY=VALUE pair
                // 3. The second token IS a KEY=VALUE pair
                const second_space = std.mem.indexOfAny(u8, after_first, whitespace_chars);
                const second_token = if (second_space) |idx| after_first[0..idx] else after_first;
                const second_has_eq = std.mem.indexOfScalar(u8, second_token, '=') != null;
                if (second_has_eq) {
                    // First token is a name, second is KEY=VALUE
                    result.name = first_token;
                    remaining = after_first;
                    token_index = 1;
                }
            }
        }
        // Parse KEY=VALUE pairs and command
        while (remaining.len > 0) {
            const next_space = std.mem.indexOfAny(u8, remaining, whitespace_chars);
            const token = if (next_space) |idx| remaining[0..idx] else remaining;
            const after = if (next_space) |idx|
                std.mem.trimLeft(u8, remaining[idx..], whitespace_chars)
            else
                "";
            // Check if this is a KEY=VALUE pair
            if (std.mem.indexOfScalar(u8, token, '=')) |eq_idx| {
                const key = token[0..eq_idx];
                const value = token[eq_idx + 1 ..];
                if (strings.eqlComptime(key, "PORT")) {
                    // An unparseable port is silently dropped (port stays
                    // null) rather than reported.
                    result.port = std.fmt.parseInt(u16, value, 10) catch null;
                } else if (strings.eqlComptime(key, "WATCH")) {
                    result.watch = value;
                }
                // Unknown KEY=VALUE pairs are ignored
                remaining = after;
                token_index += 1;
                continue;
            }
            // Everything remaining is the command
            result.command = remaining;
            break;
        }
        // self is unused today; kept so call sites read self.parseProcess(...).
        _ = self;
        return result;
    }

    /// True when `s` is non-empty and contains only [A-Za-z0-9_-].
    fn isIdentifier(s: []const u8) bool {
        if (s.len == 0) return false;
        for (s) |c| {
            switch (c) {
                'a'...'z', 'A'...'Z', '0'...'9', '_', '-' => continue,
                else => return false,
            }
        }
        return true;
    }
};
/// Load and parse a Sandboxfile from a file path.
///
/// The returned Sandboxfile's strings are slices into a buffer allocated
/// here; that buffer is intentionally kept alive for the lifetime of the
/// result. NOTE(review): the caller has no handle to free that buffer, so it
/// lives until process exit — confirm this is acceptable for call sites.
///
/// Errors: file-open/stat/read errors, `error.IncompleteRead` on a short
/// read, or OOM from allocation/parsing.
pub fn load(allocator: Allocator, path: []const u8) !Sandboxfile {
    const file = try std.fs.cwd().openFile(path, .{});
    defer file.close();
    const stat = try file.stat();
    const content = try allocator.alloc(u8, stat.size);
    errdefer allocator.free(content);
    const bytes_read = try file.readAll(content);
    if (bytes_read != stat.size) {
        return error.IncompleteRead;
    }
    var parser = Parser.init(allocator, path, content);
    // The log only holds parse-time diagnostics; free it on every path.
    // (Previously it leaked on success: only `errdefer parser.deinit()` ran.)
    defer parser.log.deinit();
    // On failure, free the partially built result lists; on success the
    // result is moved out to the caller.
    errdefer parser.result.deinit(allocator);
    return try parser.parse();
}
/// Parse a Sandboxfile from a string.
///
/// The result's strings are slices of `content`, so `content` must outlive
/// the returned Sandboxfile. The caller owns the result and must call
/// `deinit` on it.
pub fn parseString(allocator: Allocator, content: []const u8) OOM!Sandboxfile {
    var parser = Parser.init(allocator, "<string>", content);
    // Diagnostics log is always freed; the result is moved to the caller.
    defer parser.log.deinit();
    // If parse() fails with OOM, free whatever lists were already built.
    // (Previously those partially filled lists leaked on the error path.)
    errdefer parser.result.deinit(allocator);
    return try parser.parse();
}
// End-to-end parse of a file exercising every common directive.
test "parse simple sandboxfile" {
    const source =
        \\# Sandboxfile
        \\
        \\FROM host
        \\WORKDIR .
        \\
        \\RUN bun install
        \\
        \\DEV PORT=3000 WATCH=src/** bun run dev
        \\SERVICE db PORT=5432 docker compose up postgres
        \\SERVICE redis PORT=6379 redis-server
        \\TEST bun test
        \\
        \\OUTPUT src/
        \\OUTPUT tests/
        \\OUTPUT package.json
        \\
        \\LOGS logs/*
        \\
        \\NET registry.npmjs.org
        \\NET api.stripe.com
        \\
        \\SECRET STRIPE_API_KEY
    ;
    var parsed = try parseString(std.testing.allocator, source);
    defer parsed.deinit(std.testing.allocator);

    try std.testing.expectEqualStrings("host", parsed.from.?);
    try std.testing.expectEqualStrings(".", parsed.workdir.?);
    try std.testing.expectEqual(@as(usize, 1), parsed.run_commands.items.len);
    try std.testing.expectEqualStrings("bun install", parsed.run_commands.items[0]);
    // DEV: options parsed, remainder is the command
    try std.testing.expect(parsed.dev != null);
    try std.testing.expectEqual(@as(u16, 3000), parsed.dev.?.port.?);
    try std.testing.expectEqualStrings("src/**", parsed.dev.?.watch.?);
    try std.testing.expectEqualStrings("bun run dev", parsed.dev.?.command);
    // SERVICES keep declaration order
    try std.testing.expectEqual(@as(usize, 2), parsed.services.items.len);
    try std.testing.expectEqualStrings("db", parsed.services.items[0].name.?);
    try std.testing.expectEqual(@as(u16, 5432), parsed.services.items[0].port.?);
    try std.testing.expectEqualStrings("docker compose up postgres", parsed.services.items[0].command);
    try std.testing.expectEqualStrings("redis", parsed.services.items[1].name.?);
    try std.testing.expectEqual(@as(u16, 6379), parsed.services.items[1].port.?);
    try std.testing.expectEqualStrings("redis-server", parsed.services.items[1].command);
    // TEST
    try std.testing.expectEqual(@as(usize, 1), parsed.tests.items.len);
    try std.testing.expectEqualStrings("bun test", parsed.tests.items[0].command);
    // OUTPUTS
    try std.testing.expectEqual(@as(usize, 3), parsed.outputs.items.len);
    try std.testing.expectEqualStrings("src/", parsed.outputs.items[0]);
    try std.testing.expectEqualStrings("tests/", parsed.outputs.items[1]);
    try std.testing.expectEqualStrings("package.json", parsed.outputs.items[2]);
    // LOGS
    try std.testing.expectEqual(@as(usize, 1), parsed.logs.items.len);
    try std.testing.expectEqualStrings("logs/*", parsed.logs.items[0]);
    // NET
    try std.testing.expectEqual(@as(usize, 2), parsed.net_hosts.items.len);
    try std.testing.expectEqualStrings("registry.npmjs.org", parsed.net_hosts.items[0]);
    try std.testing.expectEqualStrings("api.stripe.com", parsed.net_hosts.items[1]);
    // SECRET
    try std.testing.expectEqual(@as(usize, 1), parsed.secrets.items.len);
    try std.testing.expectEqualStrings("STRIPE_API_KEY", parsed.secrets.items[0]);
}
// INFER is stored verbatim as a pattern list entry.
test "parse infer shorthand" {
    const source =
        \\FROM host
        \\WORKDIR .
        \\INFER *
    ;
    var parsed = try parseString(std.testing.allocator, source);
    defer parsed.deinit(std.testing.allocator);

    try std.testing.expectEqualStrings("host", parsed.from.?);
    try std.testing.expectEqualStrings(".", parsed.workdir.?);
    try std.testing.expectEqual(@as(usize, 1), parsed.infer_patterns.items.len);
    try std.testing.expectEqualStrings("*", parsed.infer_patterns.items[0]);
}
// Blank lines and '#' comments must be skipped without affecting directives.
test "parse empty lines and comments" {
    const source =
        \\# This is a comment
        \\
        \\FROM host
        \\
        \\# Another comment
        \\WORKDIR /app
        \\
    ;
    var parsed = try parseString(std.testing.allocator, source);
    defer parsed.deinit(std.testing.allocator);

    try std.testing.expectEqualStrings("host", parsed.from.?);
    try std.testing.expectEqualStrings("/app", parsed.workdir.?);
}
// A leading identifier is treated as the process name only when the next
// token is a KEY=VALUE pair; otherwise the whole line is the command.
test "parse process with optional name" {
    const source =
        \\DEV mydev PORT=8080 npm start
        \\TEST unit PORT=0 bun test unit
        \\TEST bun test
    ;
    var parsed = try parseString(std.testing.allocator, source);
    defer parsed.deinit(std.testing.allocator);

    try std.testing.expect(parsed.dev != null);
    try std.testing.expectEqualStrings("mydev", parsed.dev.?.name.?);
    try std.testing.expectEqual(@as(u16, 8080), parsed.dev.?.port.?);
    try std.testing.expectEqualStrings("npm start", parsed.dev.?.command);
    try std.testing.expectEqual(@as(usize, 2), parsed.tests.items.len);
    // Named test: "unit" followed by PORT=0
    try std.testing.expectEqualStrings("unit", parsed.tests.items[0].name.?);
    try std.testing.expectEqual(@as(u16, 0), parsed.tests.items[0].port.?);
    try std.testing.expectEqualStrings("bun test unit", parsed.tests.items[0].command);
    // Anonymous test: "bun" is not followed by KEY=VALUE, so no name
    try std.testing.expect(parsed.tests.items[1].name == null);
    try std.testing.expectEqualStrings("bun test", parsed.tests.items[1].command);
}

View File

@@ -0,0 +1,507 @@
import { describe, expect, test } from "bun:test";
import { bunEnv, bunExe, tempDir } from "harness";
// Import from bun-sandbox package
import { parseSandboxfile, SandboxRunner } from "../../../../packages/bun-sandbox/src/index";
// Unit tests for parseSandboxfile covering every directive the format
// supports (FROM/WORKDIR/RUN/DEV/SERVICE/TEST/OUTPUT/LOGS/NET/SECRET/INFER).
describe("Sandboxfile parser", () => {
  test("parse simple sandboxfile", () => {
    const input = `# Sandboxfile
FROM host
WORKDIR .
RUN bun install
DEV PORT=3000 WATCH=src/** bun run dev
SERVICE db PORT=5432 docker compose up postgres
SERVICE redis PORT=6379 redis-server
TEST bun test
OUTPUT src/
OUTPUT tests/
OUTPUT package.json
LOGS logs/*
NET registry.npmjs.org
NET api.stripe.com
SECRET STRIPE_API_KEY`;
    const parsed = parseSandboxfile(input);

    expect(parsed.from).toBe("host");
    expect(parsed.workdir).toBe(".");
    expect(parsed.runCommands).toEqual(["bun install"]);

    // DEV: options parsed out, remainder is the command.
    expect(parsed.dev).toBeDefined();
    expect(parsed.dev!.port).toBe(3000);
    expect(parsed.dev!.watch).toBe("src/**");
    expect(parsed.dev!.command).toBe("bun run dev");

    // SERVICE entries keep declaration order.
    expect(parsed.services).toHaveLength(2);
    const db = parsed.services[0];
    expect(db.name).toBe("db");
    expect(db.port).toBe(5432);
    expect(db.command).toBe("docker compose up postgres");
    const redis = parsed.services[1];
    expect(redis.name).toBe("redis");
    expect(redis.port).toBe(6379);
    expect(redis.command).toBe("redis-server");

    // TEST
    expect(parsed.tests).toHaveLength(1);
    expect(parsed.tests[0].command).toBe("bun test");

    // List-valued directives accumulate in order.
    expect(parsed.outputs).toEqual(["src/", "tests/", "package.json"]);
    expect(parsed.logs).toEqual(["logs/*"]);
    expect(parsed.netHosts).toEqual(["registry.npmjs.org", "api.stripe.com"]);
    expect(parsed.secrets).toEqual(["STRIPE_API_KEY"]);
  });

  test("parse infer shorthand", () => {
    const parsed = parseSandboxfile(`FROM host
WORKDIR .
INFER *`);
    expect(parsed.from).toBe("host");
    expect(parsed.workdir).toBe(".");
    expect(parsed.inferPatterns).toEqual(["*"]);
  });

  test("parse empty lines and comments", () => {
    // '#' lines and blank lines must not affect the parsed directives.
    const parsed = parseSandboxfile(`# This is a comment
FROM host
# Another comment
WORKDIR /app
`);
    expect(parsed.from).toBe("host");
    expect(parsed.workdir).toBe("/app");
  });

  test("parse process with optional name", () => {
    // A leading identifier counts as the name only when the next token is a
    // KEY=VALUE pair; otherwise the whole line is the command.
    const parsed = parseSandboxfile(`DEV mydev PORT=8080 npm start
TEST unit PORT=0 bun test unit
TEST bun test`);
    expect(parsed.dev).toBeDefined();
    expect(parsed.dev!.name).toBe("mydev");
    expect(parsed.dev!.port).toBe(8080);
    expect(parsed.dev!.command).toBe("npm start");
    expect(parsed.tests).toHaveLength(2);
    const named = parsed.tests[0];
    expect(named.name).toBe("unit");
    expect(named.port).toBe(0);
    expect(named.command).toBe("bun test unit");
    const anonymous = parsed.tests[1];
    expect(anonymous.name).toBeUndefined();
    expect(anonymous.command).toBe("bun test");
  });

  test("parse multiple RUN commands", () => {
    const parsed = parseSandboxfile(`FROM host
RUN npm install
RUN npm run build
RUN npm run migrate`);
    expect(parsed.runCommands).toEqual(["npm install", "npm run build", "npm run migrate"]);
  });

  test("parse complex service definitions", () => {
    const parsed = parseSandboxfile(`SERVICE postgres PORT=5432 WATCH=schema/** docker compose up -d postgres
SERVICE redis PORT=6379 redis-server --daemonize yes
SERVICE elasticsearch PORT=9200 docker run -p 9200:9200 elasticsearch:8`);
    expect(parsed.services).toHaveLength(3);
    const postgres = parsed.services[0];
    expect(postgres.name).toBe("postgres");
    expect(postgres.port).toBe(5432);
    expect(postgres.watch).toBe("schema/**");
    expect(postgres.command).toBe("docker compose up -d postgres");
    const redis = parsed.services[1];
    expect(redis.name).toBe("redis");
    expect(redis.port).toBe(6379);
    expect(redis.command).toBe("redis-server --daemonize yes");
    const elasticsearch = parsed.services[2];
    expect(elasticsearch.name).toBe("elasticsearch");
    expect(elasticsearch.port).toBe(9200);
    expect(elasticsearch.command).toBe("docker run -p 9200:9200 elasticsearch:8");
  });

  test("parse multiple network hosts", () => {
    const parsed = parseSandboxfile(`NET registry.npmjs.org
NET api.github.com
NET api.stripe.com
NET *.amazonaws.com`);
    expect(parsed.netHosts).toEqual(["registry.npmjs.org", "api.github.com", "api.stripe.com", "*.amazonaws.com"]);
  });

  test("parse multiple secrets", () => {
    const parsed = parseSandboxfile(`SECRET STRIPE_API_KEY
SECRET DATABASE_URL
SECRET AWS_ACCESS_KEY_ID
SECRET AWS_SECRET_ACCESS_KEY`);
    expect(parsed.secrets).toEqual(["STRIPE_API_KEY", "DATABASE_URL", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]);
  });

  test("handle Windows line endings", () => {
    // CRLF input must parse identically to LF input.
    const parsed = parseSandboxfile("FROM host\r\nWORKDIR .\r\nRUN npm install\r\n");
    expect(parsed.from).toBe("host");
    expect(parsed.workdir).toBe(".");
    expect(parsed.runCommands).toEqual(["npm install"]);
  });

  test("parse DEV without name", () => {
    const parsed = parseSandboxfile(`DEV PORT=3000 npm run dev`);
    expect(parsed.dev).toBeDefined();
    expect(parsed.dev!.name).toBeUndefined();
    expect(parsed.dev!.port).toBe(3000);
    expect(parsed.dev!.command).toBe("npm run dev");
  });

  test("parse minimal sandboxfile", () => {
    const parsed = parseSandboxfile(`FROM host`);
    expect(parsed.from).toBe("host");
    expect(parsed.workdir).toBeUndefined();
    // Every list-valued field defaults to an empty array.
    expect(parsed.runCommands).toEqual([]);
    expect(parsed.services).toEqual([]);
    expect(parsed.tests).toEqual([]);
    expect(parsed.outputs).toEqual([]);
  });

  test("parse docker image as FROM", () => {
    const parsed = parseSandboxfile(`FROM node:20-alpine
WORKDIR /app
RUN npm install`);
    expect(parsed.from).toBe("node:20-alpine");
    expect(parsed.workdir).toBe("/app");
  });
});
// Integration tests for SandboxRunner: config access, network policy,
// setup/test execution, dry-run mode, output collection, and full runs.
describe("SandboxRunner", () => {
  test("create runner from string", () => {
    const input = `FROM host
WORKDIR .
RUN echo hello
TEST echo test`;
    const runner = SandboxRunner.fromString(input);
    const config = runner.getConfig();
    expect(config.from).toBe("host");
    expect(config.runCommands).toEqual(["echo hello"]);
    expect(config.tests).toHaveLength(1);
  });

  test("network rules - allow specific hosts", () => {
    const runner = SandboxRunner.fromString(`FROM host
NET registry.npmjs.org
NET *.github.com`);
    // Exact-host and wildcard-subdomain matches are permitted.
    for (const host of ["registry.npmjs.org", "api.github.com", "raw.github.com"]) {
      expect(runner.isNetworkAllowed(host)).toBe(true);
    }
    // Everything else — including the bare apex of a wildcard rule — is not.
    for (const host of ["evil.com", "npmjs.org"]) {
      expect(runner.isNetworkAllowed(host)).toBe(false);
    }
  });

  test("network rules - deny all when no NET rules", () => {
    const runner = SandboxRunner.fromString(`FROM host`);
    for (const host of ["registry.npmjs.org", "google.com"]) {
      expect(runner.isNetworkAllowed(host)).toBe(false);
    }
  });

  test("network rules - allow all with wildcard", () => {
    const runner = SandboxRunner.fromString(`FROM host
NET *`);
    for (const host of ["anything.com", "evil.example.org"]) {
      expect(runner.isNetworkAllowed(host)).toBe(true);
    }
  });

  test("run setup commands", async () => {
    using dir = tempDir("sandbox-test", {
      Sandboxfile: `FROM host
RUN echo "setup complete" > setup.txt`,
    });
    const runner = await SandboxRunner.fromFile(`${dir}/Sandboxfile`, {
      cwd: String(dir),
      verbose: false,
    });
    expect(await runner.runSetup()).toBe(true);
    // The RUN command should have written setup.txt in the working dir.
    const setupFile = Bun.file(`${dir}/setup.txt`);
    expect(await setupFile.exists()).toBe(true);
    expect((await setupFile.text()).trim()).toBe("setup complete");
  });

  test("run tests and report results", async () => {
    using dir = tempDir("sandbox-test", {
      Sandboxfile: `FROM host
TEST echo "test passed"`,
    });
    const runner = await SandboxRunner.fromFile(`${dir}/Sandboxfile`, {
      cwd: String(dir),
      verbose: false,
    });
    expect(await runner.runTests()).toBe(true);
  });

  test("detect failing tests", async () => {
    using dir = tempDir("sandbox-test", {
      Sandboxfile: `FROM host
TEST exit 1`,
    });
    const runner = await SandboxRunner.fromFile(`${dir}/Sandboxfile`, {
      cwd: String(dir),
      verbose: false,
    });
    expect(await runner.runTests()).toBe(false);
  });

  test("dry run does not execute commands", async () => {
    using dir = tempDir("sandbox-test", {
      Sandboxfile: `FROM host
RUN touch should-not-exist.txt`,
    });
    const runner = await SandboxRunner.fromFile(`${dir}/Sandboxfile`, {
      cwd: String(dir),
      dryRun: true,
    });
    expect(await runner.runSetup()).toBe(true);
    // In dry-run mode the RUN command is reported but never executed.
    expect(await Bun.file(`${dir}/should-not-exist.txt`).exists()).toBe(false);
  });

  test("collect output files", async () => {
    using dir = tempDir("sandbox-test", {
      Sandboxfile: `FROM host
OUTPUT *.txt`,
      "file1.txt": "content1",
      "file2.txt": "content2",
      "file3.js": "ignored",
    });
    const outputDir = `${dir}/collected`;
    await Bun.$`mkdir -p ${outputDir}`.quiet();
    const runner = await SandboxRunner.fromFile(`${dir}/Sandboxfile`, {
      cwd: String(dir),
    });
    const copied = await runner.collectOutputs(outputDir);
    // Only files matching the OUTPUT glob are collected.
    expect(copied).toContain("file1.txt");
    expect(copied).toContain("file2.txt");
    expect(copied).not.toContain("file3.js");
    expect(await Bun.file(`${outputDir}/file1.txt`).text()).toBe("content1");
    expect(await Bun.file(`${outputDir}/file2.txt`).text()).toBe("content2");
  });

  test("full sandbox run with setup and tests", async () => {
    using dir = tempDir("sandbox-test", {
      Sandboxfile: `FROM host
WORKDIR .
RUN echo "setup" > setup.log
TEST echo "test1"
TEST echo "test2"`,
    });
    const runner = await SandboxRunner.fromFile(`${dir}/Sandboxfile`, {
      cwd: String(dir),
      verbose: false,
    });
    const outcome = await runner.run();
    expect(outcome.success).toBe(true);
    expect(outcome.testsPassed).toBe(true);
    expect(await Bun.file(`${dir}/setup.log`).exists()).toBe(true);
  });
});
// End-to-end tests that exercise the sandbox CLI as a subprocess.
describe("Sandboxfile CLI", () => {
  const cliPath = `${import.meta.dir}/../../../../packages/bun-sandbox/src/cli.ts`;

  /** Spawn the CLI with `args` (optionally in `cwd`) and collect its output. */
  async function runCli(args: string[], cwd?: string): Promise<{ stdout: string; stderr: string; exitCode: number }> {
    const proc = Bun.spawn({
      cmd: [bunExe(), cliPath, ...args],
      ...(cwd === undefined ? {} : { cwd }),
      env: bunEnv,
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([
      new Response(proc.stdout).text(),
      new Response(proc.stderr).text(),
      proc.exited,
    ]);
    return { stdout, stderr, exitCode };
  }

  test("validate command succeeds for valid file", async () => {
    using dir = tempDir("sandbox-cli-test", {
      Sandboxfile: `FROM host
WORKDIR .
RUN echo hello`,
    });
    const { stdout, exitCode } = await runCli(["validate", "-f", "Sandboxfile"], String(dir));
    expect(stdout).toContain("Sandboxfile is valid");
    expect(exitCode).toBe(0);
  });

  test("init command creates Sandboxfile", async () => {
    using dir = tempDir("sandbox-cli-test", {});
    const { stdout, exitCode } = await runCli(["init"], String(dir));
    expect(stdout).toContain("Created Sandboxfile");
    expect(exitCode).toBe(0);
    // The generated file should exist and carry the default template.
    const sandboxfile = Bun.file(`${dir}/Sandboxfile`);
    expect(await sandboxfile.exists()).toBe(true);
    const written = await sandboxfile.text();
    expect(written).toContain("FROM host");
    expect(written).toContain("DEV PORT=3000");
  });

  test("help command shows usage", async () => {
    // No cwd: help output must not depend on the working directory.
    const { stdout, exitCode } = await runCli(["--help"]);
    expect(stdout).toContain("Usage:");
    expect(stdout).toContain("bun sandbox");
    expect(exitCode).toBe(0);
  });

  test("test command runs and passes", async () => {
    using dir = tempDir("sandbox-cli-test", {
      Sandboxfile: `FROM host
TEST echo "hello world"`,
    });
    const { stdout, exitCode } = await runCli(["test", "-f", "Sandboxfile"], String(dir));
    expect(stdout).toContain("All tests passed");
    expect(exitCode).toBe(0);
  });

  test("test command fails on failing test", async () => {
    using dir = tempDir("sandbox-cli-test", {
      Sandboxfile: `FROM host
TEST exit 1`,
    });
    const { stderr, exitCode } = await runCli(["test", "-f", "Sandboxfile"], String(dir));
    // "Tests failed" appears in stderr
    expect(stderr).toContain("Tests failed");
    expect(exitCode).toBe(1);
  });
});