732 lines
24 KiB
Python
732 lines
24 KiB
Python
#!/usr/bin/env python3
|
|
import json
|
|
import os
|
|
import resource
|
|
import signal
|
|
import subprocess
|
|
import sys
|
|
import tempfile
|
|
import time
|
|
import uuid
|
|
import shutil
|
|
import selectors
|
|
import errno
|
|
from pathlib import Path
|
|
from statistics import pstdev
|
|
|
|
# Inputs: benchmark apps checkout and the case-definitions file.
APPS_DIR = Path(os.environ.get("APPS_DIR", "/apps"))
CASES_FILE = Path("/runner/cases.json")

# Compiler sources: old C++ implementation vs. new Rust implementation.
OLDCPP_SRC = Path("/src/oldcpp/cowcomp.cpp")
RUST_SRC = Path("/src/rust/cowc.rs")

# Install locations for the rebuilt compiler binaries.
OLDCPP_BIN = Path("/usr/local/bin/cowcomp")
RUST_BIN = Path("/usr/local/bin/cowc")

# Upstream URLs, recorded verbatim in each report for provenance.
SOURCES = {
    "apps_repo": "https://git.seppjm.com/seppdroid/COW-Apps.git",
    "old_compiler_cowcomp_cpp": "https://git.seppjm.com/mirrors/COW/raw/branch/master/source/cowcomp.cpp",
    "new_compiler_cowc_rs": "https://git.seppjm.com/seppdroid/cowc/raw/branch/main/cowc.rs",
}

# g++ flag sets: OLDCPP_GPP_FLAGS compiles cowcomp's generated C++ output,
# OLDCPP_REBUILD_FLAGS rebuilds the cowcomp compiler itself.
OLDCPP_GPP_FLAGS = ["-O0", "-g0", "-std=c++17", "-include", "cstdlib", "-Wno-c++23-extensions"]
OLDCPP_REBUILD_FLAGS = ["-O2", "-std=c++17"]

# Per-run resource limits (overridable via environment).
RUN_TIMEOUT_S = int(os.environ.get("RUN_TIMEOUT_S", "120"))
RUN_MEM_LIMIT_MB = int(os.environ.get("RUN_MEM_LIMIT_MB", "2048")) # 0 disables

# Per-compile resource limits.
COMPILE_TIMEOUT_S = int(os.environ.get("COMPILE_TIMEOUT_S", "300"))
COMPILE_MEM_LIMIT_MB = int(os.environ.get("COMPILE_MEM_LIMIT_MB", "0")) # 0 disables

# Cap on captured stdout/stderr bytes per child process.
CAPTURE_LIMIT_BYTES = int(os.environ.get("CAPTURE_LIMIT_BYTES", str(4 * 1024 * 1024))) # 0 disables
CGROUP_ROOT = Path("/sys/fs/cgroup")
|
def now_utc_iso() -> str:
    """Return the current UTC wall-clock time as an ISO-8601 'Z' timestamp."""
    utc_parts = time.gmtime()
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", utc_parts)
|
|
|
|
|
|
def stats(samples):
    """Summarize timing samples: raw values, count, mean, population stddev."""
    if not samples:
        return {"samples_seconds": [], "n": 0, "mean_seconds": 0.0, "std_seconds": 0.0}
    count = len(samples)
    return {
        "samples_seconds": samples,
        "n": count,
        "mean_seconds": sum(samples) / count,
        "std_seconds": pstdev(samples) if count > 1 else 0.0,
    }
|
|
|
|
|
|
def time_call(fn):
    """Invoke fn() and return (elapsed_seconds, result)."""
    started = time.perf_counter()
    result = fn()
    elapsed = time.perf_counter() - started
    return elapsed, result
|
|
|
|
|
|
def write_partial(path: Path, obj: dict):
    """Persist obj as indented JSON via a '.partial' sibling, then rename over path.

    The rename makes the final file appear atomically, so readers never see a
    half-written report.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    staging = path.with_suffix(path.suffix + ".partial")
    staging.write_text(json.dumps(obj, indent=2))
    staging.replace(path)
|
|
|
|
|
|
def capture_text(cmd):
    """Run cmd and return its combined stdout+stderr as stripped text.

    Never raises: any failure to launch is reported as an "error: ..." string.
    """
    try:
        proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
        combined = proc.stdout + proc.stderr
        return combined.decode("utf-8", "replace").strip()
    except Exception as exc:
        return f"error: {exc}"
|
|
|
|
|
|
def host_info():
    """Gather host details (kernel, distro, CPU, Python version) for the report."""
    info = {}
    info["uname"] = capture_text(["uname", "-a"])
    info["os_release"] = capture_text(["bash", "-lc", "cat /etc/os-release 2>/dev/null || true"])
    info["cpu"] = capture_text(["bash", "-lc", "lscpu 2>/dev/null || true"])
    info["python"] = sys.version.replace("\n", " ")
    return info
|
|
|
|
|
|
def tool_versions():
    """Report the first version line of each toolchain binary (gcc, g++, rustc, cargo, git)."""
    def _first(text: str) -> str:
        # Empty capture means the tool is absent.
        return text.splitlines()[0] if text else "n/a"

    probes = {
        "gcc": "gcc --version 2>/dev/null | head -n1 || true",
        "g++": "g++ --version 2>/dev/null | head -n1 || true",
        "rustc": "rustc --version 2>/dev/null || true",
        "cargo": "cargo --version 2>/dev/null || true",
        "git": "git --version 2>/dev/null || true",
    }
    return {tool: _first(capture_text(["bash", "-lc", shell_cmd])) for tool, shell_cmd in probes.items()}
|
|
|
|
|
|
def apps_commit():
    """Return the git HEAD commit hash of APPS_DIR, or 'unknown' on any failure."""
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "HEAD"],
            cwd=str(APPS_DIR),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            check=True,
        )
    except Exception:
        return "unknown"
    return proc.stdout.decode().strip()
|
|
|
|
|
|
def _limit_as_bytes(mem_mb: int) -> int:
|
|
return int(mem_mb) * 1024 * 1024
|
|
|
|
|
|
def _preexec_set_limits(mem_limit_mb: int):
|
|
def _fn():
|
|
if mem_limit_mb > 0:
|
|
lim = _limit_as_bytes(mem_limit_mb)
|
|
resource.setrlimit(resource.RLIMIT_AS, (lim, lim))
|
|
resource.setrlimit(resource.RLIMIT_DATA, (lim, lim))
|
|
return _fn
|
|
|
|
|
|
def _has_cgroup_v2() -> bool:
    """True when the cgroup root exposes both cgroup.controllers and memory.max."""
    markers = ("cgroup.controllers", "memory.max")
    return all((CGROUP_ROOT / marker).exists() for marker in markers)
|
|
|
|
|
|
def _cgroup_can_write() -> bool:
    """True when cgroup v2 is present and the cgroup root is writable by this process."""
    try:
        if not _has_cgroup_v2():
            return False
        return os.access(str(CGROUP_ROOT), os.W_OK)
    except Exception:
        return False
|
|
|
|
|
|
def _make_child_cgroup(mem_limit_mb: int) -> Path | None:
    """Create a fresh cgroup-v2 child with memory.max set to the given limit.

    Returns the cgroup directory Path, or None when limits are disabled
    (mem_limit_mb <= 0) or cgroups are unavailable/unwritable.
    """
    if mem_limit_mb <= 0:
        return None
    if not _cgroup_can_write():
        return None

    parent = CGROUP_ROOT / "benchpy"
    try:
        parent.mkdir(parents=True, exist_ok=True)
    except Exception:
        return None

    job = parent / f"job-{uuid.uuid4().hex}"
    try:
        job.mkdir(exist_ok=False)
        (job / "memory.max").write_text(str(_limit_as_bytes(mem_limit_mb)))
        oom_group = job / "memory.oom.group"
        if oom_group.exists():
            # Kill the entire group together on OOM so no stragglers survive.
            oom_group.write_text("1")
        return job
    except Exception:
        # Best-effort cleanup of a half-created cgroup.
        try:
            if job.exists():
                job.rmdir()
        except Exception:
            pass
        return None
|
|
|
|
|
|
def _attach_pid_to_cgroup(cg: Path, pid: int) -> None:
|
|
try:
|
|
(cg / "cgroup.procs").write_text(str(pid))
|
|
except Exception:
|
|
pass
|
|
|
|
|
|
def _cleanup_cgroup(cg: Path | None) -> None:
|
|
if not cg:
|
|
return
|
|
try:
|
|
cg.rmdir()
|
|
except Exception:
|
|
pass
|
|
|
|
|
|
def _cgroup_oom_killed(cg: Path | None) -> bool:
|
|
if not cg:
|
|
return False
|
|
try:
|
|
events = (cg / "memory.events").read_text()
|
|
for line in events.splitlines():
|
|
if line.startswith("oom_kill "):
|
|
return int(line.split()[1]) > 0
|
|
except Exception:
|
|
pass
|
|
return False
|
|
|
|
|
|
def _cgroup_peak_bytes(cg: Path | None) -> int:
|
|
if not cg:
|
|
return 0
|
|
p = cg / "memory.peak"
|
|
if not p.exists():
|
|
return 0
|
|
try:
|
|
return int(p.read_text().strip())
|
|
except Exception:
|
|
return 0
|
|
|
|
|
|
def _classify_run_failure(returncode: int | None, stderr: str, timed_out: bool, capped: bool, oom_killed: bool) -> tuple[str, str]:
|
|
if capped:
|
|
return "output_limit", "stdout/stderr exceeded CAPTURE_LIMIT_BYTES"
|
|
if oom_killed:
|
|
return "oom", "cgroup oom_kill=1"
|
|
if timed_out:
|
|
return "timeout", "run exceeded timeout"
|
|
if returncode is None:
|
|
return "runtime_error", "no returncode"
|
|
if returncode == 0:
|
|
return "ok", ""
|
|
sig = -returncode if returncode < 0 else None
|
|
s = (stderr or "").lower()
|
|
if sig in (signal.SIGKILL, signal.SIGSEGV, signal.SIGABRT):
|
|
if "bad_alloc" in s or "cannot allocate memory" in s or "std::bad_alloc" in s or "out of memory" in s:
|
|
return "oom", f"killed by signal {sig} (oom-like stderr)"
|
|
if sig == signal.SIGKILL:
|
|
return "oom", "killed by SIGKILL (likely OOM)"
|
|
return "runtime_error", f"killed by signal {sig}"
|
|
if returncode == 137:
|
|
return "oom", "exit 137 (SIGKILL, likely OOM)"
|
|
if "bad_alloc" in s or "cannot allocate memory" in s or "out of memory" in s:
|
|
return "oom", "oom-like stderr"
|
|
return "runtime_error", f"non-zero exit ({returncode})"
|
|
|
|
|
|
def _classify_compile_failure(returncode: int | None, stderr: str, timed_out: bool, capped: bool, oom_killed: bool) -> tuple[str, str]:
|
|
if capped:
|
|
return "output_limit", "stdout/stderr exceeded CAPTURE_LIMIT_BYTES"
|
|
if oom_killed:
|
|
return "oom", "cgroup oom_kill=1"
|
|
if timed_out:
|
|
return "timeout", "compile exceeded timeout"
|
|
if returncode is None:
|
|
return "compile_error", "no returncode"
|
|
if returncode == 0:
|
|
return "ok", ""
|
|
sig = -returncode if returncode < 0 else None
|
|
s = (stderr or "").lower()
|
|
if returncode == 137 or sig == signal.SIGKILL:
|
|
return "oom", "killed by SIGKILL (likely OOM)"
|
|
if "cannot allocate memory" in s or "out of memory" in s or "bad_alloc" in s:
|
|
return "oom", "oom-like stderr"
|
|
return "compile_error", f"non-zero exit ({returncode})"
|
|
|
|
|
|
def _run_limited(cmd, *, cwd=None, input_bytes=None, timeout_s=0, mem_limit_mb=0, capture=True):
    """Run cmd under optional time/memory/output limits.

    Enforcement layers:
      - rlimits (RLIMIT_AS/RLIMIT_DATA) via preexec_fn in the child;
      - an optional per-job cgroup-v2 with memory.max (when writable);
      - a wall-clock deadline enforced by this parent;
      - a cap (CAPTURE_LIMIT_BYTES) on captured stdout+stderr.

    Returns a 7-tuple:
      (returncode_or_None, stdout_text, stderr_text, timed_out, oom_killed,
       peak_bytes, output_capped)
    returncode is None when the run timed out. With capture=False the output
    strings are always empty and output_capped is always False.
    """
    timed_out = False
    capped = False
    # cg is None when limits are disabled or cgroups are unusable.
    cg = _make_child_cgroup(mem_limit_mb)

    out_buf = bytearray()
    err_buf = bytearray()

    def _append(buf: bytearray, data: bytes) -> None:
        # Append data to buf, truncating at CAPTURE_LIMIT_BYTES and flagging
        # the overflow; a limit of <= 0 disables capping entirely.
        nonlocal capped
        if CAPTURE_LIMIT_BYTES <= 0:
            buf.extend(data)
            return
        if len(buf) >= CAPTURE_LIMIT_BYTES:
            capped = True
            return
        room = CAPTURE_LIMIT_BYTES - len(buf)
        if len(data) <= room:
            buf.extend(data)
        else:
            buf.extend(data[:room])
            capped = True

    def _kill_pg(p: subprocess.Popen) -> None:
        # Kill the child's whole process group (it was started with
        # start_new_session=True); fall back to killing just the child.
        try:
            os.killpg(p.pid, signal.SIGKILL)
        except Exception:
            try:
                p.kill()
            except Exception:
                pass

    try:
        p = subprocess.Popen(
            cmd,
            cwd=cwd,
            stdin=subprocess.PIPE if input_bytes is not None else None,
            stdout=subprocess.PIPE if capture else subprocess.DEVNULL,
            stderr=subprocess.PIPE if capture else subprocess.DEVNULL,
            start_new_session=True,
            preexec_fn=_preexec_set_limits(mem_limit_mb),
        )
        if cg:
            # Best-effort: the child may already have forked grandchildren,
            # but they inherit the cgroup once the leader is attached.
            _attach_pid_to_cgroup(cg, p.pid)

        # Feed stdin up-front and close it so the child sees EOF.
        if input_bytes is not None and p.stdin:
            try:
                p.stdin.write(input_bytes)
            except Exception:
                pass
            try:
                p.stdin.close()
            except Exception:
                pass

        # Fast path: no output capture, just wait with a timeout.
        if not capture:
            try:
                p.wait(timeout=(timeout_s if timeout_s and timeout_s > 0 else None))
                oom = _cgroup_oom_killed(cg)
                peak = _cgroup_peak_bytes(cg)
                return (p.returncode, "", "", False, oom, peak, False)
            except subprocess.TimeoutExpired:
                timed_out = True
                _kill_pg(p)
                try:
                    p.wait(timeout=5)
                except Exception:
                    pass
                oom = _cgroup_oom_killed(cg)
                peak = _cgroup_peak_bytes(cg)
                return (None, "", "", True, oom, peak, False)

        # Capture path: non-blocking reads of both pipes via selectors so a
        # chatty child can never deadlock us on a full pipe buffer.
        sel = selectors.DefaultSelector()

        def _reg(fileobj, which: str):
            # Register a pipe for read events, tagged "out" or "err".
            if not fileobj:
                return
            try:
                os.set_blocking(fileobj.fileno(), False)
            except Exception:
                pass
            try:
                sel.register(fileobj, selectors.EVENT_READ, which)
            except Exception:
                pass

        _reg(p.stdout, "out")
        _reg(p.stderr, "err")

        start = time.monotonic()
        # 10**9 seconds effectively means "no deadline".
        deadline = start + (timeout_s if timeout_s and timeout_s > 0 else 10**9)

        while True:
            # Once the cap is hit there is no point letting the child run on.
            if capped:
                _kill_pg(p)
                try:
                    p.wait(timeout=5)
                except Exception:
                    pass
                break

            now = time.monotonic()
            if now >= deadline:
                timed_out = True
                _kill_pg(p)
                try:
                    p.wait(timeout=5)
                except Exception:
                    pass
                break

            # Child exited and both pipes are drained: done.
            if p.poll() is not None and not sel.get_map():
                break

            # Poll in short slices so exit/deadline are noticed promptly.
            timeout_left = max(0.0, min(0.25, deadline - now))
            events = sel.select(timeout_left)

            if not events:
                if p.poll() is not None:
                    # Child is gone and pipes are idle; drop the registrations
                    # so the drained-check above terminates the loop.
                    for key in list(sel.get_map().values()):
                        try:
                            sel.unregister(key.fileobj)
                        except Exception:
                            pass
                continue

            for key, _mask in events:
                which = key.data
                f = key.fileobj
                try:
                    data = f.read(65536)
                except BlockingIOError:
                    continue
                except OSError as e:
                    # Spurious wakeup on a non-blocking fd: retry later.
                    if getattr(e, "errno", None) in (errno.EAGAIN, errno.EWOULDBLOCK):
                        continue
                    data = b""
                except Exception:
                    data = b""

                if not data:
                    # EOF (or unreadable): stop watching this pipe.
                    try:
                        sel.unregister(f)
                    except Exception:
                        pass
                    continue

                if which == "out":
                    _append(out_buf, data)
                else:
                    _append(err_buf, data)

        try:
            sel.close()
        except Exception:
            pass

        rc = p.returncode
        oom = _cgroup_oom_killed(cg)
        peak = _cgroup_peak_bytes(cg)
        return (
            rc if not timed_out else None,
            out_buf.decode("utf-8", "replace"),
            err_buf.decode("utf-8", "replace"),
            timed_out,
            oom,
            peak,
            capped,
        )

    finally:
        # Always remove the per-job cgroup, even on exceptions.
        _cleanup_cgroup(cg)
|
|
|
|
|
|
def rebuild_compiler(kind: str):
    """Rebuild the requested compiler binary from its checked-out source.

    kind: "oldcpp" rebuilds cowcomp with g++; "rust" rebuilds cowc with rustc.
    Returns the path (str) of the rebuilt binary.
    Raises RuntimeError when the build fails and ValueError for an unknown kind.
    """
    if kind == "oldcpp":
        return _rebuild_one(
            "cowcomp",
            OLDCPP_BIN,
            ["g++", *OLDCPP_REBUILD_FLAGS, str(OLDCPP_SRC), "-o", str(OLDCPP_BIN)],
        )
    if kind == "rust":
        return _rebuild_one(
            "cowc",
            RUST_BIN,
            ["rustc", "-O", str(RUST_SRC), "-o", str(RUST_BIN)],
        )
    raise ValueError("kind must be oldcpp or rust")


def _rebuild_one(label: str, bin_path: Path, cmd) -> str:
    """Remove any stale binary, run the build command under limits, verify output.

    Shared by both rebuild_compiler branches (they were previously duplicated).
    Raises RuntimeError with a classified reason on any failure.
    """
    # Delete the old binary first so a silently failed build cannot pass
    # the existence check below.
    if bin_path.exists():
        bin_path.unlink()
    rc, out, err, to, oom, peak, capped = _run_limited(
        cmd,
        timeout_s=COMPILE_TIMEOUT_S,
        mem_limit_mb=COMPILE_MEM_LIMIT_MB,
        capture=True,
    )
    if to or oom or capped or rc != 0 or not bin_path.exists():
        reason, detail = _classify_compile_failure(rc, (out + "\n" + err).strip(), to, capped, oom)
        raise RuntimeError(f"rebuild {label} failed: {reason} ({detail})")
    return str(bin_path)
|
|
|
|
|
|
def _copy_file(src: Path, dst: Path):
|
|
dst.parent.mkdir(parents=True, exist_ok=True)
|
|
if dst.exists():
|
|
dst.unlink()
|
|
shutil.copyfile(str(src), str(dst))
|
|
dst.chmod(0o755)
|
|
|
|
|
|
def compile_app_oldcpp(cow_path: Path, out_bin: Path):
    """Compile a .cow app with the old cowcomp toolchain, installing to out_bin.

    Pipeline (in a throwaway temp dir):
      1. Run cowcomp on a copy of the source ("app.cow").
      2. If cowcomp emitted a ready binary ("cow.out"), install it directly.
      3. Otherwise, if it emitted generated C++ ("cow.out.cpp"), compile that
         with g++ and install the result.
    Returns a metadata dict describing which path was taken.
    Raises RuntimeError with diagnostics on any failure.
    """
    with tempfile.TemporaryDirectory() as td_str:
        td = Path(td_str)
        # Work on a copy so cowcomp's outputs land in the temp dir.
        (td / "app.cow").write_bytes(cow_path.read_bytes())

        cow_rc, cow_out, cow_err, cow_to, cow_oom, cow_peak, cow_capped = _run_limited(
            [str(OLDCPP_BIN), "app.cow"],
            cwd=str(td),
            timeout_s=COMPILE_TIMEOUT_S,
            mem_limit_mb=COMPILE_MEM_LIMIT_MB,
            capture=True,
        )
        # Combined stdout+stderr for diagnostics, with a separator only when
        # both streams are non-empty.
        cow_text = (cow_out + ("\n" if cow_out and cow_err else "") + cow_err).strip()

        if cow_to or cow_oom or cow_capped or cow_rc != 0:
            reason, detail = _classify_compile_failure(cow_rc, cow_text, cow_to, cow_capped, cow_oom)
            raise RuntimeError(f"cowcomp failed: {reason} ({detail})")

        # Case 2: cowcomp produced a finished binary itself.
        produced = td / "cow.out"
        if produced.exists():
            _copy_file(produced, out_bin)
            return {"cowcomp_exit": cow_rc, "mode": "cowcomp_direct"}

        # Case 3: cowcomp emitted C++ that we must compile ourselves.
        cpp = td / "cow.out.cpp"
        if not cpp.exists():
            raise RuntimeError(
                "cowcomp did not produce cow.out and did not produce cow.out.cpp.\n"
                f"cowcomp exit code: {cow_rc}\n"
                f"cowcomp output:\n{cow_text}\n"
            )

        gpp_cmd = ["g++", *OLDCPP_GPP_FLAGS, str(cpp), "-o", str(td / "cow.out")]
        gpp_rc, gpp_out, gpp_err, gpp_to, gpp_oom, gpp_peak, gpp_capped = _run_limited(
            gpp_cmd,
            cwd=str(td),
            timeout_s=COMPILE_TIMEOUT_S,
            mem_limit_mb=COMPILE_MEM_LIMIT_MB,
            capture=True,
        )
        gpp_text = (gpp_out + ("\n" if gpp_out and gpp_err else "") + gpp_err).strip()

        # Success is judged by the binary existing, not by g++'s exit code,
        # so warnings-as-output don't fail the build.
        if (td / "cow.out").exists():
            _copy_file(td / "cow.out", out_bin)
            return {
                "cowcomp_exit": cow_rc,
                "gpp_exit": gpp_rc,
                "mode": "cowcomp_cpp_manual",
                "cow_out_cpp_bytes": cpp.stat().st_size,
                "gpp_cmd": gpp_cmd,
            }

        reason, detail = _classify_compile_failure(gpp_rc, gpp_text, gpp_to, gpp_capped, gpp_oom)
        raise RuntimeError(
            "cowcomp produced cow.out.cpp but compilation failed.\n"
            f"failure: {reason} ({detail})\n"
            f"cowcomp exit code: {cow_rc}\n"
            f"cowcomp output:\n{cow_text}\n"
            f"g++ cmd: {' '.join(gpp_cmd)}\n"
            f"g++ exit code: {gpp_rc}\n"
            f"g++ output:\n{gpp_text}\n"
        )
|
|
|
|
|
|
def compile_app(kind: str, cow_path: Path, out_bin: Path):
    """Compile a .cow app to a native binary at out_bin with the selected compiler.

    kind: "oldcpp" uses the cowcomp pipeline; "rust" uses cowc.
    Returns a metadata dict describing how the binary was produced.
    Raises RuntimeError on compile failure and ValueError for an unknown kind
    (previously an unknown kind silently fell through to the rust path;
    validating here matches rebuild_compiler's behavior).
    """
    if kind not in ("oldcpp", "rust"):
        raise ValueError("kind must be oldcpp or rust")

    # Remove any stale binary so the post-compile existence check is honest.
    if out_bin.exists():
        out_bin.unlink()

    if kind == "oldcpp":
        return compile_app_oldcpp(cow_path, out_bin)

    rc, out, err, to, oom, peak, capped = _run_limited(
        [str(RUST_BIN), str(cow_path), "--lto", "-o", str(out_bin)],
        timeout_s=COMPILE_TIMEOUT_S,
        mem_limit_mb=COMPILE_MEM_LIMIT_MB,
        capture=True,
    )
    if to or oom or capped or rc != 0 or not out_bin.exists():
        reason, detail = _classify_compile_failure(rc, (out + "\n" + err).strip(), to, capped, oom)
        raise RuntimeError(f"cowc compile failed: {reason} ({detail})")

    out_bin.chmod(0o755)
    return {"mode": "cowc"}
|
|
|
|
|
|
def run_bin(bin_path: Path, stdin_text: str, timeout_s: int, run_mem_limit_mb: int, capture: bool):
    """Execute a compiled app with stdin_text piped in.

    Returns (stdout, returncode, stderr, timed_out, oom_killed, peak_bytes, capped).
    """
    rc, out, err, to, oom, peak, capped = _run_limited(
        [str(bin_path)],
        input_bytes=stdin_text.encode("utf-8"),
        timeout_s=timeout_s,
        mem_limit_mb=run_mem_limit_mb,
        capture=capture,
    )
    # Reorder to put the app's stdout first for callers.
    return out, rc, err, to, oom, peak, capped
|
|
|
|
|
|
def main():
    """Benchmark one compiler kind end-to-end and write a JSON report.

    CLI: bench.py (oldcpp|rust) /results/out.json
    Phases: rebuild the compiler N times, then per app compile N times and run
    N times, classifying failures as DNF with a phase/reason/detail triple.
    The report file is rewritten (atomically) after each app so a crash still
    leaves usable partial results.
    """
    if len(sys.argv) != 3:
        print("usage: bench.py (oldcpp|rust) /results/out.json", file=sys.stderr)
        sys.exit(2)

    kind = sys.argv[1]
    out_json = Path(sys.argv[2])

    print(f"[{kind}] starting {now_utc_iso()} (UTC)", flush=True)

    # Repetition counts come from the shared cases file.
    cases = json.loads(CASES_FILE.read_text())
    reps_build = int(cases["repetitions"]["build_compiler"])
    reps_compile = int(cases["repetitions"]["compile_app"])
    reps_run = int(cases["repetitions"]["run_app"])

    # Report skeleton: environment/provenance up-front, timings/apps filled in
    # incrementally below.
    base_obj = {
        "generated_at_utc": now_utc_iso(),
        "compiler_kind": kind,
        "sources": SOURCES,
        "apps_repo_commit": apps_commit(),
        "environment": {"host_info": host_info(), "tool_versions": tool_versions()},
        "limits": {
            "run_timeout_s": RUN_TIMEOUT_S,
            "run_mem_limit_mb": RUN_MEM_LIMIT_MB,
            "compile_timeout_s": COMPILE_TIMEOUT_S,
            "compile_mem_limit_mb": COMPILE_MEM_LIMIT_MB,
            "capture_limit_bytes": CAPTURE_LIMIT_BYTES,
        },
        "timings": {},
        "apps": [],
    }
    write_partial(out_json, base_obj)

    # Phase 1: time repeated rebuilds of the compiler itself; stop at the
    # first failure (later phases still proceed with whatever binary exists).
    build_samples = []
    build_errors = []
    for i in range(1, reps_build + 1):
        try:
            print(f"[{kind}] build compiler {i}/{reps_build}", flush=True)
            dt, _ = time_call(lambda: rebuild_compiler(kind))
            build_samples.append(dt)
        except Exception as e:
            build_errors.append(str(e))
            break

    # Phase 2: per-app compile + run benchmarks.
    app_results = []
    for app in cases["apps"]:
        name = app["name"]
        file = app["file"]
        stdin = app.get("stdin", "")

        cow_path = APPS_DIR / file
        print(f"[{kind}] app {name}", flush=True)

        # Missing source file: record a DNF row and checkpoint immediately.
        if not cow_path.exists():
            app_results.append(
                {
                    "name": name,
                    "file": file,
                    "stdin": stdin,
                    "status": "DNF",
                    "dnf_phase": "compile",
                    "dnf_reason": "missing_input",
                    "dnf_detail": f"missing file: {cow_path}",
                    "compile": stats([]),
                    "compile_meta": None,
                    "compile_error": None,
                    "run": stats([]),
                    "output_check": "",
                    "returncode": None,
                    "stderr_check": "",
                    "run_peak_bytes": 0,
                }
            )
            base_obj["apps"] = app_results
            base_obj["timings"] = {"build_compiler": stats(build_samples), "build_compiler_errors": build_errors}
            write_partial(out_json, base_obj)
            continue

        bin_path = Path("/work") / f"bin_{kind}_{name}"

        compile_samples = []
        compile_error = None
        compile_meta = None
        status = "PASS"
        dnf_phase = None
        dnf_reason = None
        dnf_detail = None
        run_peak = 0

        # Time repeated compiles; the first failure marks the app DNF and its
        # reason is inferred from the exception message text.
        for _i in range(1, reps_compile + 1):
            try:
                dt, meta = time_call(lambda: compile_app(kind, cow_path, bin_path))
                compile_samples.append(dt)
                compile_meta = meta
            except Exception as e:
                msg = str(e)
                status = "DNF"
                dnf_phase = "compile"
                low = msg.lower()
                if "oom" in low:
                    dnf_reason = "oom"
                elif "timeout" in low:
                    dnf_reason = "timeout"
                elif "output_limit" in low:
                    dnf_reason = "output_limit"
                else:
                    dnf_reason = "compile_error"
                dnf_detail = msg
                compile_error = msg
                break

        run_samples = []
        out = ""
        rc = None
        err = ""

        # One captured "check" run first: records output/stderr for the report
        # and validates the binary before the timed (uncaptured) runs.
        if status == "PASS" and bin_path.exists():
            out, rc, err, timed_out, oom_killed, peak, capped = run_bin(
                bin_path, stdin, RUN_TIMEOUT_S, RUN_MEM_LIMIT_MB, capture=True
            )
            run_peak = max(run_peak, int(peak or 0))
            reason, detail = _classify_run_failure(rc, err, timed_out, capped, oom_killed)
            if reason != "ok":
                status = "DNF"
                dnf_phase = "run"
                dnf_reason = reason
                dnf_detail = detail

        # Timed runs with capture disabled so I/O buffering doesn't skew times.
        if status == "PASS":
            for _ in range(reps_run):
                dt, (_out2, rrc, rerr, timed_out, oom_killed, peak, capped) = time_call(
                    lambda: run_bin(bin_path, stdin, RUN_TIMEOUT_S, RUN_MEM_LIMIT_MB, capture=False)
                )
                run_peak = max(run_peak, int(peak or 0))
                reason, detail = _classify_run_failure(rrc, rerr, timed_out, capped, oom_killed)
                if reason != "ok":
                    status = "DNF"
                    dnf_phase = "run"
                    dnf_reason = reason
                    dnf_detail = detail
                    break
                run_samples.append(dt)

        app_results.append(
            {
                "name": name,
                "file": file,
                "stdin": stdin,
                "status": status,
                "dnf_phase": dnf_phase,
                "dnf_reason": dnf_reason,
                "dnf_detail": dnf_detail,
                "compile": stats(compile_samples),
                "compile_meta": compile_meta,
                "compile_error": compile_error,
                "run": stats(run_samples),
                "output_check": out,
                "returncode": rc,
                "stderr_check": err,
                "run_peak_bytes": run_peak,
            }
        )

        # Checkpoint the report after every app.
        base_obj["apps"] = app_results
        base_obj["timings"] = {"build_compiler": stats(build_samples), "build_compiler_errors": build_errors}
        write_partial(out_json, base_obj)

    print(f"[{kind}] wrote {out_json}", flush=True)
|
|
|
|
|
|
# Script entry point: benchmark one compiler kind and write its JSON report.
if __name__ == "__main__":
    main()