diff --git a/.github/workflows/player-perf.yml b/.github/workflows/player-perf.yml
new file mode 100644
index 000000000..937a60113
--- /dev/null
+++ b/.github/workflows/player-perf.yml
@@ -0,0 +1,120 @@
+name: Player perf
+
+on:
+ pull_request:
+ push:
+ branches: [main]
+
+concurrency:
+ group: player-perf-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ changes:
+ name: Detect changes
+ runs-on: ubuntu-latest
+ timeout-minutes: 2
+ outputs:
+ perf: ${{ steps.filter.outputs.perf }}
+ steps:
+ - uses: actions/checkout@v4
+ - uses: dorny/paths-filter@v3
+ id: filter
+ with:
+ filters: |
+ perf:
+ - "packages/player/**"
+ - "packages/core/**"
+ - "package.json"
+ - "bun.lock"
+ - ".github/workflows/player-perf.yml"
+
+ perf-shards:
+ name: "Perf: ${{ matrix.shard }}"
+ needs: changes
+ if: needs.changes.outputs.perf == 'true'
+ runs-on: ubuntu-latest
+ timeout-minutes: 20
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - shard: load
+ scenarios: load
+ runs: "5"
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: oven-sh/setup-bun@v2
+
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 22
+
+ - run: bun install --frozen-lockfile
+
+ # Player perf loads packages/player/dist/hyperframes-player.global.js
+ # and packages/core/dist/hyperframe.runtime.iife.js, so a full build is required.
+ - run: bun run build
+
+ - name: Set up Chrome (headless shell)
+ id: setup-chrome
+ uses: browser-actions/setup-chrome@v1
+ with:
+ chrome-version: stable
+
+ - name: Run player perf — ${{ matrix.shard }} (measure mode)
+ working-directory: packages/player
+ env:
+ PUPPETEER_EXECUTABLE_PATH: ${{ steps.setup-chrome.outputs.chrome-path }}
+ run: |
+ bun run perf \
+ --mode=measure \
+ --scenarios=${{ matrix.scenarios }} \
+ --runs=${{ matrix.runs }}
+
+ - name: Upload perf results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: player-perf-${{ matrix.shard }}
+ path: packages/player/tests/perf/results/
+ if-no-files-found: warn
+ retention-days: 30
+
+ # Summary job — matches the required check name in branch protection.
+ # Logs an explicit "skipped" / "passed" / "failed" line both to stdout and to
+ # $GITHUB_STEP_SUMMARY so a false skip is obvious in the Checks UI without
+ # having to dig into the changes-job logs.
+ player-perf:
+ runs-on: ubuntu-latest
+ needs: [changes, perf-shards]
+ if: always()
+ steps:
+ - name: Check results
+ env:
+ PERF_FILTER_RESULT: ${{ needs.changes.outputs.perf }}
+ PERF_SHARDS_RESULT: ${{ needs.perf-shards.result }}
+ run: |
+ {
+ echo "## Player perf gate"
+ echo ""
+ echo "- paths-filter \`perf\` matched: \`${PERF_FILTER_RESULT}\`"
+ echo "- perf-shards result: \`${PERF_SHARDS_RESULT}\`"
+ echo ""
+ } >> "$GITHUB_STEP_SUMMARY"
+
+ if [ "${PERF_FILTER_RESULT}" != "true" ]; then
+ echo "::notice title=Player perf::SKIPPED — no changes under packages/player/**, packages/core/**, package.json, bun.lock, or .github/workflows/player-perf.yml. Auto-pass."
+ echo "**Status:** SKIPPED (no player/core changes — auto-pass)" >> "$GITHUB_STEP_SUMMARY"
+ exit 0
+ fi
+
+ if [ "${PERF_SHARDS_RESULT}" != "success" ]; then
+ echo "::error title=Player perf::FAILED — perf-shards result was '${PERF_SHARDS_RESULT}'. See the per-shard logs above."
+ echo "**Status:** FAILED (perf-shards result: \`${PERF_SHARDS_RESULT}\`)" >> "$GITHUB_STEP_SUMMARY"
+ exit 1
+ fi
+
+ echo "::notice title=Player perf::PASSED — all perf shards completed successfully."
+ echo "**Status:** PASSED" >> "$GITHUB_STEP_SUMMARY"
diff --git a/.gitignore b/.gitignore
index cbff81c71..5f127cdb1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -33,6 +33,9 @@ coverage/
# Producer regression test failures (generated debugging artifacts)
packages/producer/tests/*/failures/
+# Player perf test results (generated each run, attached as CI artifact)
+packages/player/tests/perf/results/
+
# Rendered output (not test fixtures — those use git LFS)
output/
renders/
diff --git a/bun.lock b/bun.lock
index 6f9b39a0e..596bbd1ae 100644
--- a/bun.lock
+++ b/bun.lock
@@ -108,6 +108,9 @@
"name": "@hyperframes/player",
"version": "0.4.11",
"devDependencies": {
+ "@types/bun": "^1.1.0",
+ "gsap": "^3.12.5",
+ "puppeteer-core": "^24.39.1",
"tsup": "^8.0.0",
"typescript": "^5.0.0",
"vitest": "^3.2.4",
@@ -740,6 +743,8 @@
"@types/babel__traverse": ["@types/babel__traverse@7.28.0", "", { "dependencies": { "@babel/types": "7.29.0" } }, "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q=="],
+ "@types/bun": ["@types/bun@1.3.12", "", { "dependencies": { "bun-types": "1.3.12" } }, "sha512-DBv81elK+/VSwXHDlnH3Qduw+KxkTIWi7TXkAeh24zpi5l0B2kUg9Ga3tb4nJaPcOFswflgi/yAvMVBPrxMB+A=="],
+
"@types/chai": ["@types/chai@5.2.3", "", { "dependencies": { "@types/deep-eql": "4.0.2", "assertion-error": "2.0.1" } }, "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA=="],
"@types/deep-eql": ["@types/deep-eql@4.0.2", "", {}, "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw=="],
@@ -858,6 +863,8 @@
"buffer-equal-constant-time": ["buffer-equal-constant-time@1.0.1", "", {}, "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="],
+ "bun-types": ["bun-types@1.3.12", "", { "dependencies": { "@types/node": "*" } }, "sha512-HqOLj5PoFajAQciOMRiIZGNoKxDJSr6qigAttOX40vJuSp6DN/CxWp9s3C1Xwm4oH7ybueITwiaOcWXoYVoRkA=="],
+
"bundle-name": ["bundle-name@4.1.0", "", { "dependencies": { "run-applescript": "7.1.0" } }, "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q=="],
"bundle-require": ["bundle-require@5.1.0", "", { "dependencies": { "load-tsconfig": "0.2.5" }, "peerDependencies": { "esbuild": "0.27.4" } }, "sha512-3WrrOuZiyaaZPWiEt4G3+IffISVC9HYlWueJEBWED4ZH4aIAC2PnkdnuRrR94M+w6yGWn4AglWtJtBI8YqvgoA=="],
@@ -1072,6 +1079,8 @@
"google-logging-utils": ["google-logging-utils@1.1.3", "", {}, "sha512-eAmLkjDjAFCVXg7A1unxHsLf961m6y17QFqXqAXGj/gVkKFrEICfStRfwUlGNfeCEjNRa32JEWOUTlYXPyyKvA=="],
+ "gsap": ["gsap@3.15.0", "", {}, "sha512-dMW4CWBTUK1AEEDeZc1g4xpPGIrSf9fJF960qbTZmN/QwZIWY5wgliS6JWl9/25fpTGJrMRtSjGtOmPnfjZB+A=="],
+
"happy-dom": ["happy-dom@20.9.0", "", { "dependencies": { "@types/node": ">=20.0.0", "@types/whatwg-mimetype": "^3.0.2", "@types/ws": "^8.18.1", "entities": "^7.0.1", "whatwg-mimetype": "^3.0.0", "ws": "^8.18.3" } }, "sha512-GZZ9mKe8r646NUAf/zemnGbjYh4Bt8/MqASJY+pSm5ZDtc3YQox+4gsLI7yi1hba6o+eCsGxpHn5+iEVn31/FQ=="],
"has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="],
diff --git a/package.json b/package.json
index 6a7c897c0..71bf675a4 100644
--- a/package.json
+++ b/package.json
@@ -25,6 +25,7 @@
"lint:fix": "oxlint --fix .",
"format": "oxfmt .",
"test": "bun run --filter '*' test",
+ "player:perf": "bun run --filter @hyperframes/player perf",
"format:check": "oxfmt --check .",
"knip": "knip",
"generate:previews": "tsx scripts/generate-template-previews.ts",
diff --git a/packages/player/package.json b/packages/player/package.json
index b410a7cbf..f6aa27a58 100644
--- a/packages/player/package.json
+++ b/packages/player/package.json
@@ -23,10 +23,14 @@
},
"scripts": {
"build": "tsup",
- "typecheck": "tsc --noEmit",
- "test": "vitest run"
+ "typecheck": "tsc --noEmit && tsc --noEmit -p tests/perf/tsconfig.json",
+ "test": "vitest run",
+ "perf": "bun run tests/perf/index.ts"
},
"devDependencies": {
+ "@types/bun": "^1.1.0",
+ "gsap": "^3.12.5",
+ "puppeteer-core": "^24.39.1",
"tsup": "^8.0.0",
"typescript": "^5.0.0",
"vitest": "^3.2.4"
diff --git a/packages/player/tests/perf/baseline.json b/packages/player/tests/perf/baseline.json
new file mode 100644
index 000000000..52211e710
--- /dev/null
+++ b/packages/player/tests/perf/baseline.json
@@ -0,0 +1,10 @@
+{
+ "compLoadColdP95Ms": 2000,
+ "compLoadWarmP95Ms": 1000,
+ "fpsMin": 55,
+ "scrubLatencyP95IsolatedMs": 80,
+ "scrubLatencyP95InlineMs": 33,
+ "driftMaxMs": 500,
+ "driftP95Ms": 100,
+ "allowedRegressionRatio": 0.1
+}
diff --git a/packages/player/tests/perf/fixtures/gsap-heavy/index.html b/packages/player/tests/perf/fixtures/gsap-heavy/index.html
new file mode 100644
index 000000000..74416fbf4
--- /dev/null
+++ b/packages/player/tests/perf/fixtures/gsap-heavy/index.html
@@ -0,0 +1,115 @@
+
+
+
+
+ perf fixture: gsap-heavy
+
+
+
+
+
+
+
+
+
diff --git a/packages/player/tests/perf/index.ts b/packages/player/tests/perf/index.ts
new file mode 100644
index 000000000..c6942ae9d
--- /dev/null
+++ b/packages/player/tests/perf/index.ts
@@ -0,0 +1,200 @@
+#!/usr/bin/env bun
+/**
+ * Player Performance Test Runner
+ *
+ * Boots a static server, launches puppeteer-core against locally-served fixtures,
+ * runs the configured scenarios, then evaluates the collected metrics against
+ * baseline.json via perf-gate.
+ *
+ * Usage:
+ * bun run packages/player/tests/perf/index.ts
+ * bun run packages/player/tests/perf/index.ts --mode enforce
+ * bun run packages/player/tests/perf/index.ts --scenarios load
+ * bun run packages/player/tests/perf/index.ts --runs 5 --headful
+ *
+ * Flags:
+ * --mode default: PLAYER_PERF_MODE env or "measure"
+ * --scenarios comma-separated scenario ids; default: all enabled
+ * --runs override per-scenario run count
+ * --fixture single fixture (default: every fixture in fixtures/)
+ * --headful show the browser; default: headless
+ *
+ * Exit codes:
+ * 0 all pass (or measure mode)
+ * 1 scenario crashed
+ * 2 perf gate failed in enforce mode
+ */
+
+import { execFileSync } from "node:child_process";
+import { existsSync, mkdirSync, writeFileSync } from "node:fs";
+import { dirname, resolve } from "node:path";
+import { fileURLToPath } from "node:url";
+import { runLoad } from "./scenarios/03-load.ts";
+import { reportAndGate, type GateMode, type GateResult, type Metric } from "./perf-gate.ts";
+import { launchBrowser } from "./runner.ts";
+import { startServer } from "./server.ts";
+
+const HERE = dirname(fileURLToPath(import.meta.url));
+const RESULTS_DIR = resolve(HERE, "results");
+const RESULTS_FILE = resolve(RESULTS_DIR, "metrics.json");
+
+type ScenarioId = "load";
+
+type ResultsFile = {
+ schemaVersion: 1;
+ timestamp: string;
+ gitSha: string | null;
+ mode: GateMode;
+ scenarios: ScenarioId[];
+ runs: number | null;
+ fixture: string | null;
+ crashed: boolean;
+ passed: boolean;
+ metrics: Metric[];
+ gate: GateResult[];
+};
+
+function readGitSha(): string | null {
+ try {
+ return execFileSync("git", ["rev-parse", "HEAD"], { encoding: "utf-8" }).trim();
+ } catch {
+ return null;
+ }
+}
+
+function writeResults(file: ResultsFile): void {
+ if (!existsSync(RESULTS_DIR)) {
+ mkdirSync(RESULTS_DIR, { recursive: true });
+ }
+ writeFileSync(RESULTS_FILE, JSON.stringify(file, null, 2) + "\n");
+ console.log(`[player-perf] wrote results to ${RESULTS_FILE}`);
+}
+
+type ParsedArgs = {
+ mode: GateMode;
+ scenarios: ScenarioId[];
+ runs: number | null;
+ fixture: string | null;
+ headful: boolean;
+};
+
+function parseArgs(argv: string[]): ParsedArgs {
+ const result: ParsedArgs = {
+ // TODO(player-perf): once baselines have settled on CI for ~1–2 weeks and we
+ // are confident there are no false positives from runner jitter, flip this
+ // default from "measure" to "enforce" — that single line + bumping the
+ // workflow's `--mode=measure` flag in .github/workflows/player-perf.yml is
+ // the entire opt-in. See packages/player/tests/perf/perf-gate.ts for how
+ // `mode` is consumed (measure logs regressions but never fails; enforce
+ // exits non-zero on regression).
+ mode: (process.env.PLAYER_PERF_MODE as GateMode) === "enforce" ? "enforce" : "measure",
+ scenarios: ["load"],
+ runs: null,
+ fixture: null,
+ headful: false,
+ };
+ // Normalize `--key=value` into `[--key, value]` so the rest of the loop
+ // only has to handle the space-separated form.
+ const tokens: string[] = [];
+ for (const raw of argv.slice(2)) {
+ if (raw.startsWith("--") && raw.includes("=")) {
+ const eq = raw.indexOf("=");
+ tokens.push(raw.slice(0, eq), raw.slice(eq + 1));
+ } else {
+ tokens.push(raw);
+ }
+ }
+ for (let i = 0; i < tokens.length; i++) {
+ const arg = tokens[i];
+ const next = tokens[i + 1];
+ if (arg === "--mode" && next) {
+ if (next !== "measure" && next !== "enforce") {
+ throw new Error(`--mode must be measure|enforce, got ${next}`);
+ }
+ result.mode = next;
+ i++;
+ } else if (arg === "--scenarios" && next) {
+ result.scenarios = next.split(",").map((s) => s.trim()) as ScenarioId[];
+ i++;
+ } else if (arg === "--runs" && next) {
+ result.runs = parseInt(next, 10);
+ i++;
+ } else if (arg === "--fixture" && next) {
+ result.fixture = next;
+ i++;
+ } else if (arg === "--headful") {
+ result.headful = true;
+ }
+ }
+ return result;
+}
+
+async function main(): Promise<void> {
+ const args = parseArgs(process.argv);
+ console.log(
+ `[player-perf] starting: mode=${args.mode} scenarios=${args.scenarios.join(",")} runs=${args.runs ?? "default"} fixture=${args.fixture ?? "all"}`,
+ );
+
+ const server = startServer();
+ console.log(`[player-perf] server listening at ${server.origin}`);
+
+ const browser = await launchBrowser({ headless: !args.headful });
+ console.log("[player-perf] browser launched");
+
+ const metrics: Metric[] = [];
+ let crashed = false;
+
+ try {
+ for (const scenario of args.scenarios) {
+ if (scenario === "load") {
+ const m = await runLoad({
+ browser,
+ origin: server.origin,
+ runs: args.runs ?? 5,
+ fixture: args.fixture,
+ });
+ metrics.push(...m);
+ } else {
+ console.warn(`[player-perf] unknown scenario: ${scenario}`);
+ }
+ }
+ } catch (err) {
+ crashed = true;
+ console.error("[player-perf] scenario crashed:", err);
+ } finally {
+ await browser.close();
+ await server.stop();
+ }
+
+ let report: { passed: boolean; rows: GateResult[] } = { passed: !crashed, rows: [] };
+ if (!crashed && metrics.length > 0) {
+ report = reportAndGate(metrics, args.mode);
+ }
+
+ writeResults({
+ schemaVersion: 1,
+ timestamp: new Date().toISOString(),
+ gitSha: readGitSha(),
+ mode: args.mode,
+ scenarios: args.scenarios,
+ runs: args.runs,
+ fixture: args.fixture,
+ crashed,
+ passed: report.passed && !crashed,
+ metrics,
+ gate: report.rows,
+ });
+
+ if (crashed) {
+ process.exit(1);
+ }
+ if (!report.passed) {
+ process.exit(2);
+ }
+ process.exit(0);
+}
+
+main().catch((err) => {
+ console.error("[player-perf] fatal:", err);
+ process.exit(1);
+});
diff --git a/packages/player/tests/perf/perf-gate.ts b/packages/player/tests/perf/perf-gate.ts
new file mode 100644
index 000000000..60cf7f52e
--- /dev/null
+++ b/packages/player/tests/perf/perf-gate.ts
@@ -0,0 +1,106 @@
+import { readFileSync } from "node:fs";
+import { dirname, resolve } from "node:path";
+import { fileURLToPath } from "node:url";
+
+/**
+ * Compares measured perf metrics against baseline.json with an allowed regression ratio.
+ *
+ * Mirrors packages/producer/src/perf-gate.ts: each metric has a baseline value, the
+ * gate computes `max = baseline * (1 + allowedRegressionRatio)`, and any measured
+ * value above max counts as a regression. In "measure" mode the script logs but
+ * never exits non-zero — useful for the first runs while we collect realistic
+ * baselines on the CI runner. Flip to "enforce" once baselines are committed.
+ */
+
+const HERE = dirname(fileURLToPath(import.meta.url));
+const DEFAULT_BASELINE_PATH = resolve(HERE, "baseline.json");
+
+export type Direction = "lower-is-better" | "higher-is-better";
+
+export type Metric = {
+ /** Display name, e.g. "comp_load_cold_p95_ms" */
+ name: string;
+ /** Key into baseline.json, e.g. "compLoadColdP95Ms" */
+ baselineKey: keyof PerfBaseline;
+ value: number;
+ unit: string;
+ direction: Direction;
+ samples?: number[];
+};
+
+export type PerfBaseline = {
+ compLoadColdP95Ms: number;
+ compLoadWarmP95Ms: number;
+ fpsMin: number;
+ scrubLatencyP95IsolatedMs: number;
+ scrubLatencyP95InlineMs: number;
+ driftMaxMs: number;
+ driftP95Ms: number;
+ allowedRegressionRatio: number;
+};
+
+export type GateMode = "measure" | "enforce";
+
+export type GateResult = {
+ metric: Metric;
+ baseline: number;
+ threshold: number;
+ passed: boolean;
+ ratio: number;
+};
+
+export function loadBaseline(path?: string): PerfBaseline {
+ const baselinePath = path ?? process.env.PLAYER_PERF_BASELINE_PATH ?? DEFAULT_BASELINE_PATH;
+ const raw = readFileSync(baselinePath, "utf-8");
+ return JSON.parse(raw) as PerfBaseline;
+}
+
+export function evaluateMetric(metric: Metric, baseline: PerfBaseline): GateResult {
+ const baselineValue = baseline[metric.baselineKey];
+ if (typeof baselineValue !== "number") {
+ throw new Error(`[player-perf] baseline missing numeric key: ${String(metric.baselineKey)}`);
+ }
+ const allowed = baseline.allowedRegressionRatio;
+ const threshold =
+ metric.direction === "lower-is-better"
+ ? baselineValue * (1 + allowed)
+ : baselineValue * (1 - allowed);
+ const passed =
+ metric.direction === "lower-is-better" ? metric.value <= threshold : metric.value >= threshold;
+ const ratio = baselineValue === 0 ? 0 : metric.value / baselineValue;
+ return { metric, baseline: baselineValue, threshold, passed, ratio };
+}
+
+export type GateReport = {
+ passed: boolean;
+ rows: GateResult[];
+};
+
+export function reportAndGate(
+ metrics: Metric[],
+ // `mode` is resolved upstream in packages/player/tests/perf/index.ts
+ // (`parseArgs`): the default comes from PLAYER_PERF_MODE env or "measure", and
+ // the CLI flag `--mode=measure|enforce` overrides it. The "flip to enforce"
+ // TODO lives at that call site so it is a one-line change.
+ mode: GateMode,
+ baselinePath?: string,
+): GateReport {
+ const baseline = loadBaseline(baselinePath);
+ const rows = metrics.map((m) => evaluateMetric(m, baseline));
+ console.log("[PerfGate] mode=" + mode);
+ for (const row of rows) {
+ const status = row.passed ? "PASS" : "FAIL";
+ const dir = row.metric.direction === "lower-is-better" ? "≤" : "≥";
+ console.log(
+ `[PerfGate] ${status} ${row.metric.name} = ${row.metric.value.toFixed(2)}${row.metric.unit} (baseline=${row.baseline}${row.metric.unit}, threshold ${dir} ${row.threshold.toFixed(2)}${row.metric.unit}, ratio=${row.ratio.toFixed(3)})`,
+ );
+ }
+ const failed = rows.filter((r) => !r.passed);
+ if (failed.length === 0) return { passed: true, rows };
+ if (mode === "measure") {
+ console.log(`[PerfGate] ${failed.length} regression(s) detected — measure mode, not failing`);
+ return { passed: true, rows };
+ }
+ console.error(`[PerfGate] ${failed.length} regression(s) detected — enforce mode, failing`);
+ return { passed: false, rows };
+}
diff --git a/packages/player/tests/perf/runner.ts b/packages/player/tests/perf/runner.ts
new file mode 100644
index 000000000..56282c188
--- /dev/null
+++ b/packages/player/tests/perf/runner.ts
@@ -0,0 +1,137 @@
+import { existsSync } from "node:fs";
+import { dirname, resolve } from "node:path";
+import { fileURLToPath } from "node:url";
+import puppeteer, { type Browser, type LaunchOptions, type Page } from "puppeteer-core";
+
+/**
+ * Puppeteer browser + page helpers shared across all perf scenarios.
+ *
+ * Browser launch args mirror packages/producer/src/parity-harness.ts so we get
+ * the same SwiftShader-backed WebGL output and font hinting between perf runs
+ * and visual parity runs. That parity matters for P0-1c (live-playback parity)
+ * and is harmless for the load/scrub/drift scenarios.
+ */
+
+const HERE = dirname(fileURLToPath(import.meta.url));
+const PLAYER_PKG = resolve(HERE, "../..");
+
+export type LaunchOpts = {
+ width?: number;
+ height?: number;
+ headless?: boolean;
+};
+
+export type LoadOpts = {
+ /** Fixture name (must match a directory under tests/perf/fixtures/). */
+ fixture: string;
+ width?: number;
+ height?: number;
+ /** Override timeout in ms for the player `ready` event. Default 30s. */
+ readyTimeoutMs?: number;
+};
+
+export type LoadResult = {
+ /** Wall-clock ms from page navigation start to player `ready` event. */
+ loadMs: number;
+ /** Composition duration as reported by the player (seconds). */
+ duration: number;
+};
+
+declare global {
+ interface Window {
+ __playerReady?: boolean;
+ __playerReadyAt?: number;
+ __playerNavStart?: number;
+ __playerDuration?: number;
+ __playerError?: string;
+ }
+}
+
+function findChromeExecutable(): string | undefined {
+ if (process.env.CHROME_PATH) return process.env.CHROME_PATH;
+ if (process.env.PUPPETEER_EXECUTABLE_PATH) return process.env.PUPPETEER_EXECUTABLE_PATH;
+ const candidates = [
+ "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
+ "/Applications/Chromium.app/Contents/MacOS/Chromium",
+ "/usr/bin/google-chrome",
+ "/usr/bin/chromium-browser",
+ "/usr/bin/chromium",
+ ];
+ for (const path of candidates) {
+ if (existsSync(path)) return path;
+ }
+ return undefined;
+}
+
+export async function launchBrowser(options: LaunchOpts = {}): Promise<Browser> {
+ const width = options.width ?? 1920;
+ const height = options.height ?? 1080;
+ const executablePath = findChromeExecutable();
+ if (!executablePath) {
+ throw new Error(
+ `[player-perf] no chrome executable found. Set CHROME_PATH or install Google Chrome. (looked in: $CHROME_PATH, $PUPPETEER_EXECUTABLE_PATH, /Applications/Google Chrome.app, /usr/bin/google-chrome)`,
+ );
+ }
+ const launchOptions: LaunchOptions = {
+ executablePath,
+ headless: options.headless ?? true,
+ defaultViewport: {
+ width,
+ height,
+ deviceScaleFactor: 1,
+ },
+ args: [
+ "--no-sandbox",
+ "--disable-setuid-sandbox",
+ "--disable-dev-shm-usage",
+ "--disable-accelerated-2d-canvas",
+ "--enable-webgl",
+ "--ignore-gpu-blocklist",
+ "--use-gl=angle",
+ "--use-angle=swiftshader",
+ "--font-render-hinting=none",
+ "--force-color-profile=srgb",
+ "--autoplay-policy=no-user-gesture-required",
+ `--window-size=${width},${height}`,
+ ],
+ };
+ return puppeteer.launch(launchOptions);
+}
+
+/**
+ * Navigate a page to the host shell and wait for the player's `ready` event.
+ * Returns the wall-clock ms between `Page.goto` start and the `ready` event,
+ * along with the composition duration the player reported.
+ */
+export async function loadHostPage(
+ page: Page,
+ origin: string,
+ options: LoadOpts,
+): Promise<LoadResult> {
+ const width = options.width ?? 1920;
+ const height = options.height ?? 1080;
+ const readyTimeoutMs = options.readyTimeoutMs ?? 30_000;
+ const url = `${origin}/host.html?fixture=${encodeURIComponent(options.fixture)}&width=${width}&height=${height}`;
+
+ const t0 = performance.now();
+ await page.goto(url, { waitUntil: "domcontentloaded", timeout: readyTimeoutMs });
+ await page.waitForFunction(() => window.__playerReady === true || !!window.__playerError, {
+ timeout: readyTimeoutMs,
+ });
+ const error = await page.evaluate(() => window.__playerError ?? null);
+ if (error) throw new Error(`[player-perf] player reported error during load: ${error}`);
+ const loadMs = performance.now() - t0;
+ const duration = (await page.evaluate(() => window.__playerDuration ?? 0)) ?? 0;
+ return { loadMs, duration };
+}
+
+export function percentile(samples: number[], pct: number): number {
+ if (samples.length === 0) return 0;
+ const sorted = [...samples].sort((a, b) => a - b);
+ const idx = Math.min(sorted.length - 1, Math.max(0, Math.ceil((pct / 100) * sorted.length) - 1));
+ return sorted[idx] ?? 0;
+}
+
+export function repoPlayerDir(): string {
+ return PLAYER_PKG;
+}
diff --git a/packages/player/tests/perf/scenarios/03-load.ts b/packages/player/tests/perf/scenarios/03-load.ts
new file mode 100644
index 000000000..da52f0b58
--- /dev/null
+++ b/packages/player/tests/perf/scenarios/03-load.ts
@@ -0,0 +1,98 @@
+/**
+ * Scenario 03: composition load (cold + warm).
+ *
+ * Cold: a fresh BrowserContext per run so the network cache is empty. Measures
+ * the wall-clock time from `page.goto` until the player fires its `ready`
+ * event (host shell sets `window.__playerReady`). This stresses html parse +
+ * runtime IIFE eval + GSAP eval + the player's first composition init.
+ *
+ * Warm: same BrowserContext is reused across runs so the static assets
+ * (player bundle, runtime, GSAP, fixture HTML) are served from disk cache.
+ * This isolates the player's per-composition init cost from network I/O.
+ *
+ * Both metrics report p95 over `runs` samples and feed into perf-gate.ts:
+ * - compLoadColdP95Ms (lower is better)
+ * - compLoadWarmP95Ms (lower is better)
+ */
+
+import type { Browser } from "puppeteer-core";
+import { loadHostPage, percentile } from "../runner.ts";
+import type { Metric } from "../perf-gate.ts";
+
+export type LoadScenarioOpts = {
+ browser: Browser;
+ origin: string;
+ /** Number of cold and warm runs each. */
+ runs: number;
+ /** If null, runs the default fixture (gsap-heavy). */
+ fixture: string | null;
+};
+
+const DEFAULT_FIXTURE = "gsap-heavy";
+
+export async function runLoad(opts: LoadScenarioOpts): Promise<Metric[]> {
+ const fixture = opts.fixture ?? DEFAULT_FIXTURE;
+ const runs = Math.max(1, opts.runs);
+ console.log(`[scenario:load] fixture=${fixture} runs=${runs}`);
+
+ const cold: number[] = [];
+ for (let i = 0; i < runs; i++) {
+ const ctx = await opts.browser.createBrowserContext();
+ try {
+ const page = await ctx.newPage();
+ const { loadMs, duration } = await loadHostPage(page, opts.origin, { fixture });
+ cold.push(loadMs);
+ console.log(
+ `[scenario:load] cold[${i + 1}/${runs}] loadMs=${loadMs.toFixed(1)} duration=${duration}s`,
+ );
+ await page.close();
+ } finally {
+ await ctx.close();
+ }
+ }
+
+ const warm: number[] = [];
+ const warmCtx = await opts.browser.createBrowserContext();
+ try {
+ const warmupPage = await warmCtx.newPage();
+ await loadHostPage(warmupPage, opts.origin, { fixture });
+ await warmupPage.close();
+
+ for (let i = 0; i < runs; i++) {
+ const page = await warmCtx.newPage();
+ const { loadMs, duration } = await loadHostPage(page, opts.origin, { fixture });
+ warm.push(loadMs);
+ console.log(
+ `[scenario:load] warm[${i + 1}/${runs}] loadMs=${loadMs.toFixed(1)} duration=${duration}s`,
+ );
+ await page.close();
+ }
+ } finally {
+ await warmCtx.close();
+ }
+
+ const coldP95 = percentile(cold, 95);
+ const warmP95 = percentile(warm, 95);
+ console.log(
+ `[scenario:load] cold p95=${coldP95.toFixed(1)}ms (samples=${cold.length}) warm p95=${warmP95.toFixed(1)}ms (samples=${warm.length})`,
+ );
+
+ return [
+ {
+ name: "comp_load_cold_p95_ms",
+ baselineKey: "compLoadColdP95Ms",
+ value: coldP95,
+ unit: "ms",
+ direction: "lower-is-better",
+ samples: cold,
+ },
+ {
+ name: "comp_load_warm_p95_ms",
+ baselineKey: "compLoadWarmP95Ms",
+ value: warmP95,
+ unit: "ms",
+ direction: "lower-is-better",
+ samples: warm,
+ },
+ ];
+}
diff --git a/packages/player/tests/perf/server.ts b/packages/player/tests/perf/server.ts
new file mode 100644
index 000000000..3cbedb588
--- /dev/null
+++ b/packages/player/tests/perf/server.ts
@@ -0,0 +1,202 @@
+import { existsSync } from "node:fs";
+import { dirname, join, resolve } from "node:path";
+import { fileURLToPath } from "node:url";
+
+/**
+ * Static file server for player perf tests.
+ *
+ * Serves all bundles, vendor scripts, fixtures, and the embed host page from
+ * a single origin so the player iframe stays same-origin. Without same-origin
+ * the runtime probe in `_onIframeLoad` falls into the cross-origin catch path
+ * and the `ready` event fires later (or not at all) — which would be measured
+ * as a player-side regression instead of an environment artifact.
+ *
+ * URL routes:
+ * / → host.html (default fixture: gsap-heavy)
+ * /host.html?fixture= → embed page hosting
+ * /player/hyperframes-player.global.js
+ * /vendor/gsap.min.js
+ * /vendor/hyperframe.runtime.iife.js
+ * /fixtures/<name>/ → fixture HTML + assets
+ */
+
+const HERE = dirname(fileURLToPath(import.meta.url));
+const PLAYER_PKG = resolve(HERE, "../..");
+const REPO_ROOT = resolve(PLAYER_PKG, "../..");
+
+function firstExisting(candidates: string[]): string {
+ for (const p of candidates) {
+ if (existsSync(p)) return p;
+ }
+ return candidates[0] ?? "";
+}
+
+const PATHS = {
+ player: join(PLAYER_PKG, "dist/hyperframes-player.global.js"),
+ runtime: join(REPO_ROOT, "packages/core/dist/hyperframe.runtime.iife.js"),
+ // bun installs gsap into the package's node_modules in workspace mode, but
+ // hoists it to the repo root if multiple packages share the same version.
+ // Probe both locations so the server works regardless of layout.
+ gsap: firstExisting([
+ join(PLAYER_PKG, "node_modules/gsap/dist/gsap.min.js"),
+ join(REPO_ROOT, "node_modules/gsap/dist/gsap.min.js"),
+ ]),
+ fixturesDir: join(HERE, "fixtures"),
+} as const;
+
+export type ServeOptions = {
+ port?: number;
+ /** Disables HTTP cache so every request is a "cold" fetch. Used for cold-load scenarios. */
+ noCache?: boolean;
+};
+
+export type RunningServer = {
+ port: number;
+ origin: string;
+ stop(): Promise<void>;
+};
+
+const MIME_TYPES: Record<string, string> = {
+ ".html": "text/html; charset=utf-8",
+ ".js": "application/javascript; charset=utf-8",
+ ".mjs": "application/javascript; charset=utf-8",
+ ".css": "text/css; charset=utf-8",
+ ".json": "application/json; charset=utf-8",
+ ".png": "image/png",
+ ".jpg": "image/jpeg",
+ ".jpeg": "image/jpeg",
+ ".webp": "image/webp",
+ ".mp4": "video/mp4",
+ ".webm": "video/webm",
+ ".mp3": "audio/mpeg",
+};
+
+function mimeFor(path: string): string {
+ const dot = path.lastIndexOf(".");
+ if (dot < 0) return "application/octet-stream";
+ return MIME_TYPES[path.slice(dot).toLowerCase()] ?? "application/octet-stream";
+}
+
+function buildHostHtml(fixtureName: string, width: number, height: number): string {
+ const playerSrc = "/player/hyperframes-player.global.js";
+ const fixtureSrc = `/fixtures/${fixtureName}/index.html`;
+ return `
+
+
+
+ player perf host: ${fixtureName}
+
+
+
+
+
+
+
+`;
+}
+
+async function readBunFile(path: string): Promise<Response> {
+ if (!existsSync(path)) {
+ return new Response(`Not found: ${path}`, { status: 404 });
+ }
+ const file = Bun.file(path);
+ return new Response(file, {
+ headers: {
+ "Content-Type": mimeFor(path),
+ },
+ });
+}
+
+function applyCacheHeaders(res: Response, noCache: boolean): Response {
+ if (noCache) {
+ res.headers.set("Cache-Control", "no-store, no-cache, must-revalidate, max-age=0");
+ res.headers.set("Pragma", "no-cache");
+ res.headers.set("Expires", "0");
+ } else {
+ res.headers.set("Cache-Control", "public, max-age=3600");
+ }
+ return res;
+}
+
+export function startServer(options: ServeOptions = {}): RunningServer {
+ const noCache = options.noCache ?? false;
+
+ const server = Bun.serve({
+ port: options.port ?? 0,
+ async fetch(req) {
+ const url = new URL(req.url);
+ const path = url.pathname;
+
+ if (path === "/" || path === "/host.html") {
+ const fixture = url.searchParams.get("fixture") || "gsap-heavy";
+ const width = Number(url.searchParams.get("width") || "1920");
+ const height = Number(url.searchParams.get("height") || "1080");
+ const html = buildHostHtml(fixture, width, height);
+ return applyCacheHeaders(
+ new Response(html, { headers: { "Content-Type": "text/html; charset=utf-8" } }),
+ noCache,
+ );
+ }
+
+ if (path === "/player/hyperframes-player.global.js") {
+ return applyCacheHeaders(await readBunFile(PATHS.player), noCache);
+ }
+
+ if (path === "/vendor/hyperframe.runtime.iife.js") {
+ return applyCacheHeaders(await readBunFile(PATHS.runtime), noCache);
+ }
+
+ if (path === "/vendor/gsap.min.js") {
+ return applyCacheHeaders(await readBunFile(PATHS.gsap), noCache);
+ }
+
+ if (path.startsWith("/fixtures/")) {
+ const rel = path.replace(/^\/fixtures\//, "");
+ const filePath = join(PATHS.fixturesDir, rel);
+ if (!filePath.startsWith(PATHS.fixturesDir)) {
+ return new Response("Forbidden", { status: 403 });
+ }
+ return applyCacheHeaders(await readBunFile(filePath), noCache);
+ }
+
+ return new Response("Not found", { status: 404 });
+ },
+ });
+
+ // server.port is `number | undefined` in Bun's types (undefined only for unix-socket
+ // servers, which we never use). Narrow it once at startup so the rest of the perf
+ // harness can rely on a numeric origin.
+ const port = server.port;
+ if (port === undefined) {
+ throw new Error("[player-perf] Bun.serve did not assign a TCP port");
+ }
+ return {
+ port,
+ origin: `http://127.0.0.1:${port}`,
+ async stop() {
+ server.stop(true);
+ },
+ };
+}
diff --git a/packages/player/tests/perf/tsconfig.json b/packages/player/tests/perf/tsconfig.json
new file mode 100644
index 000000000..bafd58af9
--- /dev/null
+++ b/packages/player/tests/perf/tsconfig.json
@@ -0,0 +1,16 @@
+{
+ "compilerOptions": {
+ "target": "ES2022",
+ "module": "ESNext",
+ "moduleResolution": "bundler",
+ "lib": ["ES2022", "DOM", "DOM.Iterable"],
+ "strict": true,
+ "esModuleInterop": true,
+ "skipLibCheck": true,
+ "noEmit": true,
+ "types": ["bun"],
+ "allowImportingTsExtensions": true,
+ "resolveJsonModule": true
+ },
+ "include": ["**/*.ts"]
+}