From 3e8b0324487e0ea7dd420414c22d8dbdb8d3cb1a Mon Sep 17 00:00:00 2001 From: Vaibhav Srivastav Date: Wed, 22 Apr 2026 18:42:26 +0530 Subject: [PATCH] Add mobile performance skills --- .../build-ios-apps/.codex-plugin/plugin.json | 6 +- plugins/build-ios-apps/README.md | 4 + plugins/build-ios-apps/agents/openai.yaml | 4 +- .../skills/ios-ettrace-performance/SKILL.md | 197 +++++++++++ .../agents/openai.yaml | 4 + .../scripts/analyze_flamegraph_json.py | 327 ++++++++++++++++++ .../scripts/collect_ios_dsyms.sh | 253 ++++++++++++++ .../skills/ios-memgraph-leaks/SKILL.md | 76 ++++ .../ios-memgraph-leaks/agents/openai.yaml | 4 + .../scripts/capture_sim_memgraph.sh | 143 ++++++++ .../scripts/summarize_memgraph_leaks.py | 159 +++++++++ .../.codex-plugin/plugin.json | 16 +- plugins/test-android-apps/agents/openai.yaml | 4 +- .../skills/android-performance/SKILL.md | 279 +++++++++++++++ .../android-performance/agents/openai.yaml | 4 + .../scripts/heapprofd_reports.sh | 133 +++++++ .../scripts/simpleperf_hotspots.sh | 247 +++++++++++++ 17 files changed, 1848 insertions(+), 12 deletions(-) create mode 100644 plugins/build-ios-apps/skills/ios-ettrace-performance/SKILL.md create mode 100644 plugins/build-ios-apps/skills/ios-ettrace-performance/agents/openai.yaml create mode 100755 plugins/build-ios-apps/skills/ios-ettrace-performance/scripts/analyze_flamegraph_json.py create mode 100755 plugins/build-ios-apps/skills/ios-ettrace-performance/scripts/collect_ios_dsyms.sh create mode 100644 plugins/build-ios-apps/skills/ios-memgraph-leaks/SKILL.md create mode 100644 plugins/build-ios-apps/skills/ios-memgraph-leaks/agents/openai.yaml create mode 100755 plugins/build-ios-apps/skills/ios-memgraph-leaks/scripts/capture_sim_memgraph.sh create mode 100755 plugins/build-ios-apps/skills/ios-memgraph-leaks/scripts/summarize_memgraph_leaks.py create mode 100644 plugins/test-android-apps/skills/android-performance/SKILL.md create mode 100644 
plugins/test-android-apps/skills/android-performance/agents/openai.yaml create mode 100755 plugins/test-android-apps/skills/android-performance/scripts/heapprofd_reports.sh create mode 100755 plugins/test-android-apps/skills/android-performance/scripts/simpleperf_hotspots.sh diff --git a/plugins/build-ios-apps/.codex-plugin/plugin.json b/plugins/build-ios-apps/.codex-plugin/plugin.json index 3ba6927a..dad70ed4 100644 --- a/plugins/build-ios-apps/.codex-plugin/plugin.json +++ b/plugins/build-ios-apps/.codex-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "build-ios-apps", "version": "0.1.0", - "description": "Build iOS apps with workflows for App Intents, SwiftUI UI work, refactors, performance audits, and simulator debugging.", + "description": "Build iOS apps with workflows for App Intents, SwiftUI UI work, refactors, performance profiling, leak investigation, and simulator debugging.", "author": { "name": "OpenAI", "email": "support@openai.com", @@ -27,7 +27,7 @@ "interface": { "displayName": "Build iOS Apps", "shortDescription": "Build, refine, and debug iOS apps with App Intents, SwiftUI, and Xcode workflows", - "longDescription": "Use Build iOS Apps to design App Intents and App Shortcuts, build or refactor SwiftUI UI, adopt modern iOS patterns such as Liquid Glass, audit runtime performance, and debug apps on simulators with XcodeBuildMCP-backed workflows.", + "longDescription": "Use Build iOS Apps to design App Intents and App Shortcuts, build or refactor SwiftUI UI, adopt modern iOS patterns such as Liquid Glass, audit runtime performance, capture ETTrace profiles, investigate memory leaks, and debug apps on simulators with XcodeBuildMCP-backed workflows.", "developerName": "OpenAI", "category": "Coding", "capabilities": [ @@ -38,7 +38,7 @@ "websiteURL": "https://openai.com/", "privacyPolicyURL": "https://openai.com/policies/privacy-policy/", "termsOfServiceURL": "https://openai.com/policies/terms-of-use/", - "defaultPrompt": "Design App Intents or App 
Shortcuts, build or refactor SwiftUI UI, audit performance, adopt modern iOS APIs, or debug an app on a simulator", + "defaultPrompt": "Design App Intents or App Shortcuts, build or refactor SwiftUI UI, profile performance, investigate leaks, adopt modern iOS APIs, or debug an app on a simulator", "brandColor": "#0A84FF", "composerIcon": "./assets/build-ios-apps-small.svg", "logo": "./assets/app-icon.png", diff --git a/plugins/build-ios-apps/README.md b/plugins/build-ios-apps/README.md index 1926fb52..71a8842d 100644 --- a/plugins/build-ios-apps/README.md +++ b/plugins/build-ios-apps/README.md @@ -5,6 +5,8 @@ This plugin packages iOS and Swift workflows in `plugins/build-ios-apps`. It currently includes these skills: - `ios-debugger-agent` +- `ios-ettrace-performance` +- `ios-memgraph-leaks` - `ios-app-intents` - `swiftui-liquid-glass` - `swiftui-performance-audit` @@ -17,6 +19,8 @@ It currently includes these skills: - building and refactoring SwiftUI UI using current platform patterns - reviewing or adopting iOS 26+ Liquid Glass APIs - auditing SwiftUI performance and guiding profiling workflows +- capturing symbolicated ETTrace simulator profiles for focused app flows +- capturing and comparing iOS memgraphs to root-cause leaks - debugging iOS apps on simulators with XcodeBuildMCP-backed flows - restructuring large SwiftUI views toward smaller, more stable compositions diff --git a/plugins/build-ios-apps/agents/openai.yaml b/plugins/build-ios-apps/agents/openai.yaml index f523006e..6dede39a 100644 --- a/plugins/build-ios-apps/agents/openai.yaml +++ b/plugins/build-ios-apps/agents/openai.yaml @@ -1,6 +1,6 @@ interface: display_name: "Build iOS Apps" - short_description: "Build, refine, and debug iOS apps with App Intents, SwiftUI, and Xcode workflows" + short_description: "Build, profile, debug, and refine iOS apps with SwiftUI and Xcode workflows" icon_small: "./assets/build-ios-apps-small.svg" icon_large: "./assets/app-icon.png" - default_prompt: "Use Build iOS 
Apps to design App Intents or App Shortcuts, build or refactor SwiftUI UI, audit performance, adopt modern iOS APIs, or debug an iOS app on a simulator." + default_prompt: "Use Build iOS Apps to design App Intents, build or refactor SwiftUI UI, capture ETTrace profiles, investigate memory leaks, adopt modern iOS APIs, or debug an iOS app on a simulator." diff --git a/plugins/build-ios-apps/skills/ios-ettrace-performance/SKILL.md b/plugins/build-ios-apps/skills/ios-ettrace-performance/SKILL.md new file mode 100644 index 00000000..9e6520a1 --- /dev/null +++ b/plugins/build-ios-apps/skills/ios-ettrace-performance/SKILL.md @@ -0,0 +1,197 @@ +--- +name: ios-ettrace-performance +description: Capture and interpret ETTrace profiles for iOS simulator apps, including symbolicated launch and runtime flamegraphs. Use when asked to profile an iOS app flow, gather simulator performance traces, identify CPU-heavy stacks, compare before/after traces, or produce flamegraph evidence for startup, scrolling, navigation, rendering, or other user-visible latency. +--- + +# iOS ETTrace Performance + +Use this skill to capture a focused, symbolicated ETTrace profile from an iOS simulator app. Pair it with `../ios-debugger-agent/SKILL.md` when the task also needs simulator build, install, launch, UI driving, logs, or screenshots. + +## Core Workflow + +1. Pick one focused flow and write down the expected start and stop points. +2. Build the exact simulator app that will be installed and profiled. +3. Temporarily link ETTrace into that app target for simulator/debug profiling. +4. Collect UUID-matched dSYMs for the app executable and embedded dynamic frameworks. +5. Capture one launch or runtime trace. +6. Preserve the processed flamegraph JSON immediately after the run. +7. Analyze only the processed JSON and report the flow, artifacts, hotspots, and caveats. + +Avoid broad "use the app for a while" captures. One trace should correspond to one user-visible flow. 
+ +## Setup + +Use a writable run folder for each profiling session: + +```bash +if [ -z "${RUN_DIR:-}" ]; then + RUN_DIR="$(mktemp -d "${TMPDIR:-/tmp}/codex-ios-ettrace.XXXXXX")" +fi +mkdir -p "$RUN_DIR" +``` + +Install the ETTrace runner CLI if it is not already available: + +```bash +brew install emergetools/homebrew-tap/ettrace +``` + +`ettrace` is the host-side macOS runner. The app must also link an `ETTrace.xcframework` for the iOS Simulator architecture. +This workflow is validated for ETTrace v1.1.0 processed `output_.json` files with top-level `nodes`. + +## Link ETTrace Into The App + +Wire ETTrace into the exact app target being profiled. Keep the integration in a clearly temporary patch and remove it when the profiling task is done unless the user explicitly asks to keep it. + +Preferred options: + +- Reuse an existing simulator-compatible `ETTrace.xcframework` if the repo already vendors one. +- If none exists, build a simulator-only copy into `RUN_DIR` from the upstream ETTrace package. +- Link the framework directly into the app target, not only into tests, resources, data files, or a nested launcher target. +- Confirm launch logs print `Starting ETTrace`. +- Profile only one ETTrace-instrumented simulator app at a time because simulator mode listens on a fixed localhost port. + +Build a simulator framework when needed: + +```bash +ETTRACE_TAG="${ETTRACE_TAG:-v1.1.0}" # Override to match the installed runner when Homebrew updates. +ETTRACE_SRC="$RUN_DIR/ETTrace-src" +if [ ! 
-d "$ETTRACE_SRC" ]; then + git clone --depth 1 --branch "$ETTRACE_TAG" https://github.com/EmergeTools/ETTrace "$ETTRACE_SRC" +fi + +rm -rf "$RUN_DIR/ETTrace-iphonesimulator.xcarchive" "$RUN_DIR/ETTrace.xcframework" +pushd "$ETTRACE_SRC" >/dev/null +xcodebuild archive \ + -scheme ETTrace \ + -archivePath "$RUN_DIR/ETTrace-iphonesimulator.xcarchive" \ + -sdk iphonesimulator \ + -destination 'generic/platform=iOS Simulator' \ + BUILD_LIBRARY_FOR_DISTRIBUTION=YES \ + INSTALL_PATH='Library/Frameworks' \ + SKIP_INSTALL=NO \ + CLANG_CXX_LANGUAGE_STANDARD=c++17 + +xcodebuild -create-xcframework \ + -framework "$RUN_DIR/ETTrace-iphonesimulator.xcarchive/Products/Library/Frameworks/ETTrace.framework" \ + -output "$RUN_DIR/ETTrace.xcframework" +popd >/dev/null +``` + +For Bazel apps, a temporary import usually looks like: + +```python +load("@rules_apple//apple:apple.bzl", "apple_dynamic_xcframework_import") + +package(default_visibility = ["//visibility:public"]) + +apple_dynamic_xcframework_import( + name = "ETTrace", + xcframework_imports = glob(["ETTrace.xcframework/**"]), +) +``` + +For Xcode projects, temporarily add the simulator `ETTrace.xcframework` to the app target's Link Binary With Libraries / Embed Frameworks phases for the debug simulator build you are profiling, then remove that wiring after profiling. + +## Symbolication Gate + +Do not draw conclusions from an unsymbolicated flamegraph. Before every capture, prepare a dSYM folder that includes the app dSYM and any embedded first-party dynamic framework dSYMs. 
+ +Collect dSYMs after the final build that produced the installed app: + +```bash +SKILL_DIR="" +APP="" +DSYMS="$RUN_DIR/dsyms" + +"$SKILL_DIR/scripts/collect_ios_dsyms.sh" \ + --app "$APP" \ + --out-dir "$DSYMS" \ + --search-root "$(dirname "$APP")" \ + --search-root "$PWD" \ + --extra-dsym "$RUN_DIR/ETTrace-iphonesimulator.xcarchive/dSYMs/ETTrace.framework.dSYM" +``` + +Add `--require-framework ` for app-owned dynamic frameworks that must symbolicate; use `--require-all-frameworks` only when every embedded framework is app-owned or expected to have symbols. If the helper reports a missing required app or framework dSYM, rebuild the exact simulator app with dSYM generation before tracing, or add the build output directory that contains those dSYMs as another `--search-root`. + +Verify important UUIDs before tracing when the report looks suspicious: + +```bash +dwarfdump --uuid "$APP/$(/usr/libexec/PlistBuddy -c 'Print :CFBundleExecutable' "$APP/Info.plist")" +find "$DSYMS" -maxdepth 1 -type d -name '*.dSYM' -print -exec dwarfdump --uuid {} \; +``` + +After ETTrace exits, read its symbolication summary. Treat meaningful first-party "have library but no symbol" lines as a failed trace unless they are tiny noise. Unsymbolicated system-framework or ETTrace internal buckets are usually acceptable. + +## Capture + +For launch traces: + +```bash +cd "$RUN_DIR" +CAPTURE_MARKER="$RUN_DIR/.ettrace-capture-start" +: > "$CAPTURE_MARKER" +find "$RUN_DIR" -maxdepth 1 \( -name 'output.json' -o -name 'output_*.json' \) -delete +ettrace --simulator --launch --verbose --dsyms "$DSYMS" +``` + +Use `--launch` only when measuring startup or first render. The first launch connection can force quit the app; relaunch from the simulator home screen rather than Xcode if prompted. For first-launch-after-install traces, temporarily set `ETTraceRunAtStartup=YES` in the app Info.plist, then run `ettrace --simulator` and launch from the home screen. 
+ +For runtime flow traces: + +```bash +cd "$RUN_DIR" +CAPTURE_MARKER="$RUN_DIR/.ettrace-capture-start" +: > "$CAPTURE_MARKER" +find "$RUN_DIR" -maxdepth 1 \( -name 'output.json' -o -name 'output_*.json' \) -delete +ettrace --simulator --verbose --dsyms "$DSYMS" +``` + +Start from a stable screen, start ETTrace, perform exactly one focused flow, wait until visible work is complete, then stop the runner. For wider attribution, add `--multi-thread`; otherwise start with the main thread. + +In Codex, run `ettrace` with a TTY and answer prompts with `write_stdin`. Without a TTY, the runner can exit without a useful trace. + +## Preserve Outputs + +The next ETTrace run can overwrite processed flamegraph files, so preserve fresh `output_.json` files immediately. Do not analyze a saved `output.json`; ETTrace also serves a viewer route with that name, and raw `emerge-output/output.json` files are not the processed flamegraph artifacts this workflow expects. + +```bash +PRESERVED_DIR="$(mktemp -d "$RUN_DIR/run-$(date +%Y%m%d-%H%M%S).XXXXXX")" +: > "$PRESERVED_DIR/summary.txt" +if [ ! -e "$CAPTURE_MARKER" ]; then + echo "error: capture marker missing; start a fresh ETTrace capture before preserving outputs" >&2 + exit 1 +fi +find "$RUN_DIR" -maxdepth 1 -name 'output_*.json' -newer "$CAPTURE_MARKER" -print | while IFS= read -r json; do + preserved="$PRESERVED_DIR/${json##*/}" + cp "$json" "$preserved" + { + echo "## ${preserved##*/}" + python3 "$SKILL_DIR/scripts/analyze_flamegraph_json.py" "$preserved" + } >> "$PRESERVED_DIR/summary.txt" +done +if [ ! -s "$PRESERVED_DIR/summary.txt" ]; then + echo "error: no fresh processed ETTrace output JSON found in $RUN_DIR" >&2 + exit 1 +fi +``` + +Analyze only processed `output_*.json` files in `RUN_DIR`. Ignore `output.json` and raw `emerge-output/output.json` files unless debugging ETTrace itself. 
If the analyzer rejects the JSON shape, capture again with the Homebrew ETTrace runner and matching app-side `ETTrace.xcframework` tag instead of trying to interpret the rejected file. + +## Read The Profile + +Start from `run-*/summary.txt`, then inspect processed JSON directly if needed. + +Report: + +- exact flow, app build, simulator model/runtime, and run count +- processed flamegraph JSON paths +- top active leaves and inclusive first-party stacks with sample weights or percentages +- whether symbols were complete for app-owned binaries +- caveats such as first-run setup, simulator-only cost, network variance, or low sample count +- before/after deltas only when the same flow was captured with comparable setup + + +## Cleanup + +Remove temporary ETTrace app wiring when profiling is complete unless the user asked to keep it. Keep or discard run artifacts based on the active task. diff --git a/plugins/build-ios-apps/skills/ios-ettrace-performance/agents/openai.yaml b/plugins/build-ios-apps/skills/ios-ettrace-performance/agents/openai.yaml new file mode 100644 index 00000000..7d04b02f --- /dev/null +++ b/plugins/build-ios-apps/skills/ios-ettrace-performance/agents/openai.yaml @@ -0,0 +1,4 @@ +interface: + display_name: "iOS ETTrace Performance" + short_description: "Profile symbolicated iOS simulator flows with ETTrace" + default_prompt: "Use $ios-ettrace-performance to capture a focused iOS simulator ETTrace profile and identify time-heavy stacks." diff --git a/plugins/build-ios-apps/skills/ios-ettrace-performance/scripts/analyze_flamegraph_json.py b/plugins/build-ios-apps/skills/ios-ettrace-performance/scripts/analyze_flamegraph_json.py new file mode 100755 index 00000000..542a9aa3 --- /dev/null +++ b/plugins/build-ios-apps/skills/ios-ettrace-performance/scripts/analyze_flamegraph_json.py @@ -0,0 +1,327 @@ +#!/usr/bin/env python3 +"""Summarize ETTrace processed flamegraph JSON for performance triage. 
+ +This helper intentionally accepts only the Homebrew ETTrace v1.1.0 processed +flamegraph shape: one `output_.json` file with a top-level `nodes` +tree. ETTrace raw capture JSON usually lives under an `emerge-output/` temp +folder and has keys such as `threads` and `libraryInfo`; this script rejects +that shape because it has not been symbolicated into flamegraph nodes. + +ETTrace v1.1.0 stores `duration` as inclusive time on every real frame and +appends an empty terminal child with zero duration to preserve same-name stack +buckets. The strict validation here is deliberate: a malformed or legacy file +should fail loudly instead of producing misleading hotspot evidence. +""" + +from __future__ import annotations + +import argparse +import json +import math +import sys +from collections import defaultdict +from pathlib import Path +from typing import Any + + +sys.setrecursionlimit(200_000) + +IDLE_FRAMES = { + "mach_msg_trap", + "__psynch_cvwait", + "semaphore_wait_trap", + "kevent_id", + "__ulock_wait", + "__workq_kernreturn", + "__semwait_signal", + "nanosleep", + "poll", + "select", + "start_wqthread", +} + +WRAPPER_FRAME_EXACT = { + "start", + "main", + "libsystem_kernel.dylib", + "UIApplicationMain", + "-[UIApplication _run]", + "GSEventRunModal", + "_CFRunLoopRunSpecificWithOptions", + "__CFRunLoopRun", + "__CFRunLoopDoSource0", + "__CFRunLoopDoSource1", + "__CFRunLoopServiceMachPort", + "__CFMachPortPerform", + "__CFRUNLOOP_IS_CALLING_OUT_TO_A_SOURCE0_PERFORM_FUNCTION__", + "__CFRUNLOOP_IS_CALLING_OUT_TO_A_SOURCE1_PERFORM_FUNCTION__", + "__CFRUNLOOP_IS_SERVICING_THE_MAIN_DISPATCH_QUEUE__", + "_dispatch_client_callout", + "_dispatch_main_queue_callback_4CF", + "_dispatch_main_queue_drain", +} + +WRAPPER_FRAME_PREFIXES = ( + "runApp<", + "closure #1 in App.", +) + +APP_ENTRYPOINT_SUFFIXES = ( + ".$main()", + ".main()", + ".mainApp()", +) + + +def is_idle(frame: str) -> bool: + """Return whether a frame represents a blocked or sleeping thread.""" + return 
frame in IDLE_FRAMES + + +def is_unattributed(frame: str) -> bool: + """Return whether ETTrace could not map a sample to a symbol.""" + return frame == "" + + +def is_wrapper_frame(frame: str) -> bool: + """Return whether an inclusive frame is generic app/run-loop scaffolding.""" + if frame in WRAPPER_FRAME_EXACT: + return True + + if any(frame.startswith(prefix) for prefix in WRAPPER_FRAME_PREFIXES): + return True + + if frame.startswith("static ") and any(frame.endswith(suffix) for suffix in APP_ENTRYPOINT_SUFFIXES): + return True + + return False + + +def matches_any_pattern(frame: str, patterns: tuple[str, ...]) -> bool: + """Return whether a frame matches any case-insensitive focus substring.""" + lowered = frame.lower() + return any(pattern.lower() in lowered for pattern in patterns) + + +def display_name(node: dict[str, Any]) -> str: + """Return the frame name for one processed flamegraph node.""" + return str(node.get("name") or "") + + +def children_of(node: dict[str, Any]) -> list[dict[str, Any]]: + """Return child nodes while tolerating ETTrace's singleton-child variant.""" + if "children" not in node: + raise ValueError("Processed ETTrace node is missing `children`.") + + children = node["children"] + if isinstance(children, dict): + return [children] + + if isinstance(children, list): + if not all(isinstance(child, dict) for child in children): + raise ValueError("Processed ETTrace node has a non-object child entry.") + return children + + raise ValueError("Processed ETTrace node has invalid `children`.") + + +def node_weight(node: dict[str, Any]) -> float: + """Return the inclusive `duration` stored on one ETTrace v1.1.0 node.""" + if "duration" not in node: + raise ValueError("Processed ETTrace node is missing `duration`.") + + value = node["duration"] + if isinstance(value, bool) or not isinstance(value, (int, float)): + raise ValueError("Processed ETTrace node has invalid `duration`.") + + duration = float(value) + if not math.isfinite(duration): + 
raise ValueError("Processed ETTrace node has invalid `duration`.") + + return duration + + +def collect_frame_weights( + node: dict[str, Any], + self_weights: dict[str, float], + inclusive_weights: dict[str, float], +) -> tuple[float, float]: + """Aggregate self and active-inclusive weights from one flamegraph subtree.""" + name = display_name(node) + weight = node_weight(node) + children = children_of(node) + child_weight = 0.0 + child_active_weight = 0.0 + + for child in children: + total_child_weight, active_child_weight = collect_frame_weights( + child, + self_weights, + inclusive_weights, + ) + child_weight += total_child_weight + child_active_weight += active_child_weight + + active_weight = child_active_weight + + if name and name != "": + self_weight = max(weight - child_weight, 0) + if not children: + self_weight = weight + if self_weight > 0: + self_weights[name] += self_weight + + if not is_unattributed(name) and not is_idle(name): + active_weight += self_weight + inclusive_weights[name] += active_weight + + return weight, active_weight + + +def thread_root_node(payload: dict[str, Any]) -> dict[str, Any] | None: + """Return the top-level `nodes` tree from ETTrace v1.1.0 processed JSON.""" + root = payload.get("nodes") + if isinstance(root, dict): + return root + + return None + + +def parse_flamegraph(path: Path): + """Read processed ETTrace JSON and aggregate totals used by the report.""" + with path.open(encoding="utf-8") as file: + payload = json.load(file) + + if not isinstance(payload, dict): + raise ValueError("Processed ETTrace JSON must be an object.") + + if "threadNodes" in payload: + raise ValueError( + "This looks like an intermediate or legacy ETTrace flamegraph shape with `threadNodes`. " + "Use Homebrew ETTrace v1.1.0 output_.json with top-level `nodes`.", + ) + + if "threads" in payload and "libraryInfo" in payload: + raise ValueError( + "This looks like ETTrace raw capture JSON, not processed flamegraph JSON. 
" + "Use the output_.json written in the directory where ettrace was run.", + ) + + thread_root = thread_root_node(payload) + if thread_root is None: + raise ValueError("Missing processed flamegraph nodes; this does not look like ETTrace flamegraph JSON.") + + self_weights = defaultdict(float) + active_inclusive_weights = defaultdict(float) + total = 0.0 + idle = 0.0 + unattributed = 0.0 + + thread_summaries = [] + thread_name = path.stem + thread_self_weights: dict[str, float] = defaultdict(float) + thread_inclusive_weights: dict[str, float] = defaultdict(float) + collect_frame_weights(thread_root, thread_self_weights, thread_inclusive_weights) + + thread_total = sum(thread_self_weights.values()) + thread_summaries.append((thread_total, str(thread_name))) + total += thread_total + + for frame, weight in thread_self_weights.items(): + self_weights[frame] += weight + if is_unattributed(frame): + unattributed += weight + continue + + if is_idle(frame): + idle += weight + + for frame, weight in thread_inclusive_weights.items(): + if not is_unattributed(frame) and not is_idle(frame): + active_inclusive_weights[frame] += weight + + return total, idle, unattributed, self_weights, active_inclusive_weights, thread_summaries + + +def print_top( + title: str, + rows: list[tuple[float, str]], + denominator: float, + limit: int, + percentage_label: str, +) -> None: + """Print a ranked table where percentages use the requested denominator.""" + print(f"\n{title}") + for weight, frame in rows[:limit]: + percent = weight / denominator * 100 if denominator else 0 + print(f"{weight:10.6f} {percent:7.2f}%{percentage_label} {frame}") + + +def main() -> None: + """Parse arguments, summarize the flamegraph, and print ranked sections.""" + parser = argparse.ArgumentParser( + description="Summarize ETTrace processed flamegraph JSON, excluding idle self frames from active percentages.", + ) + parser.add_argument( + "json", + type=Path, + help="Path to ETTrace v1.1.0 processed 
output_.json.", + ) + parser.add_argument("--top", type=int, default=40, help="Number of rows to print per section.") + parser.add_argument( + "--pattern", + action="append", + dest="patterns", + help=( + "Inclusive-frame substring to include in the focused section. Can be repeated. " + "If omitted, all inclusive frames are shown." + ), + ) + parser.add_argument( + "--show-wrappers", + action="store_true", + help="Include app entrypoint, run loop, and other wrapper frames in inclusive output.", + ) + args = parser.parse_args() + + try: + total, idle, unattributed, self_weights, active_inclusive_weights, thread_summaries = parse_flamegraph( + args.json, + ) + except (OSError, ValueError, json.JSONDecodeError) as error: + print(f"error: {error}", file=sys.stderr) + raise SystemExit(1) + + active = total - idle - unattributed + patterns = tuple(args.patterns) if args.patterns else () + + print(f"Trace: {args.json}") + print(f"Total duration: {total:.6f}") + print(f"Idle self total: {idle:.6f}") + print(f"Unattributed total: {unattributed:.6f}") + print(f"Active total: {active:.6f}") + + thread_summaries.sort(reverse=True) + print_top("Threads", thread_summaries, total, min(args.top, len(thread_summaries)), "total") + + active_self_frames = [ + (weight, frame) + for frame, weight in self_weights.items() + if not is_idle(frame) and not is_unattributed(frame) + ] + active_self_frames.sort(reverse=True) + print_top("Top active self frames", active_self_frames, active, args.top, "active") + + inclusive_rows = [ + (weight, frame) + for frame, weight in active_inclusive_weights.items() + if not patterns or matches_any_pattern(frame, patterns) + if args.show_wrappers or not is_wrapper_frame(frame) + ] + inclusive_rows.sort(reverse=True) + section_title = "Top focused inclusive frames" if patterns else "Top inclusive frames" + print_top(section_title, inclusive_rows, active, args.top, "active") + + +if __name__ == "__main__": + main() diff --git 
a/plugins/build-ios-apps/skills/ios-ettrace-performance/scripts/collect_ios_dsyms.sh b/plugins/build-ios-apps/skills/ios-ettrace-performance/scripts/collect_ios_dsyms.sh new file mode 100755 index 00000000..2f667b4e --- /dev/null +++ b/plugins/build-ios-apps/skills/ios-ettrace-performance/scripts/collect_ios_dsyms.sh @@ -0,0 +1,253 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat >&2 <<'USAGE' +Usage: collect_ios_dsyms.sh --app App.app --out-dir DIR [options] + +Collects UUID-matched dSYMs for a built iOS simulator app into DIR. + +Required: + --app PATH Built .app bundle + --out-dir DIR Destination dSYM directory + +Optional: + --search-root DIR Directory to search for .dSYM bundles (repeatable) + --extra-dsym DIR Known .dSYM bundle to include in candidates (repeatable) + --require-framework NAME Require a matching dSYM for an embedded framework + --require-all-frameworks Require matching dSYMs for every embedded framework + +Example: + collect_ios_dsyms.sh --app build/Debug-iphonesimulator/MyApp.app \ + --out-dir /tmp/profile/dsyms \ + --search-root build \ + --search-root ~/Library/Developer/Xcode/DerivedData +USAGE +} + +require_value() { + local flag="$1" + local value="${2:-}" + if [[ -z "$value" ]]; then + echo "$flag requires a value" >&2 + usage + exit 2 + fi +} + +app_path="" +out_dir="" +require_all_frameworks=false +search_roots=() +extra_dsyms=() +required_frameworks=() + +while [[ $# -gt 0 ]]; do + case "$1" in + --app) + require_value "$1" "${2:-}" + app_path="$2" + shift 2 + ;; + --out-dir) + require_value "$1" "${2:-}" + out_dir="$2" + shift 2 + ;; + --search-root) + require_value "$1" "${2:-}" + search_roots+=("$2") + shift 2 + ;; + --extra-dsym) + require_value "$1" "${2:-}" + extra_dsyms+=("$2") + shift 2 + ;; + --require-framework) + require_value "$1" "${2:-}" + required_frameworks+=("$2") + shift 2 + ;; + --require-all-frameworks) + require_all_frameworks=true + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown 
argument: $1" >&2 + usage + exit 2 + ;; + esac +done + +if [[ -z "$app_path" || -z "$out_dir" ]]; then + usage + exit 2 +fi + +if [[ ! -d "$app_path" ]]; then + echo "error: app bundle not found: $app_path" >&2 + exit 1 +fi + +app_path="$(cd "$(dirname "$app_path")" && pwd)/$(basename "$app_path")" +mkdir -p "$out_dir" +out_dir="$(cd "$out_dir" && pwd)" +candidates_file="$out_dir/dsym-candidates.txt" + +if [[ -f "$app_path/Info.plist" ]]; then + executable="$(/usr/libexec/PlistBuddy -c 'Print :CFBundleExecutable' "$app_path/Info.plist")" +else + executable="$(basename "$app_path" .app)" +fi + +app_binary="$app_path/$executable" +if [[ ! -f "$app_binary" ]]; then + echo "error: app executable not found: $app_binary" >&2 + exit 1 +fi + +default_roots=( + "$(dirname "$app_path")" + "$PWD" + "$PWD/build" + "$PWD/bazel-bin" + "$PWD/bazel-out" +) + +for root in "${default_roots[@]}"; do + if [[ -d "$root" ]]; then + search_roots+=("$root") + fi +done + +if [[ -d "$HOME/Library/Developer/Xcode/DerivedData" ]]; then + search_roots+=("$HOME/Library/Developer/Xcode/DerivedData") +fi + +: > "$candidates_file" +if [[ ${#search_roots[@]} -gt 0 ]]; then + find -L "${search_roots[@]}" -type d -name "*.dSYM" -prune -print 2>/dev/null >> "$candidates_file" || true +fi + +if [[ ${#extra_dsyms[@]} -gt 0 ]]; then + for dsym in "${extra_dsyms[@]}"; do + if [[ -d "$dsym" ]]; then + printf '%s\n' "$dsym" >> "$candidates_file" + fi + done +fi + +awk '!seen[$0]++' "$candidates_file" > "$candidates_file.tmp" +mv "$candidates_file.tmp" "$candidates_file" + +if [[ ! -s "$candidates_file" ]]; then + echo "error: no dSYM candidates found. Add --search-root pointing at build output or DerivedData." 
>&2 + exit 1 +fi + +contains_required_framework() { + local framework_name="$1" + if [[ ${#required_frameworks[@]} -eq 0 ]]; then + return 1 + fi + + for required in "${required_frameworks[@]}"; do + if [[ "$required" == "$framework_name" || "$required" == "${framework_name%.framework}" ]]; then + return 0 + fi + done + return 1 +} + +copy_matching_dsym() { + local binary="$1" + local label="$2" + local required="$3" + + if [[ ! -f "$binary" ]]; then + return 0 + fi + + local binary_uuids=() + while IFS= read -r uuid; do + [[ -n "$uuid" ]] && binary_uuids+=("$uuid") + done < <(dwarfdump --uuid "$binary" 2>/dev/null | awk '{ print $2 }') + + if [[ ${#binary_uuids[@]} -eq 0 ]]; then + if [[ "$required" == "required" ]]; then + echo "error: could not read UUID for required $label: $binary" >&2 + return 1 + fi + + echo "warning: could not read UUID for $label: $binary" >&2 + return 0 + fi + + local match="" + local candidate_uuids="" + local has_all_uuids="" + while IFS= read -r candidate; do + candidate_uuids="$(dwarfdump --uuid "$candidate" 2>/dev/null | awk '{ print $2 }' || true)" + if [[ -z "$candidate_uuids" ]]; then + continue + fi + has_all_uuids=true + for uuid in "${binary_uuids[@]}"; do + if ! 
grep -Fxq "$uuid" <<< "$candidate_uuids"; then + has_all_uuids=false + break + fi + done + + if [[ "$has_all_uuids" == "true" ]]; then + match="$candidate" + break + fi + done < "$candidates_file" + + if [[ -z "$match" ]]; then + if [[ "$required" == "required" ]]; then + echo "error: missing required dSYM for $label UUIDs ${binary_uuids[*]}" >&2 + return 1 + fi + + echo "warning: missing dSYM for $label UUIDs ${binary_uuids[*]}" >&2 + return 0 + fi + + local dest="$out_dir/$(basename "$match")" + rm -rf "$dest" + cp -R "$match" "$dest" + printf 'matched %s UUIDs %s -> %s\n' "$label" "${binary_uuids[*]}" "$dest" +} + +copy_matching_dsym "$app_binary" "$executable.app" required + +if [[ -d "$app_path/Frameworks" ]]; then + for framework in "$app_path"/Frameworks/*.framework; do + [[ -d "$framework" ]] || continue + + framework_name="$(basename "$framework")" + framework_binary="$framework/${framework_name%.framework}" + required="optional" + + if [[ "$require_all_frameworks" == "true" ]] || contains_required_framework "$framework_name"; then + required="required" + fi + + copy_matching_dsym "$framework_binary" "$framework_name" "$required" + done +fi + +cat < ` and grouped leak evidence. +5. Make the smallest root-cause patch, then recapture the same flow on the same simulator when possible. +6. Report proof: before/after leak counts, disappeared root types, remaining leaks, memgraph paths, and test/build results. + +Do not claim a leak fix from a smaller memgraph alone. A credible fix explains the ownership path that kept the object alive and shows that the same path or type disappears after the patch. + +## Capture + +Prefer capturing from the simulator already used for the reproduction. 
Resolve the simulator UDID and app bundle identifier, then capture the running app: + +```bash +SKILL_DIR="" +SIM="" +BUNDLE_ID="" +MEMGRAPH_DIR="$(mktemp -d "${TMPDIR:-/tmp}/codex-ios-memgraph.XXXXXX")" + +"$SKILL_DIR/scripts/capture_sim_memgraph.sh" \ + --udid "$SIM" \ + --bundle-id "$BUNDLE_ID" \ + --out-dir "$MEMGRAPH_DIR" +``` + +Do not derive `SKILL_DIR` from the target app repo's `pwd`; installed plugins usually live outside the app being debugged. Store captures in a run-specific temp or user-chosen folder, not under `SKILL_DIR`. + +If the process cannot be found, confirm the bundle identifier and use `xcrun simctl spawn "$SIM" launchctl list` to inspect running labels. + +## Summarize + +Summarize an existing memgraph: + +```bash +"$SKILL_DIR/scripts/summarize_memgraph_leaks.py" \ + /path/to/app.memgraph \ + --trace-limit 5 \ + --out /path/to/leak-summary.md +``` + +Use `--trace-limit` sparingly. Trace trees are useful root-cause evidence, but large memgraphs can produce noisy output. If a trace tree says `Found 0 roots referencing`, treat it as an unreachable/self-retained leak candidate and use the summary's grouped leak tree or `leaks --groupByType ` to identify the retained fields and payload chain. + +## Root Cause Rules + +- Identify the first app-owned leaked type in the leak output or trace. +- Determine the intended lifetime: process, session, account, view, request, or task. +- Treat lazy or deferred allocation as a scope reduction, not a leak fix, unless the original eager allocation itself violated the intended lifetime. +- Prove retain-cycle claims with either a `traceTree` ownership path or an isolated reproduction. +- For unreachable/self-cycle leaks, `traceTree` may have no root path; use `leaks --groupByType` plus source verification to find the self-retaining edge. +- Do not claim success just because total leak count went down; prove the specific type or path disappeared. 
+- Separate real root-cause branches from candidate/noise branches. +- Prefer deleting the retaining edge over adding broad cleanup code. + +## Report + +A useful leak report includes: + +- the exact flow and simulator/app build +- the memgraph and summary paths +- app-owned leaked types and counts +- at least one ownership path, or grouped leak tree evidence when the object is unreachable from roots +- the smallest proposed or applied retaining-edge fix +- before/after evidence when a fix was made + +If the memgraph shows only framework/runtime noise, say that and recommend the next narrower capture rather than inventing an app leak. diff --git a/plugins/build-ios-apps/skills/ios-memgraph-leaks/agents/openai.yaml b/plugins/build-ios-apps/skills/ios-memgraph-leaks/agents/openai.yaml new file mode 100644 index 00000000..c16d9be5 --- /dev/null +++ b/plugins/build-ios-apps/skills/ios-memgraph-leaks/agents/openai.yaml @@ -0,0 +1,4 @@ +interface: + display_name: "iOS Memgraph Leaks" + short_description: "Capture and prove iOS simulator memory leaks" + default_prompt: "Use $ios-memgraph-leaks to capture an iOS simulator memgraph, identify retention paths, and verify leak fixes." diff --git a/plugins/build-ios-apps/skills/ios-memgraph-leaks/scripts/capture_sim_memgraph.sh b/plugins/build-ios-apps/skills/ios-memgraph-leaks/scripts/capture_sim_memgraph.sh new file mode 100755 index 00000000..f0804f02 --- /dev/null +++ b/plugins/build-ios-apps/skills/ios-memgraph-leaks/scripts/capture_sim_memgraph.sh @@ -0,0 +1,143 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'USAGE' +Capture a memory graph from a running iOS simulator app. + +Required: + --udid UDID Simulator UDID + --bundle-id ID App bundle identifier, e.g. 
com.example.app + +Optional: + --out-dir DIR Output directory for the memgraph and leaks output + +Example: + capture_sim_memgraph.sh --udid "$SIM" --bundle-id com.example.app --out-dir /tmp/codex-ios-memgraph +USAGE +} + +require_value() { + local flag="$1" + local value="${2:-}" + if [[ -z "$value" ]]; then + echo "$flag requires a value" >&2 + usage >&2 + exit 2 + fi +} + +bundle_id="" +out_dir="" +udid="" + +while [[ $# -gt 0 ]]; do + case "$1" in + --bundle-id) + require_value "$1" "${2:-}" + bundle_id="$2" + shift 2 + ;; + --out-dir) + require_value "$1" "${2:-}" + out_dir="$2" + shift 2 + ;; + --udid) + require_value "$1" "${2:-}" + udid="$2" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown argument: $1" >&2 + usage >&2 + exit 2 + ;; + esac +done + +if [[ -z "$udid" ]]; then + echo "--udid is required" >&2 + usage >&2 + exit 2 +fi + +if [[ -z "$bundle_id" ]]; then + echo "--bundle-id is required" >&2 + usage >&2 + exit 2 +fi + +if [[ -z "$out_dir" ]]; then + out_dir="$(mktemp -d "${TMPDIR:-/tmp}/codex-ios-memgraph.XXXXXX")" +fi + +matching_processes="$( + xcrun simctl spawn "$udid" launchctl list | + awk -v bundle_id="$bundle_id" ' + $1 == "-" { + next + } + $3 == bundle_id { + print $1 "\t" $3 + next + } + index($3, "UIKitApplication:" bundle_id "[") == 1 { + print $1 "\t" $3 + } + ' +)" + +if [[ -z "$matching_processes" ]]; then + echo "Could not find a running PID for $bundle_id on $udid" >&2 + exit 1 +fi + +if [[ "$(printf '%s\n' "$matching_processes" | wc -l | tr -d ' ')" -ne 1 ]]; then + echo "Found multiple running PIDs for $bundle_id on $udid:" >&2 + printf '%s\n' "$matching_processes" >&2 + exit 1 +fi + +pid="$(printf '%s\n' "$matching_processes" | awk '{ print $1 }')" +process_label="$(printf '%s\n' "$matching_processes" | cut -f2-)" + +mkdir -p "$out_dir" + +timestamp="$(date +%Y%m%d-%H%M%S)" +safe_bundle="$(printf '%s' "$bundle_id" | tr -c 'A-Za-z0-9_.-' '_')" +memgraph="$out_dir/$safe_bundle-$pid-$timestamp.memgraph" 
+leaks_output="$out_dir/$safe_bundle-$pid-$timestamp.leaks.txt" +metadata="$out_dir/$safe_bundle-$pid-$timestamp.metadata.txt" + +{ + echo "date: $(date)" + echo "udid: $udid" + echo "bundle_id: $bundle_id" + echo "process_label: $process_label" + echo "pid: $pid" + echo "memgraph: $memgraph" + echo "leaks_output: $leaks_output" +} > "$metadata" + +set +e +leaks "--outputGraph=$memgraph" "$pid" > "$leaks_output" 2>&1 +leaks_status=$? +set -e + +echo "leaks_exit_status: $leaks_status" >> "$metadata" + +if [[ ! -f "$memgraph" ]]; then + echo "memgraph_missing: true" >> "$metadata" + echo "leaks failed to create a memgraph; see: $leaks_output" >&2 + echo "metadata: $metadata" >&2 + exit 1 +fi + +echo "memgraph: $memgraph" +echo "leaks output: $leaks_output" +echo "metadata: $metadata" diff --git a/plugins/build-ios-apps/skills/ios-memgraph-leaks/scripts/summarize_memgraph_leaks.py b/plugins/build-ios-apps/skills/ios-memgraph-leaks/scripts/summarize_memgraph_leaks.py new file mode 100755 index 00000000..4c77ec1f --- /dev/null +++ b/plugins/build-ios-apps/skills/ios-memgraph-leaks/scripts/summarize_memgraph_leaks.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 +"""Summarize leaks output from an Apple .memgraph file.""" + +from __future__ import annotations + +import argparse +import re +import subprocess +import sys +from collections import Counter +from pathlib import Path + + +LEAK_RE = re.compile(r"^Leak:\s+(?P
<address>0x[0-9a-fA-F]+)\s+size=(?P<size>\d+)\s+(?P<rest>.*)$") +TOTAL_RE = re.compile(r"Process\s+\S+:\s+(?P<count>\d+)\s+leaks?\s+for\s+(?P<bytes>\d+)\s+total leaked bytes") + + +def run_leaks(args: list[str]) -> subprocess.CompletedProcess[str]: + return subprocess.run(["leaks", *args], text=True, capture_output=True, check=False) + + +def parse_leaks(output: str) -> tuple[str | None, list[dict[str, str]]]: + total = None + leaks: list[dict[str, str]] = [] + for line in output.splitlines(): + if total is None: + match = TOTAL_RE.search(line) + if match: + total = f"{match.group('count')} leaks / {match.group('bytes')} bytes" + match = LEAK_RE.match(line) + if match: + fields = match.groupdict() + rest = fields.pop("rest") + rest = re.sub(r"^zone:\s+\S+\s+", "", rest) + parts = re.split(r"\s{2,}", rest.strip(), maxsplit=2) + if len(parts) == 3: + fields["type"], fields["language"], fields["image"] = parts + elif len(parts) == 2: + fields["type"], fields["image"] = parts + fields["language"] = "" + else: + fields["type"] = rest.strip() or "" + fields["language"] = "" + fields["image"] = "" + leaks.append(fields) + return total, leaks + + +def trace_excerpt(memgraph: Path, address: str, max_lines: int) -> str: + result = run_leaks([f"--traceTree={address}", str(memgraph)]) + text = result.stdout or result.stderr + lines = [line.rstrip() for line in text.splitlines() if line.strip()] + return "\n".join(lines[:max_lines]) + + +def group_by_type_excerpt(memgraph: Path, max_lines: int) -> str: + result = run_leaks(["--groupByType", str(memgraph)]) + text = result.stdout or result.stderr + lines = [line.rstrip() for line in text.splitlines() if line.strip()] + return "\n".join(lines[:max_lines]) + + +def render(memgraph: Path, trace_limit: int, trace_lines: int, raw_output: str) -> str: + total, leaks = parse_leaks(raw_output) + by_type = Counter(leak["type"] for leak in leaks) + by_image = Counter(leak["image"] for leak in leaks) + + lines: list[str] = [] + lines.append(f"# Leak Summary: {memgraph}") + 
lines.append("") + lines.append(f"- Total: {total or 'not found'}") + lines.append(f"- Parsed leak entries: {len(leaks)}") + lines.append("") + + if by_type: + lines.append("## Top Types") + for name, count in by_type.most_common(20): + lines.append(f"- {count}x {name}") + lines.append("") + + if by_image: + lines.append("## Top Images") + for name, count in by_image.most_common(20): + lines.append(f"- {count}x {name}") + lines.append("") + + if leaks: + lines.append("## Leak Entries") + for leak in leaks[:50]: + lines.append( + f"- {leak['address']} size={leak['size']} type={leak['type']} " + f"image={leak['image']}" + ) + if len(leaks) > 50: + lines.append(f"- ... {len(leaks) - 50} more") + lines.append("") + + if trace_limit > 0 and leaks: + lines.append("## TraceTree Excerpts") + for leak in leaks[:trace_limit]: + lines.append(f"### {leak['address']} {leak['type']}") + excerpt = trace_excerpt(memgraph, leak["address"], trace_lines) + lines.append("~~~text") + lines.append(excerpt or "") + lines.append("~~~") + lines.append("") + + if leaks: + lines.append("## Grouped Leak Tree") + lines.append("Use this when `traceTree` has no roots, which is common for unreachable retain cycles.") + lines.append("~~~text") + lines.append(group_by_type_excerpt(memgraph, trace_lines) or "") + lines.append("~~~") + lines.append("") + + lines.append("## Raw Commands") + lines.append("~~~bash") + lines.append(f"leaks --list {memgraph}") + if leaks: + lines.append(f"leaks --groupByType {memgraph}") + if leaks: + lines.append(f"leaks --traceTree={leaks[0]['address']} {memgraph}") + lines.append("~~~") + lines.append("") + + return "\n".join(lines) + + +def main() -> int: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("memgraph", type=Path) + parser.add_argument("--trace-limit", type=int, default=0, help="Number of leaks to trace with --traceTree") + parser.add_argument("--trace-lines", type=int, default=80, help="Max lines per traceTree excerpt") + 
parser.add_argument("--out", type=Path, help="Write markdown summary to this file") + args = parser.parse_args() + + if not args.memgraph.exists(): + print(f"memgraph not found: {args.memgraph}", file=sys.stderr) + return 2 + + result = run_leaks(["--list", str(args.memgraph)]) + raw = result.stdout or result.stderr + total, leaks = parse_leaks(raw) + if result.returncode != 0 and total is None and not leaks: + print(raw, file=sys.stderr, end="" if raw.endswith("\n") else "\n") + return result.returncode or 1 + + summary = render(args.memgraph, args.trace_limit, args.trace_lines, raw) + if args.out: + args.out.parent.mkdir(parents=True, exist_ok=True) + args.out.write_text(summary) + print(args.out) + else: + print(summary) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/plugins/test-android-apps/.codex-plugin/plugin.json b/plugins/test-android-apps/.codex-plugin/plugin.json index d5802014..25349879 100644 --- a/plugins/test-android-apps/.codex-plugin/plugin.json +++ b/plugins/test-android-apps/.codex-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "test-android-apps", "version": "0.1.0", - "description": "Test Android apps with emulator workflows for reproduction, screenshots, UI inspection, and log capture.", + "description": "Test Android apps with emulator workflows for reproduction, screenshots, UI inspection, log capture, and performance profiling.", "author": { "name": "OpenAI", "email": "support@openai.com", @@ -16,13 +16,19 @@ "emulator", "qa", "logcat", - "uiautomator" + "uiautomator", + "performance", + "simpleperf", + "perfetto", + "memory", + "leaks", + "heap" ], "skills": "./skills/", "interface": { "displayName": "Test Android Apps", - "shortDescription": "Reproduce issues, inspect UI, and capture evidence from Android emulators", - "longDescription": "Use Test Android Apps to build and install app variants, drive a booted Android emulator with adb input events, inspect UI trees, capture screenshots, and collect logcat 
output while reproducing issues.", + "shortDescription": "Reproduce issues, inspect UI, and capture performance evidence from Android emulators", + "longDescription": "Use Test Android Apps to build and install app variants, drive a booted Android emulator with adb input events, inspect UI trees, capture screenshots, collect logcat output, and gather Simpleperf, Perfetto, gfxinfo, or memory/leak evidence while reproducing issues.", "developerName": "OpenAI", "category": "Coding", "capabilities": [ @@ -33,7 +39,7 @@ "privacyPolicyURL": "https://openai.com/policies/privacy-policy/", "termsOfServiceURL": "https://openai.com/policies/terms-of-use/", "defaultPrompt": [ - "Use Test Android Apps to reproduce an emulator issue with adb, then capture screenshots, UI state, and logcat" + "Use Test Android Apps to reproduce an emulator issue, then capture screenshots, logs, UI state, and performance evidence." ], "brandColor": "#3DDC84", "composerIcon": "./assets/test-android-apps-small.svg", diff --git a/plugins/test-android-apps/agents/openai.yaml b/plugins/test-android-apps/agents/openai.yaml index b7e428c2..9d678ea2 100644 --- a/plugins/test-android-apps/agents/openai.yaml +++ b/plugins/test-android-apps/agents/openai.yaml @@ -1,6 +1,6 @@ interface: display_name: "Test Android Apps" - short_description: "Reproduce issues, inspect UI, and capture evidence from Android emulators" + short_description: "Reproduce issues, inspect UI, and capture Android performance evidence" icon_small: "./assets/test-android-apps-small.svg" icon_large: "./assets/app-icon.png" - default_prompt: "Use Test Android Apps to build an app, launch it on a booted emulator, reproduce the issue with adb-driven UI steps, and capture screenshots plus logs." + default_prompt: "Use Test Android Apps to reproduce an emulator issue, then capture screenshots, logs, UI state, and performance evidence." 
diff --git a/plugins/test-android-apps/skills/android-performance/SKILL.md b/plugins/test-android-apps/skills/android-performance/SKILL.md new file mode 100644 index 00000000..5301dea2 --- /dev/null +++ b/plugins/test-android-apps/skills/android-performance/SKILL.md @@ -0,0 +1,279 @@ +--- +name: android-performance +description: Gather and interpret Android performance evidence on an adb target using Simpleperf CPU profiles, Perfetto or Compose traces, gfxinfo frame data, dumpsys meminfo snapshots, Java heap dumps, and native allocation traces. Use when asked to profile an Android app flow, find CPU-heavy functions, diagnose jank, capture startup or frame timing evidence, compare before/after performance, explain what code is taking time, or gather memory/leak profiling artifacts. +--- + +# Android Performance + +Use this skill to capture Android performance evidence for adb-installable apps. CPU sampling usually requires a debuggable or profileable build; frame stats, Perfetto, and logcat can still help when an app cannot be sampled. Compose with `../android-emulator-qa/SKILL.md` for device selection, build/install/launch, UI driving, screenshots, UI trees, and logcat capture. + +## Core Workflow + +1. Pick one focused user-visible flow. +2. Choose the trace type that matches the question. +3. Record the flow with clear start and stop boundaries. +4. Pull or copy the trace produced by that run, then generate reports from that file. +5. Interpret reports with caveats about device, build type, sample count, and profiler limits. + +Avoid broad "use the app for a while" captures. They make traces hard to attribute and usually hide the functions that matter. + +Use a local adb target for meaningful timing. 
Store outputs in a run-specific artifact folder outside the skill directory: + +```bash +if [ -z "${ARTIFACT_DIR:-}" ]; then + ARTIFACT_DIR="$(mktemp -d "${TMPDIR:-/tmp}/codex-android-perf.XXXXXX")" +fi +mkdir -p "$ARTIFACT_DIR" +``` + +Do not put `ARTIFACT_DIR` under `SKILL_DIR`; the skill folder is for bundled instructions and scripts, not run artifacts. + +## Choosing A Trace + +- Use **Simpleperf** when the question is "what functions are taking CPU time?" or when you need a sampled profile of Kotlin, Java, native, or framework execution. +- Use **Perfetto** when the question is frame timing, startup timeline, scheduler gaps, binder work, lock contention, main-thread stalls, Compose recomposition, or why a flow felt janky. +- Use **gfxinfo framestats** for a quick manual frame/jank snapshot. Pair it with Perfetto when you need root cause. +- Use **meminfo / heap dumps** when the question is retained Java/Kotlin objects, PSS, native heap, or object counts after a focused flow. + +## Simpleperf CPU Profiles + +Simpleperf `--app` works best when the installed package is debuggable or profileable from shell. Preflight before recording: + +```bash +SERIAL="" +PACKAGE="" + +adb -s "$SERIAL" shell dumpsys package "$PACKAGE" | grep -Ei 'DEBUGGABLE|profileable|isProfileable' || true +``` + +If the package is not debuggable/profileable and `simpleperf record --app` fails, install a debug/profileable build when possible. If that is not possible, use Perfetto or `gfxinfo` instead of treating missing CPU samples as evidence. 
+ +Start recording in one terminal or as a long-running Codex command session: + +```bash +SERIAL="" +PACKAGE="" +MAX_DURATION_SECONDS=60 + +adb -s "$SERIAL" shell rm -f /data/local/tmp/perf.data +adb -s "$SERIAL" logcat -c + +adb -s "$SERIAL" shell simpleperf record \ + --app "$PACKAGE" \ + -o /data/local/tmp/perf.data \ + -e cpu-clock -f 4000 -g \ + --duration "$MAX_DURATION_SECONDS" +``` + +While that command is running, perform exactly one focused flow with adb input, UI automation, or `android-emulator-qa`. + +Stop Simpleperf from another command and wait for the recording command to exit: + +```bash +adb -s "$SERIAL" shell 'pid="$(pidof simpleperf 2>/dev/null || true)"; [ -n "$pid" ] && kill -INT $pid' +``` + +If that returns `Operation not permitted`, send Ctrl-C to the original `adb shell simpleperf record` command session and wait for it to exit. + +Pull and report the capture: + +```bash +adb -s "$SERIAL" pull /data/local/tmp/perf.data "$ARTIFACT_DIR/perf.data" +adb -s "$SERIAL" logcat -d > "$ARTIFACT_DIR/logcat.txt" + +SKILL_DIR="" +FIRST_PARTY_REGEX="$(printf '%s' "$PACKAGE" | sed 's/\./\\./g')" +"$SKILL_DIR/scripts/simpleperf_hotspots.sh" \ + "$ARTIFACT_DIR/perf.data" \ + "$ARTIFACT_DIR" \ + --serial "$SERIAL" \ + --first-party-regex "$FIRST_PARTY_REGEX" +``` + +Do not derive `SKILL_DIR` from the target app repo's `pwd`; installed plugins usually live outside the app being profiled. Keep `FIRST_PARTY_REGEX` scoped to the app's package or app-owned module prefixes; avoid broad framework patterns such as `kotlin`, `Compose`, or `androidx.compose` when reporting app-owned rows. + +The helper writes: + +- `$ARTIFACT_DIR/simpleperf-self.txt` +- `$ARTIFACT_DIR/simpleperf-children.txt` +- `$ARTIFACT_DIR/simpleperf.csv` when supported by the installed Simpleperf + +If host Simpleperf is not installed, the helper searches Android Studio and Android SDK/NDK locations. 
If unavailable, it falls back to device-side `adb shell simpleperf report` when the device still has `/data/local/tmp/perf.data`. + +## Reading Simpleperf + +Simpleperf reports sampled CPU execution. It does not directly measure suspended coroutines, network latency, lock wait time, or other wall-clock waits. If a flow feels slow but Simpleperf shows little app CPU, capture Perfetto to inspect scheduler gaps, binder work, locks, frame timing, and app trace sections. + +Read reports this way: + +- **Self/Overhead**: samples where the function itself was executing. Use this for hot leaf work such as parsing, formatting, diffing, sorting, allocation-heavy iteration, or JSON/protobuf processing. +- **Children/inclusive**: samples in the function and its callees. Use this for expensive entry points such as repositories, use cases, view models, Composables, startup initializers, or feature coordinators. +- **Shared Object / Symbol**: prefer app-owned package frames, feature modules, domain/data/UI modules, and generated app code. Treat Android framework, Kotlin runtime, Compose, and native/runtime frames as context unless the app-owned caller is visible. +- **Percentages**: useful for ranking functions inside one capture. For user-facing timing claims, pair with Perfetto, `gfxinfo`, or repeated wall-clock measurements. + +When interpreting a hotspot, note symbol/function name, self or inclusive percentage, approximate sampled CPU time when available, caller stack or owning source file, flow steps, artifact paths, and whether the capture is single-run or repeated. + +## Perfetto / Compose Trace + +If the app repo already documents a Perfetto/System Trace command for that project, use it. Otherwise use Perfetto directly. The light command below captures scheduler/frequency/Android atrace categories and app `Trace` sections for `PACKAGE`; it is not a substitute for a full project-specific Perfetto config when you need detailed frame timeline or Compose runtime internals. 
+ +```bash +SERIAL="" +PACKAGE="" +TRACE_DURATION_SECONDS=30 +TRACE_BASENAME="app-flow-$(date +%Y%m%d-%H%M%S).pftrace" +TRACE_DEVICE="/data/misc/perfetto-traces/$TRACE_BASENAME" + +PERFETTO_PID="$(adb -s "$SERIAL" shell perfetto \ + --background-wait \ + -o "$TRACE_DEVICE" \ + -t "${TRACE_DURATION_SECONDS}s" \ + --app "$PACKAGE" \ + sched freq idle am wm gfx view binder_driver hal dalvik | tr -d '\r' | tail -n 1)" +printf 'Perfetto PID: %s\n' "$PERFETTO_PID" +``` + +Run exactly one focused flow before `TRACE_DURATION_SECONDS` expires. To stop early, gracefully terminate the background Perfetto process and give it a moment to flush: + +```bash +adb -s "$SERIAL" shell kill -TERM "$PERFETTO_PID" 2>/dev/null || true +adb -s "$SERIAL" shell " + last_size=-1 + stable_count=0 + i=0 + while [ \$i -lt 30 ]; do + size=\$(ls -l '$TRACE_DEVICE' 2>/dev/null | awk '{ print \$5 }') + if [ -n \"\$size\" ] && [ \"\$size\" -gt 0 ] && [ \"\$size\" = \"\$last_size\" ]; then + stable_count=\$((stable_count + 1)) + [ \$stable_count -ge 2 ] && exit 0 + else + stable_count=0 + fi + last_size=\"\${size:-0}\" + i=\$((i + 1)) + sleep 1 + done + exit 1 +" +``` + +Prefer letting `TRACE_DURATION_SECONDS` expire instead of stopping early. If the stop command fails because the trace already ended, still wait until the output file exists and its size is stable before pulling. If the direct command is too coarse, use Android Studio System Trace or a project-specific Perfetto config. Only report frame timeline or Compose recomposition details when those tracks/events are actually present in the captured trace; the light command above does not guarantee them. 
+ +Pull the exact on-device trace from this run: + +```bash +adb -s "$SERIAL" pull "$TRACE_DEVICE" "$ARTIFACT_DIR/$TRACE_BASENAME" +``` + +In Perfetto, inspect: + +- main-thread slices around missed frames or long startup sections +- frame scheduling, frame timeline, and render thread lanes +- Compose runtime tracing sections for recomposition work when enabled +- binder transactions, monitor contention, scheduler gaps, and app log markers + +## gfxinfo Framestats + +Use this for a quick manual frame snapshot: + +```bash +SERIAL="" +PACKAGE="" + +adb -s "$SERIAL" shell pidof "$PACKAGE" +adb -s "$SERIAL" shell dumpsys window | grep -F "$PACKAGE" +adb -s "$SERIAL" shell dumpsys gfxinfo "$PACKAGE" reset +# Perform the focused flow. +adb -s "$SERIAL" shell dumpsys gfxinfo "$PACKAGE" > "$ARTIFACT_DIR/gfxinfo.txt" +adb -s "$SERIAL" shell dumpsys gfxinfo "$PACKAGE" framestats > "$ARTIFACT_DIR/gfxinfo-framestats.txt" +``` + +Capture from a stable, responsive screen. If `dumpsys gfxinfo` fails to dump the process, or the device shows an ANR/dialog/splash screen instead of the flow, discard that capture and use Perfetto for root cause. + +Read the headline summary first: total frames, janky frames, frame percentiles, slow UI thread, slow draw commands, and frame deadline misses. On emulators, absolute smoothness numbers are noisy; percentile spikes and slow draw/UI counters are still useful for deciding whether to take a Perfetto trace. + +## Memory / Leak Artifacts + +Use this on an adb target after narrowing the investigation to one flow. Exercise the flow, return to a stable screen, then capture memory artifacts from that state. + +For quick Java/native/PSS/object-count snapshots: + +```bash +SERIAL="" +PACKAGE="" + +adb -s "$SERIAL" shell am force-stop "$PACKAGE" +adb -s "$SERIAL" shell monkey -p "$PACKAGE" 1 +# Exercise the focused flow, then navigate back to a stable idle screen. 
+adb -s "$SERIAL" shell dumpsys meminfo "$PACKAGE" > "$ARTIFACT_DIR/meminfo-flow.txt" +``` + +Read `TOTAL PSS`, Java heap, native heap, graphics, `Views`, `Activities`, binder counts, and object counts. Treat one noisy sample as a lead, not a conclusion. + +For retained Kotlin/Java objects, prefer Shark CLI when it is available. It works with Android heap dumps and produces text output the agent can inspect and cite. + +```bash +HEAP="/data/local/tmp/app-flow.hprof" +HPROF="$ARTIFACT_DIR/app-flow.hprof" + +if ! command -v shark-cli >/dev/null; then + echo "Install Shark CLI, or analyze the HPROF with Android Studio Profiler / MAT." >&2 +fi + +adb -s "$SERIAL" shell am dumpheap -g "$PACKAGE" "$HEAP" +adb -s "$SERIAL" pull "$HEAP" "$HPROF" +adb -s "$SERIAL" shell rm -f "$HEAP" + +if command -v shark-cli >/dev/null; then + shark-cli --hprof "$HPROF" analyze | tee "$ARTIFACT_DIR/shark-analysis.txt" +fi +``` + +Read `shark-analysis.txt` first when it exists. Report suspected leaking objects, retained sizes, and reference chains. Look for retained feature objects, activities, fragments, view models, Compose state holders, repositories, listeners, callbacks, and caches that should have been released after leaving the flow. If Shark CLI is unavailable, still preserve the HPROF path and inspect it with the best available heap analyzer; do not claim leak roots from `meminfo` alone. + +For native allocation growth, capture a Perfetto trace with heapprofd enabled. Keep the duration in the config; current Android `perfetto` rejects `-t` together with `--config`. + +```bash +TRACE_DEVICE="/data/misc/perfetto-traces/native-alloc.pftrace" + +adb -s "$SERIAL" shell perfetto -o "$TRACE_DEVICE" \ + --txt -c - <&2 <<'USAGE' +Usage: heapprofd_reports.sh [output-dir] + +Generates text reports from a Perfetto trace captured with the heapprofd data +source enabled. 
+USAGE +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +trace="${1:-}" +if [[ -z "$trace" ]]; then + usage + exit 2 +fi + +if [[ ! -f "$trace" ]]; then + echo "Trace not found: $trace" >&2 + exit 1 +fi + +if [[ "$trace" != /* ]]; then + trace="$(cd "$(dirname "$trace")" && pwd)/$(basename "$trace")" +fi + +output_dir="${2:-$(dirname "$trace")}" +mkdir -p "$output_dir" +if [[ "$output_dir" != /* ]]; then + output_dir="$(cd "$output_dir" && pwd)" +fi + +trace_processor="${TRACE_PROCESSOR_SHELL:-}" +if [[ -z "$trace_processor" ]]; then + if command -v trace_processor_shell >/dev/null 2>&1; then + trace_processor="trace_processor_shell" + elif command -v trace_processor >/dev/null 2>&1; then + trace_processor="trace_processor" + fi +fi + +if [[ -z "$trace_processor" ]] || ! command -v "$trace_processor" >/dev/null 2>&1; then + echo "Perfetto trace processor not found. Set TRACE_PROCESSOR_SHELL or install trace_processor_shell/trace_processor." >&2 + exit 1 +fi + +run_query() { + "$trace_processor" -Q "$1" "$trace" +} + +summary_report="$output_dir/heapprofd-summary.txt" +top_allocations_report="$output_dir/heapprofd-top-allocations.txt" +top_stack_report="$output_dir/heapprofd-top-stack.txt" +health_report="$output_dir/heapprofd-health.txt" + +run_query " +select count(*) as allocation_rows, sum(size) as net_size +from __intrinsic_heap_profile_allocation; +" > "$summary_report" + +run_query " +with alloc as ( + select callsite_id, sum(size) as net_size, sum(count) as net_count + from __intrinsic_heap_profile_allocation + group by callsite_id + having net_size > 0 +) +select + alloc.callsite_id, + alloc.net_size, + alloc.net_count, + frame.name as leaf_frame, + mapping.name as leaf_mapping +from alloc +join __intrinsic_stack_profile_callsite callsite on callsite.id = alloc.callsite_id +join __intrinsic_stack_profile_frame frame on frame.id = callsite.frame_id +join __intrinsic_stack_profile_mapping mapping on mapping.id = 
frame.mapping +order by alloc.net_size desc +limit 30; +" > "$top_allocations_report" + +top_callsite="$( + run_query " + select callsite_id + from __intrinsic_heap_profile_allocation + group by callsite_id + having sum(size) > 0 + order by sum(size) desc + limit 1; + " | awk -F, 'NR > 1 { gsub(/"/, "", $1); print $1; exit }' +)" + +if [[ "$top_callsite" =~ ^[0-9]+$ ]]; then + run_query " + with recursive stack(id, depth, parent_id, frame_id) as ( + select id, depth, parent_id, frame_id + from __intrinsic_stack_profile_callsite + where id = $top_callsite + union all + select callsite.id, callsite.depth, callsite.parent_id, callsite.frame_id + from __intrinsic_stack_profile_callsite callsite + join stack on callsite.id = stack.parent_id + ) + select stack.depth, frame.name as frame, mapping.name as mapping + from stack + join __intrinsic_stack_profile_frame frame on frame.id = stack.frame_id + join __intrinsic_stack_profile_mapping mapping on mapping.id = frame.mapping + order by stack.depth; + " > "$top_stack_report" +else + printf 'No positive net allocation callsite found.\n' > "$top_stack_report" +fi + +run_query " +select name, idx, severity, source, value, description +from stats +where lower(name) like '%heapprofd%' + or lower(name) like 'traced_buf%' + or lower(name) like '%packet_loss%' + or lower(name) like '%overrun%' +order by name, idx; +" > "$health_report" + +printf 'Wrote: %s\n' "$summary_report" +printf 'Wrote: %s\n' "$top_allocations_report" +printf 'Wrote: %s\n' "$top_stack_report" +printf 'Wrote: %s\n' "$health_report" diff --git a/plugins/test-android-apps/skills/android-performance/scripts/simpleperf_hotspots.sh b/plugins/test-android-apps/skills/android-performance/scripts/simpleperf_hotspots.sh new file mode 100755 index 00000000..394f93ea --- /dev/null +++ b/plugins/test-android-apps/skills/android-performance/scripts/simpleperf_hotspots.sh @@ -0,0 +1,247 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat >&2 <<'USAGE' +Usage: 
simpleperf_hotspots.sh [output-dir] [--serial SERIAL] [--first-party-regex REGEX] + +Generates Simpleperf self-time, children/inclusive, and optional CSV reports +for a focused Android profiling capture. + +Options: + --serial SERIAL adb serial for device-side fallback. + --first-party-regex REGEX Extra grep pattern for likely app-owned rows. + +Environment: + ANDROID_PERF_FIRST_PARTY_REGEX Default first-party grep pattern + ANDROID_PERF_SERIAL adb serial for device-side fallback + ANDROID_PERF_DEVICE_DATA device perf.data path for fallback +USAGE +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +perf_data="" +output_dir="" +first_party_regex="${ANDROID_PERF_FIRST_PARTY_REGEX:-}" +serial="${ANDROID_PERF_SERIAL:-}" + +while [[ $# -gt 0 ]]; do + case "$1" in + --first-party-regex) + if [[ -z "${2:-}" ]]; then + echo "--first-party-regex requires a value" >&2 + usage + exit 2 + fi + first_party_regex="$2" + shift 2 + ;; + --serial) + if [[ -z "${2:-}" ]]; then + echo "--serial requires a value" >&2 + usage + exit 2 + fi + serial="$2" + shift 2 + ;; + -*) + echo "Unknown argument: $1" >&2 + usage + exit 2 + ;; + *) + if [[ -z "$perf_data" ]]; then + perf_data="$1" + elif [[ -z "$output_dir" ]]; then + output_dir="$1" + else + echo "Unexpected argument: $1" >&2 + usage + exit 2 + fi + shift + ;; + esac +done + +if [[ -z "$perf_data" ]]; then + usage + exit 2 +fi + +if [[ -n "$first_party_regex" ]]; then + set +e + awk -v re="$first_party_regex" 'BEGIN { _ = ("" ~ re); exit 0 }' >/dev/null 2>&1 + regex_status=$? 
+ set -e + if [[ $regex_status -ne 0 ]]; then + echo "Invalid --first-party-regex: $first_party_regex" >&2 + exit 2 + fi +fi + +perf_data_exists=0 +if [[ -f "$perf_data" ]]; then + perf_data_exists=1 +fi + +if [[ "$perf_data_exists" -eq 1 && "$perf_data" != /* ]]; then + perf_data="$(cd "$(dirname "$perf_data")" && pwd)/$(basename "$perf_data")" +fi + +output_dir="${output_dir:-$(dirname "$perf_data")}" +mkdir -p "$output_dir" +if [[ "$output_dir" != /* ]]; then + output_dir="$(cd "$output_dir" && pwd)" +fi + +search_roots=() +for env_var in ANDROID_NDK_HOME ANDROID_HOME ANDROID_SDK_ROOT; do + value="${!env_var:-}" + if [[ -n "$value" && -d "$value" ]]; then + search_roots+=("$value") + fi +done + +default_sdk="$HOME/Library/Android/sdk" +if [[ -d "$default_sdk" ]]; then + search_roots+=("$default_sdk") +fi + +android_studio_simpleperf="/Applications/Android Studio.app/Contents/plugins/android/resources/simpleperf" +if [[ -d "$android_studio_simpleperf" ]]; then + search_roots+=("$android_studio_simpleperf") +fi + +simpleperf_bin="" +if command -v simpleperf >/dev/null 2>&1; then + candidate_simpleperf="$(command -v simpleperf)" + if [[ -x "$candidate_simpleperf" ]] && "$candidate_simpleperf" --version >/dev/null 2>&1; then + simpleperf_bin="$candidate_simpleperf" + fi +fi + +if [[ -z "$simpleperf_bin" ]] && (( ${#search_roots[@]} > 0 )); then + while IFS= read -r simpleperf_path; do + if [[ -x "$simpleperf_path" ]] && "$simpleperf_path" --version >/dev/null 2>&1; then + simpleperf_bin="$simpleperf_path" + break + fi + done < <( + { + find "${search_roots[@]}" -path '*/darwin-*/simpleperf' -type f -print + find "${search_roots[@]}" -path '*/simpleperf' -type f -print + } 2>/dev/null | awk '!seen[$0]++' + ) +fi + +print_report_summary() { + local children_report="$1" + + printf '\nTop inclusive rows:\n' + awk 'seen || /^(Overhead|Children)/ { seen = 1; print }' "$children_report" | sed -n '1,80p' + + printf '\nLikely first-party rows:\n' + if [[ -n 
"$first_party_regex" ]]; then + awk -v re="$first_party_regex" ' + function is_sample_row(columns) { + return columns >= 7 && $1 ~ /%$/ && $2 ~ /%$/ + } + { + sub(/^[[:space:]]+/, "") + columns = split($0, fields, /[[:space:]]{2,}/) + if (is_sample_row(columns)) { + shared_object = fields[6] + symbol = fields[7] + if ((shared_object " " symbol) ~ re) { + print + found = 1 + } + } + } + END { + if (!found) { + print "No rows matched the first-party regex in Shared Object or Symbol columns." + } + } + ' "$children_report" | sed -n '1,120p' + else + printf 'No first-party regex configured. Re-run with --first-party-regex scoped to the app package or app-owned modules.\n' + fi +} + +if [[ -z "$simpleperf_bin" ]]; then + if ! command -v adb >/dev/null 2>&1; then + echo "Host Simpleperf unavailable and adb is not installed or not on PATH." >&2 + echo "Install Android platform-tools, set PATH for adb, or install host Simpleperf." >&2 + exit 1 + fi + + if [[ -z "$serial" ]]; then + connected_devices=() + while IFS= read -r connected_device; do + connected_devices+=("$connected_device") + done < <( + adb devices 2>/dev/null | + awk 'NR > 1 && $2 == "device" { print $1 }' + ) + if [[ ${#connected_devices[@]} -eq 1 ]]; then + serial="${connected_devices[0]}" + else + echo "Host Simpleperf unavailable and no adb serial was provided." >&2 + echo "Pass --serial or set ANDROID_PERF_SERIAL so device-side fallback reads the intended target." 
>&2 + adb devices >&2 || true + exit 1 + fi + fi + + device_perf_data="${ANDROID_PERF_DEVICE_DATA:-/data/local/tmp/perf.data}" + if [[ -n "$serial" ]] && adb -s "$serial" shell test -f "$device_perf_data" 2>/dev/null; then + self_report="$output_dir/simpleperf-self.txt" + children_report="$output_dir/simpleperf-children.txt" + + adb -s "$serial" shell simpleperf report -i "$device_perf_data" > "$self_report" + adb -s "$serial" shell simpleperf report -i "$device_perf_data" --children > "$children_report" + + printf 'Simpleperf binary: device %s\n' "$serial" + printf 'Wrote: %s\n' "$self_report" + printf 'Wrote: %s\n' "$children_report" + printf 'First-party regex: %s\n' "${first_party_regex:-}" + print_report_summary "$children_report" + exit 0 + fi + + echo "Could not find host Simpleperf on PATH or under: ${search_roots[*]:-(no search roots)}" >&2 + echo "Also could not use device-side fallback. Set ANDROID_PERF_SERIAL and keep perf.data on the device, or install host Simpleperf." >&2 + exit 1 +fi + +if [[ "$perf_data_exists" -ne 1 ]]; then + echo "perf.data not found: $perf_data" >&2 + exit 1 +fi + +self_report="$output_dir/simpleperf-self.txt" +children_report="$output_dir/simpleperf-children.txt" +csv_report="$output_dir/simpleperf.csv" + +"$simpleperf_bin" report -i "$perf_data" > "$self_report" +"$simpleperf_bin" report -i "$perf_data" --children > "$children_report" + +if "$simpleperf_bin" report -i "$perf_data" --children --csv > "$csv_report" 2>/dev/null; then + : +else + rm -f "$csv_report" +fi + +printf 'Simpleperf binary: %s\n' "$simpleperf_bin" +printf 'Wrote: %s\n' "$self_report" +printf 'Wrote: %s\n' "$children_report" +[[ -f "$csv_report" ]] && printf 'Wrote: %s\n' "$csv_report" +printf 'First-party regex: %s\n' "${first_party_regex:-}" +print_report_summary "$children_report"