diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 480042a..e89442d 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,5 +1,6 @@ -# Generated by runtime_ci_tooling v0.14.0 +# Generated by runtime_ci_tooling v0.14.1 # Configured via .runtime_ci/config.json — run 'dart run runtime_ci_tooling:manage_cicd update --workflows' to regenerate. +# Policy: test artifact retention-days = 7 (applied consistently). name: CI on: @@ -57,7 +58,7 @@ jobs: sdk: "3.9.2" - name: Format code - run: dart format --line-length 120 lib/ + run: dart format --line-length 120 . - name: Commit and push formatting id: format-push @@ -65,7 +66,7 @@ jobs: if ! git diff --quiet; then git config user.name "github-actions[bot]" git config user.email "41898282+github-actions[bot]@users.noreply.github.com" - git add lib/ + git add -u -- '*.dart' git commit -m "bot(format): apply dart format --line-length 120 [skip ci]" if git push; then echo "sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT" @@ -92,13 +93,14 @@ jobs: # ── shared:git-config ── keep in sync with single_platform ── - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - echo "::add-mask::${TOKEN}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + echo "::add-mask::${GH_PAT}" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global 
url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" # ── shared:dart-setup ── keep in sync with single_platform ── - uses: dart-lang/setup-dart@v1.7.1 @@ -106,10 +108,11 @@ jobs: sdk: "3.9.2" # ── shared:pub-cache ── keep in sync with single_platform ── + # Windows: %LOCALAPPDATA%\Pub\Cache (Dart default). Unix: ~/.pub-cache - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-${{ runner.arch }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-${{ runner.arch }}-dart-pub- @@ -119,6 +122,9 @@ jobs: env: GIT_LFS_SKIP_SMUDGE: "1" + - name: Run build_runner + run: dart run build_runner build --delete-conflicting-outputs + # ── shared:analysis-cache ── keep in sync with single_platform ── # ── shared:proto-verify ── keep in sync with single_platform ── # ── shared:analyze ── keep in sync with single_platform ── @@ -150,13 +156,14 @@ jobs: # ── shared:git-config ── keep in sync with single_platform ── - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - echo "::add-mask::${TOKEN}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config 
--global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + echo "::add-mask::${GH_PAT}" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" # ── shared:dart-setup ── keep in sync with single_platform ── - uses: dart-lang/setup-dart@v1.7.1 @@ -164,37 +171,49 @@ jobs: sdk: "3.9.2" # ── shared:pub-cache ── keep in sync with single_platform ── + # Windows: %LOCALAPPDATA%\Pub\Cache (Dart default). Unix: ~/.pub-cache - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-${{ runner.arch }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-${{ runner.arch }}-dart-pub- + # ── shared:proto-setup ── keep in sync with single_platform ── # ── shared:pub-get ── keep in sync with single_platform ── - run: dart pub get env: GIT_LFS_SKIP_SMUDGE: "1" + - name: Run build_runner + run: dart run build_runner build --delete-conflicting-outputs + # --- BEGIN USER: pre-test --- # --- END USER: pre-test --- # ── shared:test ── keep in sync with single_platform ── - name: Test - run: dart test + shell: bash + run: | + set -o pipefail + mkdir -p "$TEST_LOG_DIR" + dart run runtime_ci_tooling:manage_cicd test 2>&1 | tee "$TEST_LOG_DIR/console.log" + env: + TEST_LOG_DIR: ${{ runner.temp }}/test-logs + PLATFORM_ID: ${{ matrix.platform_id }} - - name: Upload test artifacts on failure - if: failure() + - name: Upload test logs + if: success() || failure() uses: 
actions/upload-artifact@v4 with: - name: test-artifacts-${{ matrix.platform_id }} + name: test-logs-${{ matrix.platform_id }} path: | + ${{ runner.temp }}/test-logs/ test/integration/fixtures/bin/ **/test-results/ retention-days: 7 # --- BEGIN USER: post-test --- # --- END USER: post-test --- - # --- BEGIN USER: extra-jobs --- # --- END USER: extra-jobs --- diff --git a/.github/workflows/issue-triage.yaml b/.github/workflows/issue-triage.yaml index 80c74f4..770bd69 100644 --- a/.github/workflows/issue-triage.yaml +++ b/.github/workflows/issue-triage.yaml @@ -50,12 +50,13 @@ jobs: - name: Configure Git for HTTPS with Token if: steps.trigger.outputs.run == 'true' shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 if: steps.trigger.outputs.run == 'true' @@ -66,7 +67,7 @@ jobs: if: steps.trigger.outputs.run == 'true' uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os 
== 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -79,6 +80,7 @@ jobs: node-version: "22" - run: npm install -g @google/gemini-cli@latest + if: steps.trigger.outputs.run == 'true' - name: Cache Go modules (GitHub MCP server) if: steps.trigger.outputs.run == 'true' @@ -93,4 +95,5 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: dart run runtime_ci_tooling:triage_cli ${{ github.event.issue.number }} + ISSUE_NUMBER: ${{ github.event.issue.number }} + run: dart run runtime_ci_tooling:triage_cli "$ISSUE_NUMBER" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 2f1b797..78511ad 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -69,12 +69,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global 
url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -83,7 +84,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -142,12 +143,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -156,7 +158,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ 
runner.os }}-dart-pub- @@ -180,17 +182,19 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd triage pre-release \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ - --version "${{ needs.determine-version.outputs.new_version }}" + --prev-tag "$PREV_TAG" \ + --version "$NEW_VERSION" # Find manifest from .runtime_ci/runs/ audit trail MANIFEST=$(find .runtime_ci/runs -name "issue_manifest.json" -type f 2>/dev/null | sort -r | head -1) if [ -n "$MANIFEST" ]; then cp "$MANIFEST" /tmp/issue_manifest.json else - echo '{"version":"${{ needs.determine-version.outputs.new_version }}","github_issues":[],"sentry_issues":[],"cross_repo_issues":[]}' > /tmp/issue_manifest.json + echo "{\"version\":\"${NEW_VERSION}\",\"github_issues\":[],\"sentry_issues\":[],\"cross_repo_issues\":[]}" > /tmp/issue_manifest.json fi - uses: actions/upload-artifact@v6.0.0 @@ -223,12 +227,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global 
url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -237,7 +242,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -266,10 +271,12 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd explore \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ - --version "${{ needs.determine-version.outputs.new_version }}" + --prev-tag "$PREV_TAG" \ + --version "$NEW_VERSION" - name: Create fallback stage1 artifacts if missing run: | @@ -311,12 +318,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git 
config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -325,7 +333,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -359,19 +367,23 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd compose \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ - --version "${{ needs.determine-version.outputs.new_version }}" + --prev-tag "$PREV_TAG" \ + --version "$NEW_VERSION" - name: Documentation update env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd documentation \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ - --version "${{ needs.determine-version.outputs.new_version }}" + --prev-tag "$PREV_TAG" \ + --version "$NEW_VERSION" - uses: actions/upload-artifact@v6.0.0 with: @@ -410,12 +422,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ 
secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - name: Set up Dart uses: dart-lang/setup-dart@v1.7.1 @@ -425,7 +438,7 @@ jobs: - name: Cache Dart pub uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-pub-${{ hashFiles('**/pubspec.lock') }} restore-keys: ${{ runner.os }}-pub- @@ -510,12 +523,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git 
config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -524,7 +538,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -585,30 +599,34 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd release-notes \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ - --version "${{ needs.determine-version.outputs.new_version }}" + --prev-tag "$PREV_TAG" \ + --version "$NEW_VERSION" # Consolidate all release notes files under .runtime_ci/release_notes/ before upload. # Mixing relative and absolute paths in upload-artifact causes path # resolution issues. Keep everything under one root. 
- name: Consolidate release notes + env: + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | - VERSION="${{ needs.determine-version.outputs.new_version }}" - mkdir -p ".runtime_ci/release_notes/v${VERSION}" - cp /tmp/release_notes_body.md ".runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || true - cp /tmp/migration_guide.md ".runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || true - echo "Contents of .runtime_ci/release_notes/v${VERSION}/:" - ls -la ".runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || echo "(empty)" + mkdir -p ".runtime_ci/release_notes/v${NEW_VERSION}" + cp /tmp/release_notes_body.md ".runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || true + cp /tmp/migration_guide.md ".runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || true + echo "Contents of .runtime_ci/release_notes/v${NEW_VERSION}/:" + ls -la ".runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || echo "(empty)" - name: Ensure release notes artifact is non-empty shell: bash + env: + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | - VERSION="${{ needs.determine-version.outputs.new_version }}" - mkdir -p ".runtime_ci/release_notes/v${VERSION}" - if [ ! -f ".runtime_ci/release_notes/v${VERSION}/release_notes_body.md" ]; then - echo "Release notes unavailable for v${VERSION}." > ".runtime_ci/release_notes/v${VERSION}/release_notes_body.md" + mkdir -p ".runtime_ci/release_notes/v${NEW_VERSION}" + if [ ! -f ".runtime_ci/release_notes/v${NEW_VERSION}/release_notes_body.md" ]; then + echo "Release notes unavailable for v${NEW_VERSION}." 
> ".runtime_ci/release_notes/v${NEW_VERSION}/release_notes_body.md" echo "Created fallback release_notes_body.md" fi @@ -646,12 +664,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -660,7 +679,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -693,34 +712,35 @@ jobs: merge-multiple: false - name: Prepare artifacts + env: + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | - VERSION="${{ needs.determine-version.outputs.new_version }}" - mkdir -p ./artifacts ./.runtime_ci/version_bumps "./.runtime_ci/release_notes/v${VERSION}" + mkdir -p ./artifacts ./.runtime_ci/version_bumps 
"./.runtime_ci/release_notes/v${NEW_VERSION}" # Stage 3 release notes: downloaded artifact has release_notes/ root # so files land at ./release-notes-artifacts/vX.X.X/release_notes.md - if [ -d "./release-notes-artifacts/v${VERSION}" ]; then - cp -r "./release-notes-artifacts/v${VERSION}/"* "./.runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || true - echo "Copied Stage 3 artifacts from release-notes-artifacts/v${VERSION}/" + if [ -d "./release-notes-artifacts/v${NEW_VERSION}" ]; then + cp -r "./release-notes-artifacts/v${NEW_VERSION}/"* "./.runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || true + echo "Copied Stage 3 artifacts from release-notes-artifacts/v${NEW_VERSION}/" elif [ -d "./release-notes-artifacts" ]; then # Fallback: search recursively for release_notes.md FOUND=$(find ./release-notes-artifacts -name "release_notes.md" -type f 2>/dev/null | head -1) if [ -n "$FOUND" ]; then - cp "$(dirname "$FOUND")"/* "./.runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || true + cp "$(dirname "$FOUND")"/* "./.runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || true echo "Found release notes via recursive search: $FOUND" fi fi # Copy release_notes_body.md to /tmp/ for Dart script - if [ -f "./.runtime_ci/release_notes/v${VERSION}/release_notes_body.md" ]; then - cp "./.runtime_ci/release_notes/v${VERSION}/release_notes_body.md" /tmp/release_notes_body.md - elif [ -f "./.runtime_ci/release_notes/v${VERSION}/release_notes.md" ]; then - cp "./.runtime_ci/release_notes/v${VERSION}/release_notes.md" /tmp/release_notes_body.md + if [ -f "./.runtime_ci/release_notes/v${NEW_VERSION}/release_notes_body.md" ]; then + cp "./.runtime_ci/release_notes/v${NEW_VERSION}/release_notes_body.md" /tmp/release_notes_body.md + elif [ -f "./.runtime_ci/release_notes/v${NEW_VERSION}/release_notes.md" ]; then + cp "./.runtime_ci/release_notes/v${NEW_VERSION}/release_notes.md" /tmp/release_notes_body.md fi # List what we found echo "Release notes contents:" - ls -la 
"./.runtime_ci/release_notes/v${VERSION}/" 2>/dev/null || echo "(empty)" + ls -la "./.runtime_ci/release_notes/v${NEW_VERSION}/" 2>/dev/null || echo "(empty)" # Merge all downloaded audit trail artifacts from different jobs into # a single .runtime_ci/runs/ directory so archive-run can find them. @@ -735,20 +755,25 @@ jobs: # the release. This replaces the old post-release archive that could # never work because .runtime_ci/runs/ didn't exist on the fresh runner. - name: Archive audit trail + env: + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} run: | dart run runtime_ci_tooling:manage_cicd archive-run \ - --version "${{ needs.determine-version.outputs.new_version }}" + --version "$NEW_VERSION" - name: Create release env: GH_TOKEN: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }} GITHUB_TOKEN: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} + PREV_TAG: ${{ needs.determine-version.outputs.prev_tag }} + REPO_NAME: ${{ github.repository }} run: | dart run runtime_ci_tooling:manage_cicd create-release \ - --version "${{ needs.determine-version.outputs.new_version }}" \ - --prev-tag "${{ needs.determine-version.outputs.prev_tag }}" \ + --version "$NEW_VERSION" \ + --prev-tag "$PREV_TAG" \ --artifacts-dir ./artifacts \ - --repo "${{ github.repository }}" + --repo "$REPO_NAME" # ============================================================================ # Job 7: Post-Release Triage @@ -765,12 +790,13 @@ jobs: - name: Configure Git for HTTPS with Token shell: bash + env: + GH_PAT: ${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} run: | - TOKEN="${{ secrets.TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "git@github.com:" - git config --global url."https://x-access-token:${TOKEN}@github.com/".insteadOf "ssh://git@github.com/" - git config --global 
url."https://x-access-token:${TOKEN}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" - git config --global url."https://x-access-token:${TOKEN}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "git@github.com:" + git config --global url."https://x-access-token:${GH_PAT}@github.com/".insteadOf "ssh://git@github.com/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/open-runtime/".insteadOf "git@github.com:open-runtime/" + git config --global url."https://x-access-token:${GH_PAT}@github.com/pieces-app/".insteadOf "git@github.com:pieces-app/" - uses: dart-lang/setup-dart@v1.7.1 with: @@ -779,7 +805,7 @@ jobs: - name: Cache Dart pub dependencies uses: actions/cache@v5.0.3 with: - path: ~/.pub-cache + path: ${{ runner.os == 'Windows' && format('{0}\\Pub\\Cache', env.LOCALAPPDATA) || '~/.pub-cache' }} key: ${{ runner.os }}-dart-pub-${{ hashFiles('**/pubspec.yaml') }} restore-keys: ${{ runner.os }}-dart-pub- @@ -808,9 +834,11 @@ jobs: env: GEMINI_API_KEY: ${{ secrets.CICD_GEMINI_API_KEY_OPEN_RUNTIME }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NEW_VERSION: ${{ needs.determine-version.outputs.new_version }} + REPO_NAME: ${{ github.repository }} run: | dart run runtime_ci_tooling:manage_cicd triage post-release \ - --version "${{ needs.determine-version.outputs.new_version }}" \ - --release-tag "v${{ needs.determine-version.outputs.new_version }}" \ - --release-url "https://github.com/${{ github.repository }}/releases/tag/v${{ needs.determine-version.outputs.new_version }}" \ + --version "$NEW_VERSION" \ + --release-tag "v${NEW_VERSION}" \ + --release-url "https://github.com/${REPO_NAME}/releases/tag/v${NEW_VERSION}" \ --manifest /tmp/issue_manifest.json diff --git a/.runtime_ci/config.json b/.runtime_ci/config.json index 19e1c65..ef03610 100644 --- a/.runtime_ci/config.json +++ b/.runtime_ci/config.json @@ -66,6 +66,7 @@ "ci": { 
"dart_sdk": "3.9.2", "line_length": 120, + "artifact_retention_days": 7, "personal_access_token_secret": "TSAVO_AT_PIECES_PERSONAL_ACCESS_TOKEN", "features": { "proto": false, @@ -73,7 +74,9 @@ "format_check": true, "analysis_cache": false, "managed_analyze": false, - "managed_test": false + "managed_test": true, + "build_runner": true, + "web_test": false }, "secrets": {}, "sub_packages": [], diff --git a/.runtime_ci/template_versions.json b/.runtime_ci/template_versions.json index 5e662f9..549dbc9 100644 --- a/.runtime_ci/template_versions.json +++ b/.runtime_ci/template_versions.json @@ -1,6 +1,6 @@ { - "tooling_version": "0.14.0", - "updated_at": "2026-02-24T20:52:23.005924Z", + "tooling_version": "0.14.1", + "updated_at": "2026-02-25T04:26:33.911675Z", "templates": { "gemini_settings": { "hash": "93983f49dd2f40d2ed245271854946d8916b8f0698ed2cfaf12058305baa0b08", @@ -23,19 +23,19 @@ "updated_at": "2026-02-24T00:59:57.620091Z" }, "workflow_ci": { - "hash": "fa46da42c95dd420ebcaa28847ef76e6c5f5e634d9b62ddcfa11da6f561fa1c9", - "consumer_hash": "23ff33976e549dc469e84717d71e5d5ee182999cc000baab85cb7276f3db27d7", - "updated_at": "2026-02-24T20:52:23.006744Z" + "hash": "387908c7364598e498743b3b34bf3784f73b9ff424518198d7fd550b5d42321b", + "consumer_hash": "f2ffd43c36be64aed5bf7d5eec18d73e43a3b9200f085c291e85905286cdce75", + "updated_at": "2026-02-25T04:26:33.908748Z" }, "workflow_release": { - "hash": "326627cf41fdeb6cd61dae2fda98599d5815a34e63e4a8af1aaa8f7ad18435d3", - "consumer_hash": "326627cf41fdeb6cd61dae2fda98599d5815a34e63e4a8af1aaa8f7ad18435d3", - "updated_at": "2026-02-24T00:59:57.767027Z" + "hash": "dae21f893768272b85bae853c4b6d5013c6641762cf1536ea5fcce5123a0314c", + "consumer_hash": "dae21f893768272b85bae853c4b6d5013c6641762cf1536ea5fcce5123a0314c", + "updated_at": "2026-02-25T04:26:33.910948Z" }, "workflow_issue_triage": { - "hash": "a6364383fd2284c875e765ca183c94c9833248acbfd4ff039372efed8f14f47c", - "consumer_hash": 
"a6364383fd2284c875e765ca183c94c9833248acbfd4ff039372efed8f14f47c", - "updated_at": "2026-02-24T00:59:57.805191Z" + "hash": "229416f8f4e0a0ed87d655c2f280b616318665fa89a9aaa177de2a9ce0ccaed8", + "consumer_hash": "229416f8f4e0a0ed87d655c2f280b616318665fa89a9aaa177de2a9ce0ccaed8", + "updated_at": "2026-02-25T04:26:33.911680Z" } } } diff --git a/SETUP.md b/SETUP.md index 0d99b41..959a745 100644 --- a/SETUP.md +++ b/SETUP.md @@ -306,17 +306,36 @@ The CI workflow (`.github/workflows/ci.yaml`) is generated from your `ci` sectio | `dart_sdk` | string | **required** | Dart SDK version (e.g. `"3.9.2"`) | | `personal_access_token_secret` | string | `"GITHUB_TOKEN"` | GitHub secret name for PAT | | `line_length` | int/string | `120` | Line length for `dart format` checks (also controls the git pre-commit hook) | +| `artifact_retention_days` | int/string | `7` | Retention window for uploaded CI test artifacts (1-90 days) | | `features.proto` | bool | `false` | Enable protobuf generation step | | `features.lfs` | bool | `false` | Enable Git LFS checkout | | `features.format_check` | bool | `true` | Enable `dart format` check | | `features.analysis_cache` | bool | `false` | Cache analysis results across runs | | `features.managed_analyze` | bool | `false` | Run `dart analyze` via tooling | | `features.managed_test` | bool | `false` | Run `dart test` via tooling | +| `features.build_runner` | bool | `false` | Run `dart run build_runner build --delete-conflicting-outputs` before analyze, test, and web-test. In multi-platform mode this runs once in `analyze` and once per matrix `test` job. | +| `features.web_test` | bool | `false` | Add a standalone `web-test` job that runs `dart test -p chrome` on Ubuntu | +| `web_test.concurrency` | int | `1` | Number of concurrent browser test suites (1–32) | +| `web_test.paths` | list | `[]` | Specific test paths to run (empty = run all tests via `dart test -p chrome`). Paths are strictly validated (rules below). 
| | `platforms` | list | `["ubuntu"]` | Platform matrix. If 2+ entries, CI runs `analyze` once then `test` as a matrix. Valid: `ubuntu-x64`, `ubuntu-arm64`, `macos-arm64`, `macos-x64`, `windows-x64`, `windows-arm64` (plus aliases `ubuntu`, `macos`, `windows`). | | `runner_overrides` | object | `{}` | Override platform IDs to custom `runs-on` labels (e.g. org-managed GitHub-hosted runners). Example: `{ "ubuntu-arm64": "runtime-ubuntu-24.04-arm64-208gb-64core" }` | | `secrets` | object | `{}` | Additional secrets as `{ "ENV_NAME": "SECRET_NAME" }` | | `sub_packages` | list | `[]` | Sub-packages as `[{ "name": "...", "path": "..." }]` | +When `features.web_test` is `true`, the `web_test` object is optional; if omitted, defaults are used (`concurrency: 1`, `paths: []`). + +`web_test.paths` validation rules: +- Entries must be non-empty strings with no leading/trailing whitespace. +- Paths must be relative (no absolute paths, no `~`, no traversal like `..`). +- Paths must use forward slashes (`/`) and only `[A-Za-z0-9_./-]` characters. +- `.` (repo root), duplicates (after normalization), and leading `-` are rejected. + +Cross-validation rule: +- If `features.web_test` is `false`, omit `web_test` or set it to `{}`. Non-empty `web_test` config with the feature disabled is treated as dead config and fails validation. + +Artifact retention policy: +- CI test artifacts (logs, test-results, fixtures) default to **7 days** retention and can be overridden via `ci.artifact_retention_days` (1-90). + You can add custom steps before/after tests using user-preservable sections in the generated workflow — look for `# --- BEGIN USER: pre-test ---` and `# --- END USER: post-test ---` markers. 
To add additional jobs (including reusable workflow calls), @@ -407,6 +426,7 @@ The `validate` command checks: - TOML files contain required `prompt` and `description` keys - Dart files pass `dart analyze` - Markdown files exist and are non-empty +- `.runtime_ci/config.json` `ci` semantics via `WorkflowGenerator.validate()` (field rules + cross-validation such as `features.web_test` vs `web_test`) The `status` command shows: - Installation status of all required/optional tools @@ -449,13 +469,18 @@ The `.runtime_ci/config.json` file controls all behavior. Here is the complete s }, "cross_repo": { "enabled": true, + "orgs": ["your-org"], "repos": [ { "owner": "your-org", "repo": "dependent-repo", "relationship": "dependency" } - ] + ], + "discovery": { + "enabled": true, + "search_orgs": ["your-org"] + } }, "labels": { "type": ["bug", "feature-request", "enhancement", "documentation", "question"], @@ -508,6 +533,15 @@ The `.runtime_ci/config.json` file controls all behavior. Here is the complete s | `changelog_path` | `String` | `"CHANGELOG.md"` | Path to the CHANGELOG file | | `release_notes_path` | `String` | `"release_notes"` | Directory for release notes artifacts | +#### `sentry` + +| Key | Default | Description | +|---|---|---| +| `organization` | `""` | Sentry organization slug | +| `projects` | `[]` | List of Sentry project slugs to scan | +| `scan_on_pre_release` | `true` | Whether to scan Sentry errors during pre-release | +| `recent_errors_hours` | `168` | Hours of recent errors to include (168 = 7 days) | + #### `thresholds` Controls automated triage actions based on aggregated agent confidence: @@ -544,9 +578,12 @@ Available agents: `code_analysis`, `pr_correlation`, `duplicate`, `sentiment`, ` | Key | Default | Description | |---|---|---| | `enabled` | `true` | Enable cross-repository issue discovery and linking | +| `orgs` | `[]` | Optional allowlist of organizations for cross-repo operations | | `repos` | `[]` | List of dependent repositories to scan 
| +| `discovery.enabled` | `true` | Enable automatic discovery of related repositories | +| `discovery.search_orgs` | `[]` | Organizations scanned when discovery is enabled | -Each repo entry: `{ "owner": "...", "repo": "...", "relationship": "dependency|consumer|..." }` +Each repo entry: `{ "owner": "...", "repo": "...", "relationship": "dependency|consumer|related|..." }`. The default relationship is `related` when omitted. --- diff --git a/USAGE.md b/USAGE.md index bae4485..d6d85b2 100644 --- a/USAGE.md +++ b/USAGE.md @@ -141,7 +141,7 @@ dart run runtime_ci_tooling:manage_cicd init 4. Creates `.runtime_ci/config.json` with detected values (skipped if already present) 5. Creates `.runtime_ci/autodoc.json` from `lib/src/` directory structure (skipped if already present) 6. Creates a starter `CHANGELOG.md` if none exists -7. Installs `.git/hooks/pre-commit` to auto-format staged `lib/` Dart files before every commit +7. Installs `.git/hooks/pre-commit` to auto-format staged Dart files under `lib/` before every commit 8. Adds `.runtime_ci/runs/` to `.gitignore` 9. Prints a summary of all actions taken and suggested next steps @@ -501,14 +501,20 @@ dart run runtime_ci_tooling:manage_cicd create-release \ ### test -Run `dart test` excluding GCP-tagged tests. +Run `dart test` with enhanced output capture and job summary. ```bash dart run runtime_ci_tooling:manage_cicd test ``` -Runs `dart test --exclude-tags gcp`, parses output for pass/fail/skip counts, -and writes a GitHub Actions step summary. 
+**Enhanced managed test behavior (when `ci.features.managed_test=true`):** +- Excludes `gcp` and `integration` tags via `--exclude-tags gcp,integration` +- Uses JSON and expanded file reporters for full output capture (including `print()`, isolate output, FFI) +- Writes logs to `$TEST_LOG_DIR` (CI) or `.dart_tool/test-logs/` (local) +- Generates a rich GitHub Actions step summary with pass/fail/skip counts and failure details +- Runs tests in sub-packages (from `ci.sub_packages`) with `pub get` per package + +When `ci.features.managed_test=false`, CI falls back to plain `dart test` with no enhanced capture/reporting. --- @@ -1243,17 +1249,25 @@ final exists = await commandExists('git'); **Jobs:** 1. `pre-check` — Skip bot commits (author `github-actions[bot]` or `[skip ci]`) -2. Optional `auto-format` — If `ci.features.format_check=true`, auto-format `lib/` and push `bot(format)` commit +2. Optional `auto-format` — If `ci.features.format_check=true`, runs `dart format --line-length <line_length> .`, stages tracked `*.dart` updates, and pushes a `bot(format)` commit 3. **Single-platform mode** (default, `ci.platforms` missing or 1 entry): - `analyze-and-test` — Verify protos, run analysis, run tests 4. **Multi-platform mode** (`ci.platforms` has 2+ entries): - `analyze` — Run analysis once (Ubuntu) - `test` — Run tests as a matrix across OS+arch (`x64` + `arm64`) +5. Optional `web-test` — If `ci.features.web_test=true`, runs `dart test -p chrome` in a standalone Ubuntu job with deterministic Chrome provisioning via SHA-pinned `browser-actions/setup-chrome@v2.1.1` **Platform matrix configuration:** - `ci.platforms`: list of platform IDs (e.g. `["ubuntu-x64","ubuntu-arm64","macos-arm64","macos-x64","windows-x64","windows-arm64"]`) - `ci.runner_overrides`: optional map to point platform IDs at custom `runs-on` labels (e.g.
org-managed GitHub-hosted runners) +**Optional features:** +- `ci.features.build_runner`: When `true`, runs `dart run build_runner build --delete-conflicting-outputs` before analyze, test, and web-test steps to regenerate `.g.dart` codegen files +- `ci.features.web_test`: When `true`, adds a `web-test` job that provisions Chrome via SHA-pinned `browser-actions/setup-chrome@v2.1.1` and runs `dart test -p chrome`. Configure via `ci.web_test`: + - `concurrency` (1–32, default `1`): parallel test shards + - `paths`: list of relative repo paths (e.g. `["test/web/"]`): paths are normalized, shell-quoted, and validated (no traversal, no shell metacharacters). Empty list = run all tests +- `ci.artifact_retention_days`: Optional retention period for uploaded test artifacts (1–90, default `7`) + **Key steps:** ```yaml - run: dart run runtime_ci_tooling:manage_cicd verify-protos diff --git a/bin/manage_cicd.dart b/bin/manage_cicd.dart index 4a05d3f..16ef966 100644 --- a/bin/manage_cicd.dart +++ b/bin/manage_cicd.dart @@ -1,6 +1,7 @@ import 'dart:io'; import 'package:runtime_ci_tooling/src/cli/manage_cicd_cli.dart'; +import 'package:runtime_ci_tooling/src/cli/utils/exit_util.dart'; Future main(List args) async { final cli = ManageCicdCli(); @@ -8,6 +9,6 @@ Future main(List args) async { await cli.run(args); } on UsageException catch (e) { stderr.writeln(e); - exit(64); + await exitWithCode(64); } } diff --git a/lib/src/cli/commands/create_release_command.dart b/lib/src/cli/commands/create_release_command.dart index 6348b90..3d22561 100644 --- a/lib/src/cli/commands/create_release_command.dart +++ b/lib/src/cli/commands/create_release_command.dart @@ -2,6 +2,7 @@ import 'dart:convert'; import 'dart:io'; import 'package:args/command_runner.dart'; +import 'package:path/path.dart' as p; import 'package:pub_semver/pub_semver.dart'; import '../../triage/utils/config.dart'; @@ -70,7 +71,12 @@ class CreateReleaseCommand extends Command { // Step 1: Copy artifacts if provided if 
(artifactsDir != null) { - final artDir = Directory('$repoRoot/$artifactsDir'); + final artifactsPath = p.normalize(p.join(repoRoot, artifactsDir)); + if (!(artifactsPath == repoRoot || p.isWithin(repoRoot, artifactsPath))) { + Logger.error('--artifacts-dir must resolve within the repository root'); + exit(1); + } + final artDir = Directory(artifactsPath); if (artDir.existsSync()) { for (final name in ['CHANGELOG.md', 'README.md']) { final src = File('${artDir.path}/$name'); @@ -236,8 +242,13 @@ class CreateReleaseCommand extends Command { // Step 4: Commit all changes Logger.info('Configuring git identity for release commit'); - CiProcessRunner.exec('git', ['config', 'user.name', 'github-actions[bot]'], cwd: repoRoot, verbose: global.verbose); - CiProcessRunner.exec( + await CiProcessRunner.exec( + 'git', + ['config', 'user.name', 'github-actions[bot]'], + cwd: repoRoot, + verbose: global.verbose, + ); + await CiProcessRunner.exec( 'git', ['config', 'user.email', 'github-actions[bot]@users.noreply.github.com'], cwd: repoRoot, @@ -264,7 +275,7 @@ class CreateReleaseCommand extends Command { for (final path in filesToAdd) { final fullPath = '$repoRoot/$path'; if (File(fullPath).existsSync() || Directory(fullPath).existsSync()) { - CiProcessRunner.exec('git', ['add', path], cwd: repoRoot, verbose: global.verbose); + await CiProcessRunner.exec('git', ['add', path], cwd: repoRoot, verbose: global.verbose); } } @@ -281,7 +292,7 @@ class CreateReleaseCommand extends Command { // Use a temp file for the commit message to avoid shell escaping issues final commitMsgFile = File('$repoRoot/.git/RELEASE_COMMIT_MSG'); commitMsgFile.writeAsStringSync(commitMsg); - CiProcessRunner.exec( + await CiProcessRunner.exec( 'git', ['commit', '-F', commitMsgFile.path], cwd: repoRoot, @@ -295,7 +306,7 @@ class CreateReleaseCommand extends Command { final remoteRepo = Platform.environment['GITHUB_REPOSITORY'] ?? 
effectiveRepo; if (ghToken != null && remoteRepo.isNotEmpty) { Logger.info('Setting authenticated remote URL for push'); - CiProcessRunner.exec( + await CiProcessRunner.exec( 'git', ['remote', 'set-url', 'origin', 'https://x-access-token:$ghToken@github.com/$remoteRepo.git'], cwd: repoRoot, @@ -311,15 +322,27 @@ class CreateReleaseCommand extends Command { if (pushResult.exitCode != 0) { Logger.warn('Direct push failed (non-fast-forward); fetching and merging remote changes...'); if (global.verbose) Logger.info(' push stderr: ${(pushResult.stderr as String).trim()}'); - CiProcessRunner.exec('git', ['fetch', 'origin', 'main'], cwd: repoRoot, fatal: true, verbose: global.verbose); - CiProcessRunner.exec( + await CiProcessRunner.exec( + 'git', + ['fetch', 'origin', 'main'], + cwd: repoRoot, + fatal: true, + verbose: global.verbose, + ); + await CiProcessRunner.exec( 'git', ['merge', 'origin/main', '--no-edit'], cwd: repoRoot, fatal: true, verbose: global.verbose, ); - CiProcessRunner.exec('git', ['push', 'origin', 'main'], cwd: repoRoot, fatal: true, verbose: global.verbose); + await CiProcessRunner.exec( + 'git', + ['push', 'origin', 'main'], + cwd: repoRoot, + fatal: true, + verbose: global.verbose, + ); } Logger.success('Committed and pushed changes'); } else { @@ -333,14 +356,14 @@ class CreateReleaseCommand extends Command { Logger.error('Tag $tag already exists. 
Cannot create release.'); exit(1); } - CiProcessRunner.exec( + await CiProcessRunner.exec( 'git', ['tag', '-a', tag, '-m', 'Release v$newVersion'], cwd: repoRoot, fatal: true, verbose: global.verbose, ); - CiProcessRunner.exec('git', ['push', 'origin', tag], cwd: repoRoot, fatal: true, verbose: global.verbose); + await CiProcessRunner.exec('git', ['push', 'origin', tag], cwd: repoRoot, fatal: true, verbose: global.verbose); Logger.success('Created tag: $tag'); // Step 5b: Create per-package tags for sub-packages with tag_pattern @@ -356,14 +379,20 @@ class CreateReleaseCommand extends Command { continue; } try { - CiProcessRunner.exec( + await CiProcessRunner.exec( 'git', ['tag', '-a', pkgTag, '-m', '${pkg['name']} v$newVersion'], cwd: repoRoot, fatal: true, verbose: global.verbose, ); - CiProcessRunner.exec('git', ['push', 'origin', pkgTag], cwd: repoRoot, fatal: true, verbose: global.verbose); + await CiProcessRunner.exec( + 'git', + ['push', 'origin', pkgTag], + cwd: repoRoot, + fatal: true, + verbose: global.verbose, + ); pkgTagsCreated.add(pkgTag); } catch (e) { Logger.error('Failed to create per-package tag $pkgTag: $e'); @@ -400,7 +429,7 @@ class CreateReleaseCommand extends Command { final ghArgs = ['release', 'create', tag, '--title', 'v$newVersion', '--notes', releaseBody]; if (effectiveRepo.isNotEmpty) ghArgs.addAll(['--repo', effectiveRepo]); - CiProcessRunner.exec('gh', ghArgs, cwd: repoRoot, verbose: global.verbose); + await CiProcessRunner.exec('gh', ghArgs, cwd: repoRoot, verbose: global.verbose); Logger.success('Created GitHub Release: $tag'); // Build rich summary diff --git a/lib/src/cli/commands/init_command.dart b/lib/src/cli/commands/init_command.dart index cf9422f..ce162d6 100644 --- a/lib/src/cli/commands/init_command.dart +++ b/lib/src/cli/commands/init_command.dart @@ -134,6 +134,7 @@ class InitCommand extends Command { 'dart_sdk': '3.9.2', 'personal_access_token_secret': 'GITHUB_TOKEN', 'line_length': 120, + 'artifact_retention_days': 7, 
'features': { 'proto': false, 'lfs': false, @@ -141,6 +142,8 @@ class InitCommand extends Command { 'analysis_cache': true, 'managed_analyze': true, 'managed_test': true, + 'build_runner': false, + 'web_test': false, }, 'secrets': {}, 'sub_packages': [], diff --git a/lib/src/cli/commands/test_command.dart b/lib/src/cli/commands/test_command.dart index 18c73ed..ccd198c 100644 --- a/lib/src/cli/commands/test_command.dart +++ b/lib/src/cli/commands/test_command.dart @@ -1,61 +1,156 @@ import 'dart:async'; +import 'dart:convert'; import 'dart:io'; import 'package:args/command_runner.dart'; +import 'package:path/path.dart' as p; import '../../triage/utils/config.dart'; +import '../utils/exit_util.dart'; import '../utils/logger.dart'; import '../utils/repo_utils.dart'; +import '../utils/step_summary.dart'; +import '../utils/test_results_util.dart'; import '../utils/sub_package_utils.dart'; +import '../utils/utf8_bounded_buffer.dart'; -/// Run `dart test` on the root package and all configured sub-packages. +typedef _ExitHandler = Future Function(int code); + +/// Run `dart test` on the root package and all configured sub-packages with +/// full output capture (two-layer strategy). +/// +/// **Layer 1 — Zone-aware reporters:** `--file-reporter json:` captures all +/// `print()` calls as `PrintEvent` objects with test attribution, and +/// `--file-reporter expanded:` captures human-readable output. +/// +/// **Layer 2 — Shell-level `tee`:** Configured in the CI template to capture +/// anything that bypasses Dart zones (`stdout.write()`, isolate prints, FFI). +/// +/// All log files are written to `$TEST_LOG_DIR` (set by CI template) or +/// `/.dart_tool/test-logs/` locally. class TestCommand extends Command { + /// Maximum bytes to buffer per stdout/stderr stream to prevent OOM. + static const int _maxLogBufferBytes = 2 * 1024 * 1024; // 2MB + /// Maximum bytes for pub get output (typically small). 
+ static const int _maxPubGetBufferBytes = 512 * 1024; // 512KB @override final String name = 'test'; @override - final String description = 'Run dart test.'; + final String description = 'Run dart test with full output capture and job summary.'; @override Future run() async { final repoRoot = RepoUtils.findRepoRoot(); if (repoRoot == null) { Logger.error('Could not find ${config.repoName} repo root.'); - exit(1); + await exitWithCode(1); } + await runWithRoot(repoRoot); + } + /// Run tests with an explicit [repoRoot], preserving the contract from + /// manage_cicd when invoked as `manage_cicd test` (CWD may differ from root). + static Future runWithRoot( + String repoRoot, { + Duration processTimeout = const Duration(minutes: 45), + Duration pubGetTimeout = const Duration(minutes: 5), + _ExitHandler exitHandler = exitWithCode, + }) async { Logger.header('Running dart test'); - const processTimeout = Duration(minutes: 45); final failures = []; + // Determine log directory: TEST_LOG_DIR (CI) or .dart_tool/test-logs/ (local) + final logDir = await _resolveLogDirOrExit(repoRoot, exitHandler); + Logger.info('Log directory: $logDir'); + + final jsonPath = p.join(logDir, 'results.json'); + final expandedPath = p.join(logDir, 'expanded.txt'); + // Skip gracefully if no test/ directory exists - final testDir = Directory('$repoRoot/test'); + final testDir = Directory(p.join(repoRoot, 'test')); if (!testDir.existsSync()) { Logger.success('No test/ directory found — skipping root tests'); + StepSummary.write('## Test Results\n\n**No test/ directory found — skipped.**\n'); } else { - // Use Process.start for streaming output instead of Process.runSync. - // This ensures real-time output in CI (runSync buffers everything until - // exit, so a hanging test produces zero output). 
- final process = await Process.start( - Platform.resolvedExecutable, - ['test', '--exclude-tags', 'gcp,integration'], - workingDirectory: repoRoot, - mode: ProcessStartMode.inheritStdio, - ); + // Build test arguments with two file reporters + expanded console output + final testArgs = [ + 'test', + '--exclude-tags', + 'gcp,integration', + '--chain-stack-traces', + '--reporter', + 'expanded', + '--file-reporter', + 'json:$jsonPath', + '--file-reporter', + 'expanded:$expandedPath', + ]; + + Logger.info('Running: dart ${testArgs.join(' ')}'); + + // Use Process.start with piped output so we can both stream to console + // AND capture the full output for summary generation. + final process = await Process.start(Platform.resolvedExecutable, testArgs, workingDirectory: repoRoot); + + // Stream stdout and stderr to console in real-time while capturing + // (byte-bounded to prevent OOM from runaway test output). + const truncationSuffix = '\n\n... (output truncated, exceeded 2MB bytes). See console.log for full output.)'; + final stdoutBuf = Utf8BoundedBuffer(maxBytes: _maxLogBufferBytes, truncationSuffix: truncationSuffix); + final stderrBuf = Utf8BoundedBuffer(maxBytes: _maxLogBufferBytes, truncationSuffix: truncationSuffix); + + void onStdout(String data) { + stdout.write(data); + stdoutBuf.append(data); + } + + void onStderr(String data) { + stderr.write(data); + stderrBuf.append(data); + } + + final stdoutSub = process.stdout.transform(Utf8Decoder(allowMalformed: true)).listen(onStdout); + final stderrSub = process.stderr.transform(Utf8Decoder(allowMalformed: true)).listen(onStderr); + + final stdoutDone = stdoutSub.asFuture(); + final stderrDone = stderrSub.asFuture(); // Process-level timeout: kill the test process if it exceeds 45 minutes. - // Individual test timeouts should catch hangs, but this is a safety net - // for cases where the test process itself doesn't exit (e.g., leaked - // isolates, open sockets keeping the event loop alive). 
- final exitCode = await process.exitCode.timeout( - processTimeout, - onTimeout: () { - Logger.error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); - process.kill(ProcessSignal.sigkill); - return -1; - }, - ); + // On Unix: SIGTERM first, await up to 5s; if still alive, SIGKILL and await. + // On Windows: single kill, then await exit. + int exitCode; + try { + exitCode = await process.exitCode.timeout(processTimeout); + } on TimeoutException { + Logger.error('Test process exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); + exitCode = await _killAndAwaitExit(process); + } + + try { + await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); + } catch (_) { + // Process killed or streams timed out + } finally { + await stdoutSub.cancel(); + await stderrSub.cancel(); + } + + // Write console output to log files + try { + RepoUtils.writeFileSafely(p.join(logDir, 'dart_stdout.log'), stdoutBuf.toString()); + if (!stderrBuf.isEmpty) { + RepoUtils.writeFileSafely(p.join(logDir, 'dart_stderr.log'), stderrBuf.toString()); + } + } on FileSystemException catch (e) { + Logger.warn('Could not write log files: $e'); + } + + // Parse the JSON results file for structured test data + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); + + // Generate and write the rich job summary + TestResultsUtil.writeTestJobSummary(results, exitCode); if (exitCode != 0) { Logger.error('Root tests failed with exit code $exitCode'); @@ -72,7 +167,7 @@ class TestCommand extends Command { for (final sp in subPackages) { final name = sp['name'] as String; final path = sp['path'] as String; - final dir = '$repoRoot/$path'; + final dir = p.join(repoRoot, path); Logger.header('Testing sub-package: $name ($path)'); @@ -81,14 +176,14 @@ class TestCommand extends Command { continue; } - if (!File('$dir/pubspec.yaml').existsSync()) { + if (!File(p.join(dir, 'pubspec.yaml')).existsSync()) { Logger.error(' No 
pubspec.yaml in $dir — cannot test'); failures.add(name); continue; } // Skip sub-packages with no test/ directory - final spTestDir = Directory('$dir/test'); + final spTestDir = Directory(p.join(dir, 'test')); if (!spTestDir.existsSync()) { Logger.info(' No test/ directory in $name — skipping'); continue; @@ -96,12 +191,18 @@ class TestCommand extends Command { // Ensure dependencies are resolved (sub-packages have independent // pubspec.yaml files that the root `dart pub get` may not cover). - final pubGetResult = Process.runSync( - Platform.resolvedExecutable, - ['pub', 'get'], - workingDirectory: dir, - environment: {'GIT_LFS_SKIP_SMUDGE': '1'}, + // Use Process.start so we can kill on timeout (Process.run would hang). + final pubGetResult = await _runPubGetWithTimeout( + dir, + pubGetTimeout, + onTimeout: () { + Logger.error(' dart pub get timed out for $name (${pubGetTimeout.inMinutes}-minute limit)'); + }, ); + if (pubGetResult == null) { + failures.add(name); + continue; + } if (pubGetResult.exitCode != 0) { final pubGetStderr = (pubGetResult.stderr as String).trim(); if (pubGetStderr.isNotEmpty) Logger.error(pubGetStderr); @@ -110,21 +211,80 @@ class TestCommand extends Command { continue; } - final spProcess = await Process.start( - Platform.resolvedExecutable, - ['test', '--exclude-tags', 'gcp,integration'], - workingDirectory: dir, - mode: ProcessStartMode.inheritStdio, - ); + final spLogDir = p.join(logDir, name); + try { + RepoUtils.ensureSafeDirectory(spLogDir); + } on FileSystemException catch (e) { + Logger.error('Cannot use sub-package log directory for $name: $e'); + failures.add(name); + continue; + } + final spJsonPath = p.join(spLogDir, 'results.json'); + final spExpandedPath = p.join(spLogDir, 'expanded.txt'); - final spExitCode = await spProcess.exitCode.timeout( - processTimeout, - onTimeout: () { - Logger.error('Test process for $name exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); - spProcess.kill(ProcessSignal.sigkill); 
- return -1; - }, - ); + final spTestArgs = [ + 'test', + '--exclude-tags', + 'gcp,integration', + '--chain-stack-traces', + '--reporter', + 'expanded', + '--file-reporter', + 'json:$spJsonPath', + '--file-reporter', + 'expanded:$spExpandedPath', + ]; + + final spProcess = await Process.start(Platform.resolvedExecutable, spTestArgs, workingDirectory: dir); + + const spTruncationSuffix = '\n\n... (output truncated, exceeded 2MB bytes). See console.log for full output.)'; + final stdoutBuf = Utf8BoundedBuffer(maxBytes: _maxLogBufferBytes, truncationSuffix: spTruncationSuffix); + final stderrBuf = Utf8BoundedBuffer(maxBytes: _maxLogBufferBytes, truncationSuffix: spTruncationSuffix); + + void onSpStdout(String data) { + stdout.write(data); + stdoutBuf.append(data); + } + + void onSpStderr(String data) { + stderr.write(data); + stderrBuf.append(data); + } + + final stdoutSub = spProcess.stdout.transform(Utf8Decoder(allowMalformed: true)).listen(onSpStdout); + final stderrSub = spProcess.stderr.transform(Utf8Decoder(allowMalformed: true)).listen(onSpStderr); + + int spExitCode; + try { + spExitCode = await spProcess.exitCode.timeout(processTimeout); + } on TimeoutException { + Logger.error('Test process for $name exceeded ${processTimeout.inMinutes}-minute timeout — killing.'); + spExitCode = await _killAndAwaitExit(spProcess); + } + + try { + await Future.wait([ + stdoutSub.asFuture(), + stderrSub.asFuture(), + ]).timeout(const Duration(seconds: 30)); + } catch (_) { + // Process killed or streams timed out + } finally { + await stdoutSub.cancel(); + await stderrSub.cancel(); + } + + try { + RepoUtils.writeFileSafely(p.join(spLogDir, 'dart_stdout.log'), stdoutBuf.toString()); + if (!stderrBuf.isEmpty) { + RepoUtils.writeFileSafely(p.join(spLogDir, 'dart_stderr.log'), stderrBuf.toString()); + } + } on FileSystemException catch (e) { + Logger.warn('Could not write sub-package log files: $e'); + } + + final spResults = await 
TestResultsUtil.parseTestResultsJson(spJsonPath); + TestResultsUtil.writeTestJobSummary(spResults, spExitCode, platformId: name); if (spExitCode != 0) { Logger.error('Tests failed for $name (exit code $spExitCode)'); @@ -136,9 +296,101 @@ class TestCommand extends Command { if (failures.isNotEmpty) { Logger.error('Tests failed for ${failures.length} package(s): ${failures.join(', ')}'); - exit(1); + final failureBullets = failures.map((name) => '- `${StepSummary.escapeHtml(name)}`').join('\n'); + StepSummary.write('\n## Sub-package Test Failures\n\n$failureBullets\n'); + await exitHandler(1); } Logger.success('All tests passed'); + await stdout.flush(); + await stderr.flush(); + } + + /// Runs `dart pub get` in [workingDirectory] with [timeout]. Kills the process + /// on timeout to avoid indefinite hangs. Returns null on timeout. + static Future _runPubGetWithTimeout( + String workingDirectory, + Duration timeout, { + void Function()? onTimeout, + }) async { + final process = await Process.start( + Platform.resolvedExecutable, + ['pub', 'get'], + workingDirectory: workingDirectory, + environment: {'GIT_LFS_SKIP_SMUDGE': '1'}, + ); + const pubGetTruncationSuffix = '\n\n... 
(output truncated).'; + final stdoutBuf = Utf8BoundedBuffer(maxBytes: _maxPubGetBufferBytes, truncationSuffix: pubGetTruncationSuffix); + final stderrBuf = Utf8BoundedBuffer(maxBytes: _maxPubGetBufferBytes, truncationSuffix: pubGetTruncationSuffix); + + final stdoutDone = Completer(); + final stderrDone = Completer(); + final stdoutSub = process.stdout + .transform(Utf8Decoder(allowMalformed: true)) + .listen( + (data) => stdoutBuf.append(data), + onDone: () => stdoutDone.complete(), + onError: (_) => stdoutDone.complete(), + ); + final stderrSub = process.stderr + .transform(Utf8Decoder(allowMalformed: true)) + .listen( + (data) => stderrBuf.append(data), + onDone: () => stderrDone.complete(), + onError: (_) => stderrDone.complete(), + ); + try { + final exitCode = await process.exitCode.timeout(timeout); + await Future.wait([ + stdoutDone.future.timeout(const Duration(seconds: 5), onTimeout: () {}), + stderrDone.future.timeout(const Duration(seconds: 5), onTimeout: () {}), + ]); + await Future.wait([stdoutSub.cancel(), stderrSub.cancel()]); + return ProcessResult(process.pid, exitCode, stdoutBuf.toString(), stderrBuf.toString()); + } on TimeoutException { + onTimeout?.call(); + await _killAndAwaitExit(process); + try { + await Future.wait([ + stdoutDone.future.timeout(const Duration(seconds: 5), onTimeout: () {}), + stderrDone.future.timeout(const Duration(seconds: 5), onTimeout: () {}), + ]); + await Future.wait([stdoutSub.cancel(), stderrSub.cancel()]); + } catch (_) {} + return null; + } + } + + /// Kills [process] and awaits exit. On Unix: SIGTERM first, wait up to 5s; + /// if still alive, SIGKILL and await. On Windows: single kill, then await. + /// Returns -1 to indicate timeout-induced kill. 
+ static Future _killAndAwaitExit(Process process) async { + if (Platform.isWindows) { + process.kill(); + await process.exitCode; + return -1; + } + process.kill(ProcessSignal.sigterm); + try { + await process.exitCode.timeout(const Duration(seconds: 5)); + } on TimeoutException { + process.kill(ProcessSignal.sigkill); + await process.exitCode; + } + return -1; + } + + static Future _resolveLogDirOrExit(String repoRoot, _ExitHandler exitHandler) async { + try { + final logDir = RepoUtils.resolveTestLogDir(repoRoot); + RepoUtils.ensureSafeDirectory(logDir); + return logDir; + } on StateError catch (e) { + Logger.error('$e'); + await exitHandler(1); + } on FileSystemException catch (e) { + Logger.error('Cannot use log directory: $e'); + await exitHandler(1); + } } } diff --git a/lib/src/cli/commands/update_all_command.dart b/lib/src/cli/commands/update_all_command.dart index 189302d..74de8f6 100644 --- a/lib/src/cli/commands/update_all_command.dart +++ b/lib/src/cli/commands/update_all_command.dart @@ -1,6 +1,5 @@ // ignore_for_file: avoid_print -import 'dart:async'; import 'dart:io'; import 'package:args/command_runner.dart'; @@ -10,6 +9,7 @@ import '../../triage/utils/config.dart' show kConfigFileName; import '../manage_cicd_cli.dart'; import '../options/update_all_options.dart'; import '../utils/logger.dart'; +import '../utils/process_runner.dart'; /// Batch-update all packages under a root directory. 
/// @@ -235,11 +235,14 @@ class UpdateAllCommand extends Command { execArgs = ['run', 'runtime_ci_tooling:manage_cicd', ...args]; } - final result = await Process.run( + final result = await CiProcessRunner.runWithTimeout( executable, execArgs, workingDirectory: pkg.path, - ).timeout(const Duration(minutes: 5), onTimeout: () => ProcessResult(0, 124, '', 'Timed out after 5 minutes')); + timeout: const Duration(minutes: 5), + timeoutExitCode: 124, + timeoutMessage: 'Timed out after 5 minutes', + ); sw.stop(); diff --git a/lib/src/cli/commands/update_command.dart b/lib/src/cli/commands/update_command.dart index b6682fb..f91f052 100644 --- a/lib/src/cli/commands/update_command.dart +++ b/lib/src/cli/commands/update_command.dart @@ -527,7 +527,7 @@ class UpdateCommand extends Command { required String prefix, }) { for (final key in source.keys) { - if (key == '_comment') continue; + if (key == '_comment' || key.startsWith('_comment_')) continue; final fullKey = prefix.isEmpty ? key : '$prefix.$key'; if (!target.containsKey(key)) { diff --git a/lib/src/cli/commands/validate_command.dart b/lib/src/cli/commands/validate_command.dart index 8eed099..0ba1ce1 100644 --- a/lib/src/cli/commands/validate_command.dart +++ b/lib/src/cli/commands/validate_command.dart @@ -8,6 +8,7 @@ import '../../triage/utils/config.dart'; import '../utils/ci_constants.dart'; import '../utils/logger.dart'; import '../utils/repo_utils.dart'; +import '../utils/workflow_generator.dart'; /// Validate all configuration files. 
class ValidateCommand extends Command { @@ -88,6 +89,29 @@ class ValidateCommand extends Command { } } + Logger.info(''); + Logger.info('Checking semantic CI config...'); + try { + final ciConfig = WorkflowGenerator.loadCiConfig(repoRoot); + if (ciConfig == null) { + Logger.info('No .runtime_ci/config.json ci section found — skipping semantic CI validation'); + } else { + final ciErrors = WorkflowGenerator.validate(ciConfig); + if (ciErrors.isEmpty) { + Logger.success('Valid CI config semantics: .runtime_ci/config.json#ci'); + } else { + Logger.error('Invalid CI config semantics: .runtime_ci/config.json#ci'); + for (final err in ciErrors) { + Logger.error(' - $err'); + } + allValid = false; + } + } + } on StateError catch (e) { + Logger.error('$e'); + allValid = false; + } + // Validate Stage 1 artifacts Logger.info(''); Logger.info('Checking Stage 1 artifacts from previous runs...'); diff --git a/lib/src/cli/manage_cicd.dart b/lib/src/cli/manage_cicd.dart index e77a8d3..79dc164 100644 --- a/lib/src/cli/manage_cicd.dart +++ b/lib/src/cli/manage_cicd.dart @@ -5,7 +5,9 @@ import 'dart:io'; import '../triage/utils/run_context.dart'; import '../triage/utils/config.dart'; +import 'commands/test_command.dart'; import 'options/manage_cicd_options.dart'; +import 'utils/step_summary.dart'; // Re-export path constants from run_context for use throughout this file. // All CI artifacts live under .runtime_ci/ at the repo root: @@ -430,7 +432,7 @@ Future _runExplore(String repoRoot) async { _error('Ensure runtime_ci_tooling is properly installed (dart pub get).'); exit(1); } - final prompt = _runSync('dart run $promptScriptPath "$prevTag" "$newVersion"', repoRoot); + final prompt = _runSync('dart run $promptScriptPath ${_shellEscape(prevTag)} ${_shellEscape(newVersion)}', repoRoot); if (prompt.isEmpty) { _error('Prompt generator produced empty output. 
Check $promptScriptPath'); exit(1); @@ -450,7 +452,7 @@ Future _runExplore(String repoRoot) async { 'sh', [ '-c', - 'cat $promptPath | gemini ' + 'cat ${_shellEscape(promptPath)} | gemini ' '-o json --yolo ' '-m $kGeminiProModel ' "--allowed-tools 'run_shell_command(git),run_shell_command(gh)'", @@ -585,7 +587,7 @@ Future _runCompose(String repoRoot) async { _error('Prompt script not found: $composerScript'); exit(1); } - final prompt = _runSync('dart run $composerScript "$prevTag" "$newVersion"', repoRoot); + final prompt = _runSync('dart run $composerScript ${_shellEscape(prevTag)} ${_shellEscape(newVersion)}', repoRoot); if (prompt.isEmpty) { _error('Composer prompt generator produced empty output.'); exit(1); @@ -634,11 +636,11 @@ Future _runCompose(String repoRoot) async { 'sh', [ '-c', - 'cat $promptPath | gemini ' + 'cat ${_shellEscape(promptPath)} | gemini ' '-o json --yolo ' '-m $kGeminiProModel ' "--allowed-tools 'run_shell_command(git),run_shell_command(gh)' " - '${includes.join(" ")}', + '${includes.map(_shellEscape).join(" ")}', ], workingDirectory: repoRoot, environment: {...Platform.environment}, @@ -804,7 +806,10 @@ Future _runReleaseNotes(String repoRoot) async { _error('Prompt script not found: $rnScript'); exit(1); } - final prompt = _runSync('dart run $rnScript "$prevTag" "$newVersion" "$bumpType"', repoRoot); + final prompt = _runSync( + 'dart run $rnScript ${_shellEscape(prevTag)} ${_shellEscape(newVersion)} ${_shellEscape(bumpType)}', + repoRoot, + ); if (prompt.isEmpty) { _error('Release notes prompt generator produced empty output.'); exit(1); @@ -848,12 +853,12 @@ Future _runReleaseNotes(String repoRoot) async { 'sh', [ '-c', - 'cat $promptPath | gemini ' + 'cat ${_shellEscape(promptPath)} | gemini ' '-o json --yolo ' '-m $kGeminiProModel ' // Expanded tool access: git, gh, AND shell commands for reading files "--allowed-tools 
'run_shell_command(git),run_shell_command(gh),run_shell_command(cat),run_shell_command(head),run_shell_command(tail)' " - '${includes.join(" ")}', + '${includes.map(_shellEscape).join(" ")}', ], workingDirectory: repoRoot, environment: {...Platform.environment}, @@ -976,7 +981,7 @@ List> _gatherVerifiedContributors(String repoRoot, String pr // Step 1: Get one commit SHA per unique author email in the release range final gitResult = Process.runSync('sh', [ '-c', - 'git log "$prevTag"..HEAD --format="%H %ae" --no-merges | sort -u -k2,2', + 'git log ${_shellEscape(prevTag)}..HEAD --format="%H %ae" --no-merges | sort -u -k2,2', ], workingDirectory: repoRoot); if (gitResult.exitCode != 0) { @@ -1357,7 +1362,7 @@ Future _generateAutodocFile({ if (libDir.isNotEmpty) promptArgs.add(libDir); if (docType == 'migration' && previousHash.isNotEmpty) promptArgs.add(previousHash); - final prompt = _runSync('dart run $repoRoot/$templatePath ${promptArgs.map((a) => '"$a"').join(' ')}', repoRoot); + final prompt = _runSync('dart run $repoRoot/$templatePath ${promptArgs.map(_shellEscape).join(' ')}', repoRoot); if (prompt.isEmpty) { _warn(' [$moduleId] Empty prompt for $docType, skipping'); @@ -1398,7 +1403,10 @@ Do not skip any -- completeness is more important than brevity. 
final pass1Result = Process.runSync( 'sh', - ['-c', 'cat ${pass1Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}'], + [ + '-c', + 'cat ${_shellEscape(pass1Prompt.path)} | gemini --yolo -m $kGeminiProModel ${includes.map(_shellEscape).join(" ")}', + ], workingDirectory: repoRoot, environment: {...Platform.environment}, ); @@ -1487,7 +1495,10 @@ Write the corrected file to the same path: $absOutputFile final pass2Result = Process.runSync( 'sh', - ['-c', 'cat ${pass2Prompt.path} | gemini --yolo -m $kGeminiProModel ${includes.join(" ")}'], + [ + '-c', + 'cat ${_shellEscape(pass2Prompt.path)} | gemini --yolo -m $kGeminiProModel ${includes.map(_shellEscape).join(" ")}', + ], workingDirectory: repoRoot, environment: {...Platform.environment}, ); @@ -1513,7 +1524,7 @@ Write the corrected file to the same path: $absOutputFile /// Compute SHA256 hash of all source files in the given paths. String _computeModuleHash(String repoRoot, List sourcePaths) { // Use git to compute a hash of the directory contents - final paths = sourcePaths.map((p) => '$repoRoot/$p').join(' '); + final paths = sourcePaths.map((p) => _shellEscape('$repoRoot/$p')).join(' '); final result = Process.runSync('sh', [ '-c', 'find $paths -type f \\( -name "*.proto" -o -name "*.dart" \\) 2>/dev/null | sort | xargs cat 2>/dev/null | sha256sum | cut -d" " -f1', @@ -1815,8 +1826,11 @@ Future _runDetermineVersion(String repoRoot, List args) async { _success('Version bump rationale saved to $kVersionBumpsDir/v$newVersion.md'); } else { // Generate basic rationale - final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); - final commits = _runSync('git log "$prevTag"..HEAD --oneline --no-merges 2>/dev/null | head -20', repoRoot); + final commitCount = _runSync('git rev-list --count ${_shellEscape(prevTag)}..HEAD 2>/dev/null', repoRoot); + final commits = _runSync( + 'git log ${_shellEscape(prevTag)}..HEAD --oneline --no-merges 2>/dev/null | head -20', + 
repoRoot, + ); File(targetPath).writeAsStringSync( '# Version Bump: v$newVersion\n\n' '**Date**: ${DateTime.now().toUtc().toIso8601String()}\n' @@ -2140,70 +2154,19 @@ ${_artifactLink()} '''); } -/// Run dart test. +/// Run dart test with full output capture (two-layer strategy). +/// +/// Layer 1 — Zone-aware reporters: `--file-reporter json:` captures all +/// `print()` calls as `PrintEvent` objects with test attribution, and +/// `--file-reporter expanded:` captures human-readable output. +/// +/// Layer 2 — Shell-level `tee` (configured in CI template) captures anything +/// that bypasses Dart zones (`stdout.write()`, isolate prints, FFI output). +/// +/// All log files are written to [logDir] (`$TEST_LOG_DIR` in CI, or +/// `/.dart_tool/test-logs/` locally). Future _runTest(String repoRoot) async { - _header('Running Tests'); - - // Skip gracefully if no test/ directory exists - final testDir = Directory('$repoRoot/test'); - if (!testDir.existsSync()) { - _success('No test/ directory found — skipping tests'); - _writeStepSummary(''' -## Test Results - -**No test/ directory found — skipped.** -'''); - return; - } - - final result = await Process.run('dart', ['test', '--exclude-tags', 'gcp'], workingDirectory: repoRoot); - final output = result.stdout as String; - stdout.write(output); - stderr.write(result.stderr); - // Parse test output for summary (before potential exit) - final passMatch = RegExp(r'(\d+) tests? passed').firstMatch(output); - final failMatch = RegExp(r'(\d+) failed').firstMatch(output); - final skipMatch = RegExp(r'(\d+) skipped').firstMatch(output); - final passed = passMatch?.group(1) ?? '?'; - final failed = failMatch?.group(1) ?? '0'; - final skipped = skipMatch?.group(1) ?? '0'; - - // Truncate output for collapsible (keep last 5000 chars if huge) - final testOutputPreview = output.length > 5000 - ? '... 
(truncated)\n${output.substring(output.length - 5000)}' - : output; - - if (result.exitCode != 0) { - _error('Tests failed (exit code ${result.exitCode})'); - // Write failure summary BEFORE exiting - _writeStepSummary(''' -## Test Results -- FAILED - -| Metric | Count | -|--------|-------| -| Passed | $passed | -| Failed | **$failed** | -| Skipped | $skipped | - -${_collapsible('Test Output', '```\n$testOutputPreview\n```', open: true)} -'''); - exit(result.exitCode); - } - _success('All tests passed'); - - _writeStepSummary(''' -## Test Results - -| Metric | Count | -|--------|-------| -| Passed | **$passed** | -| Failed | $failed | -| Skipped | $skipped | - -**All tests passed.** - -${_collapsible('Test Output', '```\n$testOutputPreview\n```')} -'''); + await TestCommand.runWithRoot(repoRoot); } /// Run dart analyze and fail only on actual errors. @@ -2332,7 +2295,7 @@ Future _runDocumentation(String repoRoot) async { _error('Prompt script not found: $docScript'); exit(1); } - final prompt = _runSync('dart run $docScript "$prevTag" "$newVersion"', repoRoot); + final prompt = _runSync('dart run $docScript ${_shellEscape(prevTag)} ${_shellEscape(newVersion)}', repoRoot); if (prompt.isEmpty) { _error('Documentation prompt generator produced empty output.'); exit(1); @@ -2360,11 +2323,11 @@ Future _runDocumentation(String repoRoot) async { 'sh', [ '-c', - 'cat $promptPath | gemini ' + 'cat ${_shellEscape(promptPath)} | gemini ' '-o json --yolo ' '-m $kGeminiProModel ' "--allowed-tools 'run_shell_command(git),run_shell_command(gh),run_shell_command(cat),run_shell_command(head)' " - '${includes.join(" ")}', + '${includes.map(_shellEscape).join(" ")}', ], workingDirectory: repoRoot, environment: {...Platform.environment}, @@ -2785,8 +2748,11 @@ String _detectNextVersion(String repoRoot, String prevTag) { var patch = int.tryParse(parts[2]) ?? 
0; // ── Pass 1: Fast regex heuristic (fallback if Gemini unavailable) ── - final commits = _runSync('git log "$prevTag"..HEAD --pretty=format:"%s%n%b" 2>/dev/null', repoRoot); - final commitSubjects = _runSync('git log "$prevTag"..HEAD --pretty=format:"%s" --no-merges 2>/dev/null', repoRoot); + final commits = _runSync('git log ${_shellEscape(prevTag)}..HEAD --pretty=format:"%s%n%b" 2>/dev/null', repoRoot); + final commitSubjects = _runSync( + 'git log ${_shellEscape(prevTag)}..HEAD --pretty=format:"%s" --no-merges 2>/dev/null', + repoRoot, + ); var bump = 'patch'; if (RegExp(r'(BREAKING CHANGE|^[a-z]+(\(.+\))?!:)', multiLine: true).hasMatch(commits)) { @@ -2808,9 +2774,12 @@ String _detectNextVersion(String repoRoot, String prevTag) { // ── Pass 2: Gemini analysis (authoritative, overrides regex if available) ── if (_commandExists('gemini') && Platform.environment['GEMINI_API_KEY'] != null) { - final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); - final changedFiles = _runSync('git diff --name-only "$prevTag"..HEAD 2>/dev/null | head -30', repoRoot); - final diffStat = _runSync('git diff --stat "$prevTag"..HEAD 2>/dev/null | tail -5', repoRoot); + final commitCount = _runSync('git rev-list --count ${_shellEscape(prevTag)}..HEAD 2>/dev/null', repoRoot); + final changedFiles = _runSync( + 'git diff --name-only ${_shellEscape(prevTag)}..HEAD 2>/dev/null | head -30', + repoRoot, + ); + final diffStat = _runSync('git diff --stat ${_shellEscape(prevTag)}..HEAD 2>/dev/null | tail -5', repoRoot); final existingTags = _runSync("git tag -l 'v*' --sort=-version:refname | head -10", repoRoot); final commitSummary = commits.split('\n').take(50).join('\n'); @@ -2857,7 +2826,7 @@ String _detectNextVersion(String repoRoot, String prevTag) { final promptPath = '${versionAnalysisDir.path}/prompt.txt'; File(promptPath).writeAsStringSync(prompt); final geminiResult = _runSync( - 'cat $promptPath | gemini ' + 'cat ${_shellEscape(promptPath)} | 
gemini ' '-o json --yolo ' '-m $kGeminiProModel ' "--allowed-tools 'run_shell_command(git),run_shell_command(gh)' " @@ -3049,7 +3018,7 @@ String _buildReleaseCommitMessage({ } // Commit range - final commitCount = _runSync('git rev-list --count "$prevTag"..HEAD 2>/dev/null', repoRoot); + final commitCount = _runSync('git rev-list --count ${_shellEscape(prevTag)}..HEAD 2>/dev/null', repoRoot); buf.writeln('---'); buf.writeln('Automated release by CI/CD pipeline (Gemini CLI + GitHub Actions)'); buf.writeln('Commits since $prevTag: $commitCount'); @@ -3068,6 +3037,17 @@ bool _commandExists(String command) { } } +/// Escapes a string for safe interpolation into a shell command. +/// +/// Uses POSIX single-quote style: the value is wrapped in single quotes, and +/// any single quotes within are escaped as `'"'"'` (end quote, literal quote, +/// start quote). This prevents shell injection when user-controlled values +/// (prevTag, newVersion, bumpType, promptArgs, paths, etc.) are interpolated +/// into _runSync or Process.runSync shell commands. +String _shellEscape(String s) { + return "'${s.replaceAll("'", "'\"'\"'")}'"; +} + String _runSync(String command, String workingDirectory) { if (_verbose) _info('[CMD] $command'); final result = Process.runSync('sh', ['-c', command], workingDirectory: workingDirectory); @@ -3079,44 +3059,29 @@ String _runSync(String command, String workingDirectory) { /// Write a markdown summary to $GITHUB_STEP_SUMMARY (visible in Actions UI). /// No-op when running locally (env var not set). void _writeStepSummary(String markdown) { - final summaryFile = Platform.environment['GITHUB_STEP_SUMMARY']; - if (summaryFile != null) { - File(summaryFile).writeAsStringSync(markdown, mode: FileMode.append); - } + StepSummary.write(markdown); } // ── Step Summary Helpers ───────────────────────────────────────────────────── /// Build a link to the current workflow run's artifacts page. 
String _artifactLink([String label = 'View all artifacts']) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY']; - final runId = Platform.environment['GITHUB_RUN_ID']; - if (repo == null || runId == null) return ''; - return '[$label]($server/$repo/actions/runs/$runId)'; + return StepSummary.artifactLink(label); } /// Build a GitHub compare link between two refs. String _compareLink(String prevTag, String newTag, [String? label]) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; - final text = label ?? '$prevTag...$newTag'; - return '[$text]($server/$repo/compare/$prevTag...$newTag)'; + return StepSummary.compareLink(prevTag, newTag, label); } /// Build a link to a file/path in the repository. String _ghLink(String label, String path) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; - final sha = Platform.environment['GITHUB_SHA'] ?? 'main'; - return '[$label]($server/$repo/blob/$sha/$path)'; + return StepSummary.ghLink(label, path); } /// Build a link to a GitHub Release by tag. String _releaseLink(String tag) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; - return '[v$tag]($server/$repo/releases/tag/$tag)'; + return StepSummary.releaseLink(tag); } /// Add Keep a Changelog reference-style links to the bottom of CHANGELOG.md. @@ -3171,9 +3136,7 @@ void _addChangelogReferenceLinks(String repoRoot, String content) { /// Wrap content in a collapsible
block for step summaries. String _collapsible(String title, String content, {bool open = false}) { - if (content.trim().isEmpty) return ''; - final openAttr = open ? ' open' : ''; - return '\n\n$title\n\n$content\n\n
\n'; + return StepSummary.collapsible(title, content, open: open); } /// Read a file and return its content, or a fallback message if not found. diff --git a/lib/src/cli/utils/exit_util.dart b/lib/src/cli/utils/exit_util.dart new file mode 100644 index 0000000..0a84322 --- /dev/null +++ b/lib/src/cli/utils/exit_util.dart @@ -0,0 +1,13 @@ +import 'dart:io'; + +/// Flush stdout and stderr before exiting so final messages are not lost. +/// Ignores flush errors (e.g. when running under dart test with captured streams). +Future exitWithCode(int code) async { + try { + await stdout.flush(); + } catch (_) {} + try { + await stderr.flush(); + } catch (_) {} + exit(code); +} diff --git a/lib/src/cli/utils/process_runner.dart b/lib/src/cli/utils/process_runner.dart index d15aec0..453a8f7 100644 --- a/lib/src/cli/utils/process_runner.dart +++ b/lib/src/cli/utils/process_runner.dart @@ -1,7 +1,13 @@ +import 'dart:async'; +import 'dart:convert'; import 'dart:io'; +import 'exit_util.dart'; import 'logger.dart'; +/// Maximum bytes to capture per stdout/stderr stream for timeout runs. +const int _kMaxOutputBytes = 32 * 1024; // 32KB + /// Utilities for running external processes. abstract final class CiProcessRunner { /// Patterns that look like tokens/secrets — redact before logging. @@ -45,13 +51,123 @@ abstract final class CiProcessRunner { return output; } - /// Execute a command. Set [fatal] to true to exit on failure. - static void exec(String executable, List args, {String? cwd, bool fatal = false, bool verbose = false}) { + /// Execute a command. Set [fatal] to true to exit on failure (flushes stdout/stderr before exiting). + static Future exec( + String executable, + List args, { + String? 
cwd, + bool fatal = false, + bool verbose = false, + }) async { if (verbose) Logger.info(' \$ ${_redact('$executable ${args.join(" ")}')}'); final result = Process.runSync(executable, args, workingDirectory: cwd); if (result.exitCode != 0) { - Logger.error(' Command failed (exit ${result.exitCode}): ${result.stderr}'); - if (fatal) exit(result.exitCode); + final stderr = _redact((result.stderr as String).trim()); + Logger.error(' Command failed (exit ${result.exitCode}): $stderr'); + if (fatal) await exitWithCode(result.exitCode); + } + } + + /// Runs [executable] with [arguments] and [timeout]. On timeout, kills the + /// process (TERM then KILL on Unix; single kill on Windows) and returns a + /// [ProcessResult] with [timeoutExitCode] and stderr containing [timeoutMessage]. + /// Captures stdout/stderr with bounded buffers ([_kMaxOutputBytes] per stream). + static Future runWithTimeout( + String executable, + List arguments, { + String? workingDirectory, + Duration timeout = const Duration(minutes: 5), + int timeoutExitCode = 124, + String timeoutMessage = 'Timed out', + }) async { + final process = await Process.start(executable, arguments, workingDirectory: workingDirectory); + final stdoutBuf = StringBuffer(); + final stderrBuf = StringBuffer(); + final stdoutBytes = [0]; + final stderrBytes = [0]; + final stdoutTruncated = [false]; + final stderrTruncated = [false]; + const truncationSuffix = '\n\n... 
(output truncated).'; + final truncationBytes = utf8.encode(truncationSuffix).length; + + void capWrite(StringBuffer buf, String data, int maxBytes, List truncated, List byteCount) { + if (truncated[0]) return; + final dataBytes = utf8.encode(data).length; + if (byteCount[0] + dataBytes <= maxBytes) { + buf.write(data); + byteCount[0] += dataBytes; + } else { + final remainingTotal = maxBytes - byteCount[0]; + if (remainingTotal <= truncationBytes) { + truncated[0] = true; + return; + } + final payloadBudget = remainingTotal - truncationBytes; + final bytes = utf8.encode(data); + final toTake = bytes.length > payloadBudget ? payloadBudget : bytes.length; + if (toTake > 0) { + buf.write(utf8.decode(bytes.take(toTake).toList(), allowMalformed: true)); + byteCount[0] += toTake; + } + buf.write(truncationSuffix); + byteCount[0] += truncationBytes; + truncated[0] = true; + } + } + + final stdoutSub = process.stdout + .transform(Utf8Decoder(allowMalformed: true)) + .listen((data) => capWrite(stdoutBuf, data, _kMaxOutputBytes, stdoutTruncated, stdoutBytes)); + final stderrSub = process.stderr + .transform(Utf8Decoder(allowMalformed: true)) + .listen((data) => capWrite(stderrBuf, data, _kMaxOutputBytes, stderrTruncated, stderrBytes)); + final stdoutDone = stdoutSub.asFuture(); + final stderrDone = stderrSub.asFuture(); + + var exitCode = timeoutExitCode; + var timedOut = false; + try { + exitCode = await process.exitCode.timeout(timeout); + } on TimeoutException { + timedOut = true; + await killAndAwaitExit(process); + } + + try { + await Future.wait([stdoutDone, stderrDone]).timeout(const Duration(seconds: 30)); + } catch (_) { + // Best-effort drain complete. 
+ } finally { + try { + await Future.wait([stdoutSub.cancel(), stderrSub.cancel()]); + } catch (_) {} + } + + if (timedOut) { + return ProcessResult(process.pid, timeoutExitCode, stdoutBuf.toString(), timeoutMessage); + } + + return ProcessResult(process.pid, exitCode, stdoutBuf.toString(), stderrBuf.toString()); + } + + /// Kills [process] and awaits exit. On Unix: SIGTERM first, wait up to 5s; + /// if still alive, SIGKILL and await. On Windows: single kill, then await. + static Future killAndAwaitExit(Process process) async { + if (Platform.isWindows) { + process.kill(); + try { + await process.exitCode.timeout(const Duration(seconds: 10)); + } on TimeoutException { + // Best-effort on Windows; caller has already timed out. + } + return; + } + process.kill(ProcessSignal.sigterm); + try { + await process.exitCode.timeout(const Duration(seconds: 5)); + } on TimeoutException { + process.kill(ProcessSignal.sigkill); + await process.exitCode; } } } diff --git a/lib/src/cli/utils/repo_utils.dart b/lib/src/cli/utils/repo_utils.dart index 2cf90e1..b096ce8 100644 --- a/lib/src/cli/utils/repo_utils.dart +++ b/lib/src/cli/utils/repo_utils.dart @@ -1,9 +1,13 @@ import 'dart:io'; +import 'package:path/path.dart' as p; + import '../../triage/utils/config.dart'; /// Utilities for finding and working with the repository root. abstract final class RepoUtils { + static final RegExp _controlChars = RegExp(r'[\r\n\t\x00-\x1f]'); + /// Find the repository root by walking up and looking for pubspec.yaml /// with the matching package name from config. static String? findRepoRoot() { @@ -22,4 +26,64 @@ abstract final class RepoUtils { } return null; } + + /// Resolve and validate the test log directory. + /// + /// - Defaults to `/.dart_tool/test-logs` when TEST_LOG_DIR is unset. + /// - If TEST_LOG_DIR is provided, it must be an absolute path and (when + /// RUNNER_TEMP is set) must stay within RUNNER_TEMP. + static String resolveTestLogDir(String repoRoot, {Map? 
environment}) { + final env = environment ?? Platform.environment; + final defaultDir = p.join(repoRoot, '.dart_tool', 'test-logs'); + final raw = env['TEST_LOG_DIR']; + if (raw == null) return defaultDir; + + final trimmed = raw.trim(); + if (trimmed.isEmpty) return defaultDir; + if (_controlChars.hasMatch(trimmed)) { + throw StateError('TEST_LOG_DIR must not contain newlines or control characters'); + } + + final normalized = p.normalize(trimmed); + if (!p.isAbsolute(normalized)) { + throw StateError('TEST_LOG_DIR must be an absolute path'); + } + + final runnerTempRaw = env['RUNNER_TEMP']?.trim(); + if (runnerTempRaw != null && runnerTempRaw.isNotEmpty) { + if (_controlChars.hasMatch(runnerTempRaw)) { + throw StateError('RUNNER_TEMP must not contain newlines or control characters'); + } + final runnerTemp = p.normalize(runnerTempRaw); + if (!(normalized == runnerTemp || p.isWithin(runnerTemp, normalized))) { + throw StateError('TEST_LOG_DIR must be within RUNNER_TEMP: "$runnerTemp"'); + } + } + + return normalized; + } + + /// Return true when the path itself is a symlink. + static bool isSymlinkPath(String path) { + return FileSystemEntity.typeSync(path, followLinks: false) == FileSystemEntityType.link; + } + + /// Create a directory if needed, and refuse symlink-backed paths. + static void ensureSafeDirectory(String dirPath) { + if (isSymlinkPath(dirPath)) { + throw FileSystemException('Refusing to use symlink directory', dirPath); + } + Directory(dirPath).createSync(recursive: true); + if (isSymlinkPath(dirPath)) { + throw FileSystemException('Refusing to use symlink directory', dirPath); + } + } + + /// Write file content while refusing symlink targets. 
+ static void writeFileSafely(String filePath, String content, {FileMode mode = FileMode.write}) { + if (isSymlinkPath(filePath)) { + throw FileSystemException('Refusing to write through symlink', filePath); + } + File(filePath).writeAsStringSync(content, mode: mode); + } } diff --git a/lib/src/cli/utils/step_summary.dart b/lib/src/cli/utils/step_summary.dart index a0975c0..7304602 100644 --- a/lib/src/cli/utils/step_summary.dart +++ b/lib/src/cli/utils/step_summary.dart @@ -1,54 +1,122 @@ +import 'dart:convert'; import 'dart:io'; import '../../triage/utils/config.dart'; +import 'logger.dart'; +import 'repo_utils.dart'; /// Step summary utilities for GitHub Actions. abstract final class StepSummary { + /// Maximum safe size for $GITHUB_STEP_SUMMARY (1 MiB minus 4 KiB buffer). + static const int _maxSummaryBytes = (1024 * 1024) - (4 * 1024); + static final RegExp _repoSlugPattern = RegExp(r'^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$'); + static final RegExp _numericPattern = RegExp(r'^\d+$'); + static final RegExp _refPattern = RegExp(r'^[A-Za-z0-9._/-]+$'); + /// Write a markdown summary to $GITHUB_STEP_SUMMARY (visible in Actions UI). /// No-op when running locally (env var not set). - static void write(String markdown) { - final summaryFile = Platform.environment['GITHUB_STEP_SUMMARY']; - if (summaryFile != null) { - File(summaryFile).writeAsStringSync(markdown, mode: FileMode.append); + /// Skips appending if the file would exceed the 1 MiB GitHub limit. + /// [environment] overrides Platform.environment (for testing). + static void write(String markdown, {Map? environment}) { + final env = environment ?? Platform.environment; + final summaryFile = env['GITHUB_STEP_SUMMARY']; + if (summaryFile == null || summaryFile.trim().isEmpty) return; + if (RepoUtils.isSymlinkPath(summaryFile)) { + Logger.warn('Refusing to write step summary through symlink: $summaryFile'); + return; + } + final file = File(summaryFile); + final currentSize = file.existsSync() ? 
file.lengthSync() : 0; + // Use UTF-8 byte length (not markdown.length) — GitHub limit is 1 MiB. + final markdownBytes = utf8.encode(markdown).length; + if (currentSize + markdownBytes > _maxSummaryBytes) { + Logger.warn('Step summary approaching 1 MiB limit — skipping append'); + return; + } + try { + RepoUtils.writeFileSafely(summaryFile, markdown, mode: FileMode.append); + } on FileSystemException catch (e) { + Logger.warn('Could not write step summary: $e'); } } /// Build a link to the current workflow run's artifacts page. static String artifactLink([String label = 'View all artifacts']) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY']; + final server = _safeGitHubServerUrl(Platform.environment['GITHUB_SERVER_URL']); + final repo = _safeRepoSlug(Platform.environment['GITHUB_REPOSITORY']); final runId = Platform.environment['GITHUB_RUN_ID']; if (repo == null || runId == null) return ''; + if (!_numericPattern.hasMatch(runId)) return ''; return '[$label]($server/$repo/actions/runs/$runId)'; } /// Build a GitHub compare link between two refs. static String compareLink(String prevTag, String newTag, [String? label]) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; + final server = _safeGitHubServerUrl(Platform.environment['GITHUB_SERVER_URL']); + final repo = _safeRepoSlug(Platform.environment['GITHUB_REPOSITORY']) ?? '${config.repoOwner}/${config.repoName}'; final text = label ?? '$prevTag...$newTag'; return '[$text]($server/$repo/compare/$prevTag...$newTag)'; } /// Build a link to a file/path in the repository. static String ghLink(String label, String path) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? 
'${config.repoOwner}/${config.repoName}'; - final sha = Platform.environment['GITHUB_SHA'] ?? 'main'; + final server = _safeGitHubServerUrl(Platform.environment['GITHUB_SERVER_URL']); + final repo = _safeRepoSlug(Platform.environment['GITHUB_REPOSITORY']) ?? '${config.repoOwner}/${config.repoName}'; + final sha = _safeRef(Platform.environment['GITHUB_SHA']) ?? 'main'; return '[$label]($server/$repo/blob/$sha/$path)'; } /// Build a link to a GitHub Release by tag. static String releaseLink(String tag) { - final server = Platform.environment['GITHUB_SERVER_URL'] ?? 'https://github.com'; - final repo = Platform.environment['GITHUB_REPOSITORY'] ?? '${config.repoOwner}/${config.repoName}'; + final server = _safeGitHubServerUrl(Platform.environment['GITHUB_SERVER_URL']); + final repo = _safeRepoSlug(Platform.environment['GITHUB_REPOSITORY']) ?? '${config.repoOwner}/${config.repoName}'; return '[v$tag]($server/$repo/releases/tag/$tag)'; } /// Wrap content in a collapsible
block for step summaries. + /// Escapes title and content to prevent HTML injection (e.g. closing tags like + ///
) from breaking structure or executing unsafe HTML. static String collapsible(String title, String content, {bool open = false}) { if (content.trim().isEmpty) return ''; final openAttr = open ? ' open' : ''; - return '\n\n$title\n\n$content\n\n\n'; + final safeTitle = escapeHtml(title); + // Escape content to prevent , , ', + stackTrace: 'fake', + printOutput: '', + durationMs: 0, + ), + ); + + TestResultsUtil.writeTestJobSummary( + results, + 1, + platformId: 'linux', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('```')); + expect(summary!, contains('Error: ')); + expect(summary!, contains('fake')); + }); + + test('handles adversarial backtick content in failure output', () { + String? summary; + final results = _parsed(passed: 0, failed: 1, skipped: 0); + results.failures.add( + TestFailure( + name: 'backtick test', + error: '`' * 140 + 'content' + '`' * 140, + stackTrace: '', + printOutput: '', + durationMs: 0, + ), + ); + + TestResultsUtil.writeTestJobSummary( + results, + 1, + platformId: 'linux', + writeSummary: (markdown) => summary = markdown, + ); + + expect(summary, isNotNull); + expect(summary!, contains('### Failed Tests')); + expect(summary!, contains('backtick test')); + // Fence should be longer than content's backticks; output should be valid + expect(summary!.contains('`' * 141), isTrue); + }); + }); + + group('Utf8BoundedBuffer', () { + test('appends full content when under byte limit', () { + final buffer = Utf8BoundedBuffer(maxBytes: 20, truncationSuffix: '...[truncated]'); + buffer.append('hello'); + buffer.append(' world'); + expect(buffer.isTruncated, isFalse); + expect(buffer.toString(), equals('hello world')); + expect(buffer.byteLength, equals(11)); + }); + + test('truncates at UTF-8 rune boundaries and appends suffix', () { + final buffer = Utf8BoundedBuffer(maxBytes: 10, truncationSuffix: '...'); + buffer.append('aaaaaa'); + buffer.append('語語語'); // each 語 is 3 bytes + 
expect(buffer.isTruncated, isTrue); + expect(buffer.toString(), equals('aaaaaa...')); + expect(buffer.byteLength, equals(9)); + }); + + test('never exceeds maxBytes even when suffix is longer than remaining budget', () { + final buffer = Utf8BoundedBuffer(maxBytes: 4, truncationSuffix: '...[truncated]'); + buffer.append('abcdefgh'); + expect(buffer.isTruncated, isTrue); + expect(utf8.encode(buffer.toString()).length, lessThanOrEqualTo(4)); + }); + }); + + group('StepSummary', () { + test('write uses byte size not char size for limit guard', () { + // GitHub step summary limit is 1 MiB; guard must use UTF-8 byte count. + // Multi-byte chars (e.g. 語) have more bytes than chars — old bug used + // markdown.length (chars) and could overflow. + late Directory tempDir; + tempDir = Directory.systemTemp.createTempSync('step_summary_bytes_'); + try { + final summaryPath = p.join(tempDir.path, 'summary.md'); + const maxBytes = (1024 * 1024) - (4 * 1024); + // Fill to maxBytes - 2 so that "語" (3 bytes) would exceed + File(summaryPath).writeAsStringSync('x' * (maxBytes - 2)); + expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); + + StepSummary.write('語', environment: {'GITHUB_STEP_SUMMARY': summaryPath}); + // Should skip append (would exceed); file size unchanged + expect(File(summaryPath).lengthSync(), equals(maxBytes - 2)); + } finally { + if (tempDir.existsSync()) tempDir.deleteSync(recursive: true); + } + }); + + test('collapsible escapes content to prevent HTML injection', () { + final out = StepSummary.collapsible( + 'Title', + 'Content with and ', + ); + expect(out, contains('</summary>')); + expect(out, contains('<script>')); + expect(out, contains('</details>')); + expect(out, contains('<img')); + expect(out, isNot(contains(''))); + expect(out, contains('
')); + expect(out, contains('
')); + }); + }); + + group('SubPackageUtils.loadSubPackages', () { + late Directory tempDir; + + setUp(() { + tempDir = Directory.systemTemp.createTempSync('sub_pkg_load_'); + }); + + tearDown(() { + if (tempDir.existsSync()) tempDir.deleteSync(recursive: true); + }); + + void _writeConfig(Map ci) { + final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); + File('${configDir.path}/config.json').writeAsStringSync(json.encode({'ci': ci})); + } + + test('returns empty when no sub_packages', () { + _writeConfig({'dart_sdk': '3.9.2', 'features': {}}); + expect(SubPackageUtils.loadSubPackages(tempDir.path), isEmpty); + }); + + test('valid sub-packages pass through', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'core', 'path': 'packages/core'}, + {'name': 'api', 'path': 'packages/api'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result.length, equals(2)); + expect(result[0]['name'], equals('core')); + expect(result[0]['path'], equals('packages/core')); + expect(result[1]['name'], equals('api')); + expect(result[1]['path'], equals('packages/api')); + }); + + test('skips invalid name (unsupported chars)', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'foo bar', 'path': 'packages/foo'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result, isEmpty); + }); + + test('skips invalid path (traversal)', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'evil', 'path': '../../../etc/passwd'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result, isEmpty); + }); + + test('skips invalid path (absolute)', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'foo', 'path': '/usr/local'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + 
expect(result, isEmpty); + }); + + test('skips invalid path (leading dash)', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'foo', 'path': '--help'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result, isEmpty); + }); + + test('valid entries pass when mixed with invalid', () { + _writeConfig({ + 'dart_sdk': '3.9.2', + 'features': {}, + 'sub_packages': [ + {'name': 'bad', 'path': '../../../etc'}, + {'name': 'good', 'path': 'packages/good'}, + ], + }); + final result = SubPackageUtils.loadSubPackages(tempDir.path); + expect(result.length, equals(1)); + expect(result[0]['name'], equals('good')); + expect(result[0]['path'], equals('packages/good')); + }); + }); + + group('CiProcessRunner.exec', () { + test('fatal path exits with process exit code after flushing stdout/stderr', () async { + final scriptPath = p.join(p.current, 'test', 'scripts', 'fatal_exit_probe.dart'); + final result = Process.runSync(Platform.resolvedExecutable, ['run', scriptPath], runInShell: false); + final expectedCode = Platform.isWindows ? 7 : 1; + expect(result.exitCode, equals(expectedCode), reason: 'fatal exec should exit with failing command exit code'); + }); + }); + + group('CiProcessRunner.runWithTimeout', () { + test('completes normally when process finishes within timeout', () async { + final result = await CiProcessRunner.runWithTimeout(Platform.resolvedExecutable, [ + '--version', + ], timeout: const Duration(seconds: 10)); + expect(result.exitCode, equals(0)); + expect(result.stdout, contains('Dart')); + }); + + test('returns timeout result and kills process when timeout exceeded', () async { + final executable = Platform.isWindows ? 'ping' : 'sleep'; + final args = Platform.isWindows ? 
['127.0.0.1', '-n', '60'] : ['60']; + final result = await CiProcessRunner.runWithTimeout( + executable, + args, + timeout: const Duration(milliseconds: 500), + timeoutExitCode: 124, + timeoutMessage: 'Timed out', + ); + expect(result.exitCode, equals(124)); + expect(result.stderr, equals('Timed out')); + }); + }); +} diff --git a/test/scripts/fatal_exit_probe.dart b/test/scripts/fatal_exit_probe.dart new file mode 100644 index 0000000..ff9a9b0 --- /dev/null +++ b/test/scripts/fatal_exit_probe.dart @@ -0,0 +1,14 @@ +// Probe script for testing CiProcessRunner.exec with fatal=true. +// Run: dart run test/scripts/fatal_exit_probe.dart +// Expected: exits with code 1 (or 7 on Windows when using exit 7). +import 'dart:io'; + +import 'package:runtime_ci_tooling/src/cli/utils/process_runner.dart'; + +Future main() async { + if (Platform.isWindows) { + await CiProcessRunner.exec('cmd', ['/c', 'exit', '7'], fatal: true); + } else { + await CiProcessRunner.exec('false', [], fatal: true); + } +} diff --git a/test/test_command_test.dart b/test/test_command_test.dart new file mode 100644 index 0000000..88b4ff4 --- /dev/null +++ b/test/test_command_test.dart @@ -0,0 +1,182 @@ +import 'dart:io'; +import 'dart:convert'; + +import 'package:path/path.dart' as p; +import 'package:test/test.dart'; + +import 'package:runtime_ci_tooling/src/cli/commands/test_command.dart'; +import 'package:runtime_ci_tooling/src/cli/utils/test_results_util.dart'; +import 'package:runtime_ci_tooling/src/triage/utils/config.dart'; + +class _TestExit implements Exception { + final int code; + _TestExit(this.code); +} + +Future _throwingExit(int code) async { + throw _TestExit(code); +} + +void main() { + group('TestCommand.runWithRoot', () { + late Directory tempDir; + + setUp(() { + tempDir = Directory.systemTemp.createTempSync('test_command_'); + }); + + tearDown(() { + if (tempDir.existsSync()) { + tempDir.deleteSync(recursive: true); + } + }); + + void writeRootPubspec({bool includeTest = false}) { + 
final deps = includeTest ? 'dev_dependencies:\n test: ^1.24.0\n' : ''; + File( + p.join(tempDir.path, 'pubspec.yaml'), + ).writeAsStringSync('name: ${config.repoName}\nversion: 0.0.0\nenvironment:\n sdk: ^3.0.0\n$deps'); + } + + void writeSubPackageConfig(List> subPackages) { + final configDir = Directory(p.join(tempDir.path, '.runtime_ci'))..createSync(recursive: true); + File(p.join(configDir.path, 'config.json')).writeAsStringSync( + jsonEncode({ + 'ci': { + 'dart_sdk': '3.9.2', + 'features': {'proto': false, 'lfs': false}, + 'sub_packages': subPackages, + }, + }), + ); + } + + test('skips root tests and succeeds when no test/ directory exists', () async { + // Minimal repo: pubspec with matching name, no test/ + writeRootPubspec(); + + // Completes without throwing or exit(1); StepSummary.write is no-op when + // GITHUB_STEP_SUMMARY is unset (local runs). + await TestCommand.runWithRoot(tempDir.path); + }); + + test('uses passed repoRoot for log directory resolution', () async { + // Create minimal repo + writeRootPubspec(); + + await TestCommand.runWithRoot(tempDir.path); + + // Log dir should be under repoRoot when TEST_LOG_DIR is unset + final expectedLogDir = p.join(tempDir.path, '.dart_tool', 'test-logs'); + expect(Directory(expectedLogDir).existsSync(), isTrue); + }); + + test('runs root tests, writes results.json, and StepSummary pathway produces valid output', () async { + // Minimal repo with a passing test to exercise full TestCommand flow + writeRootPubspec(includeTest: true); + Directory(p.join(tempDir.path, 'test')).createSync(recursive: true); + File(p.join(tempDir.path, 'test', 'passing_test.dart')).writeAsStringSync(''' +import 'package:test/test.dart'; + +void main() { + test('passes', () => expect(1 + 1, equals(2))); +} +'''); + // Resolve dependencies so dart test can run + final pubGet = await Process.run('dart', ['pub', 'get'], workingDirectory: tempDir.path); + expect(pubGet.exitCode, equals(0), reason: 'dart pub get must succeed'); + + 
await TestCommand.runWithRoot(tempDir.path); + + final logDir = p.join(tempDir.path, '.dart_tool', 'test-logs'); + expect(Directory(logDir).existsSync(), isTrue, reason: 'log dir should be created'); + + // results.json or expanded.txt are written by file reporters + final jsonPath = p.join(logDir, 'results.json'); + final expandedPath = p.join(logDir, 'expanded.txt'); + final hasResults = File(jsonPath).existsSync() || File(expandedPath).existsSync(); + expect(hasResults, isTrue, reason: 'at least one reporter output should exist'); + + // If results.json exists, verify parse + writeTestJobSummary pathway + if (File(jsonPath).existsSync()) { + final results = await TestResultsUtil.parseTestResultsJson(jsonPath); + expect(results.parsed, isTrue); + expect(results.passed, greaterThanOrEqualTo(1)); + expect(results.failed, equals(0)); + + String? capturedSummary; + TestResultsUtil.writeTestJobSummary( + results, + 0, + platformId: 'test-runner', + writeSummary: (m) => capturedSummary = m, + ); + expect(capturedSummary, isNotNull); + expect(capturedSummary!, contains('## Test Results — test-runner')); + expect(capturedSummary!, contains('passed')); + } + }); + + test('exits with code 1 when root tests fail', () async { + writeRootPubspec(includeTest: true); + Directory(p.join(tempDir.path, 'test')).createSync(recursive: true); + File(p.join(tempDir.path, 'test', 'failing_test.dart')).writeAsStringSync(''' +import 'package:test/test.dart'; + +void main() { + test('fails', () => expect(1, equals(2))); +} +'''); + + final pubGet = await Process.run('dart', ['pub', 'get'], workingDirectory: tempDir.path); + expect(pubGet.exitCode, equals(0), reason: 'dart pub get must succeed'); + + await expectLater( + () => TestCommand.runWithRoot(tempDir.path, exitHandler: _throwingExit), + throwsA(isA<_TestExit>().having((e) => e.code, 'code', 1)), + ); + }); + + test('exits when configured sub-package directory has no pubspec.yaml', () async { + writeRootPubspec(); + 
writeSubPackageConfig([ + {'name': 'pkg_a', 'path': 'packages/pkg_a'}, + ]); + Directory(p.join(tempDir.path, 'packages', 'pkg_a')).createSync(recursive: true); + + await expectLater( + () => TestCommand.runWithRoot(tempDir.path, exitHandler: _throwingExit), + throwsA(isA<_TestExit>().having((e) => e.code, 'code', 1)), + ); + }); + + test('exits when sub-package pub get times out', () async { + writeRootPubspec(); + writeSubPackageConfig([ + {'name': 'pkg_timeout', 'path': 'packages/pkg_timeout'}, + ]); + final pkgDir = Directory(p.join(tempDir.path, 'packages', 'pkg_timeout'))..createSync(recursive: true); + File(p.join(pkgDir.path, 'pubspec.yaml')).writeAsStringSync(''' +name: pkg_timeout +version: 0.0.0 +environment: + sdk: ^3.0.0 +dev_dependencies: + test: ^1.24.0 +'''); + Directory(p.join(pkgDir.path, 'test')).createSync(recursive: true); + File(p.join(pkgDir.path, 'test', 'noop_test.dart')).writeAsStringSync(''' +import 'package:test/test.dart'; + +void main() { + test('noop', () => expect(true, isTrue)); +} +'''); + + await expectLater( + () => TestCommand.runWithRoot(tempDir.path, pubGetTimeout: Duration.zero, exitHandler: _throwingExit), + throwsA(isA<_TestExit>().having((e) => e.code, 'code', 1)), + ); + }); + }); +} diff --git a/test/workflow_generator_test.dart b/test/workflow_generator_test.dart index 0c95411..0f60482 100644 --- a/test/workflow_generator_test.dart +++ b/test/workflow_generator_test.dart @@ -2,6 +2,7 @@ import 'dart:convert'; import 'dart:io'; import 'package:test/test.dart'; +import 'package:yaml/yaml.dart'; import 'package:runtime_ci_tooling/src/cli/utils/workflow_generator.dart'; @@ -15,6 +16,7 @@ Map _validConfig({ dynamic lineLength, List? subPackages, Map? runnerOverrides, + Map? 
webTest, }) { return { 'dart_sdk': dartSdk, @@ -25,9 +27,23 @@ Map _validConfig({ if (lineLength != null) 'line_length': lineLength, if (subPackages != null) 'sub_packages': subPackages, if (runnerOverrides != null) 'runner_overrides': runnerOverrides, + if (webTest != null) 'web_test': webTest, }; } +String _readToolingVersionFromPubspec() { + final pubspec = File('pubspec.yaml'); + if (!pubspec.existsSync()) { + throw StateError('pubspec.yaml not found in current working directory'); + } + final content = pubspec.readAsStringSync(); + final match = RegExp(r'^version:\s*([^\s]+)\s*$', multiLine: true).firstMatch(content); + if (match == null) { + throw StateError('Could not parse version from pubspec.yaml'); + } + return match.group(1)!; +} + void main() { // =========================================================================== // P0: validate() tests @@ -41,61 +57,40 @@ void main() { }); test('null dart_sdk produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': null, - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': null, 'features': {}}); expect(errors, contains('ci.dart_sdk is required')); }); test('non-string dart_sdk produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': 42, - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': 42, 'features': {}}); expect(errors, anyElement(contains('must be a string'))); }); test('empty-string dart_sdk produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': '', - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': '', 'features': {}}); expect(errors, anyElement(contains('non-empty'))); }); test('whitespace-only dart_sdk produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': ' ', - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': ' ', 'features': {}}); // After trim the string is empty expect(errors, 
anyElement(contains('non-empty'))); }); test('dart_sdk with leading/trailing whitespace produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': ' 3.9.2 ', - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': ' 3.9.2 ', 'features': {}}); expect(errors, anyElement(contains('whitespace'))); }); test('dart_sdk with trailing newline triggers whitespace error', () { // A trailing \n makes trimmed != sdk, so the whitespace check fires first. - final errors = WorkflowGenerator.validate({ - 'dart_sdk': '3.9.2\n', - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': '3.9.2\n', 'features': {}}); expect(errors, anyElement(contains('whitespace'))); }); test('dart_sdk with embedded tab (after trim is identity) triggers newlines/tabs error', () { // A tab in the middle: trim() has no effect but the regex catches it. - final errors = WorkflowGenerator.validate({ - 'dart_sdk': '3.9\t.2', - 'features': {}, - }); + final errors = WorkflowGenerator.validate({'dart_sdk': '3.9\t.2', 'features': {}}); expect(errors, anyElement(contains('newlines/tabs'))); }); @@ -143,10 +138,7 @@ void main() { }); test('non-map features produces error', () { - final errors = WorkflowGenerator.validate({ - 'dart_sdk': '3.9.2', - 'features': 'not_a_map', - }); + final errors = WorkflowGenerator.validate({'dart_sdk': '3.9.2', 'features': 'not_a_map'}); expect(errors, anyElement(contains('features must be an object'))); }); @@ -167,17 +159,20 @@ void main() { }); test('all known feature keys pass validation', () { - final errors = WorkflowGenerator.validate(_validConfig( - features: { - 'proto': true, - 'lfs': false, - 'format_check': true, - 'analysis_cache': false, - 'managed_analyze': true, - 'managed_test': false, - 'build_runner': true, - }, - )); + final errors = WorkflowGenerator.validate( + _validConfig( + features: { + 'proto': true, + 'lfs': false, + 'format_check': true, + 'analysis_cache': false, + 
'managed_analyze': true, + 'managed_test': false, + 'build_runner': true, + 'web_test': true, + }, + ), + ); expect(errors.where((e) => e.contains('features')), isEmpty); }); @@ -212,9 +207,7 @@ void main() { }); test('valid multi-platform passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(platforms: ['ubuntu', 'macos', 'windows']), - ); + final errors = WorkflowGenerator.validate(_validConfig(platforms: ['ubuntu', 'macos', 'windows'])); expect(errors.where((e) => e.contains('platforms')), isEmpty); }); @@ -239,11 +232,44 @@ void main() { }); test('valid secrets map passes', () { - final errors = WorkflowGenerator.validate( - _validConfig(secrets: {'API_KEY': 'SOME_SECRET'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'API_KEY': 'SOME_SECRET'})); expect(errors.where((e) => e.contains('secrets')), isEmpty); }); + + test('secrets key with hyphen produces error (unsafe identifier)', () { + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'API-KEY': 'SOME_SECRET'})); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('secrets key starting with digit produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'1API_KEY': 'SOME_SECRET'})); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('secrets value with hyphen produces error (unsafe secret name)', () { + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'API_KEY': 'SOME-SECRET'})); + expect(errors, anyElement(contains('safe secret name'))); + }); + + test('secrets key and value with underscore pass', () { + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'API_KEY': 'MY_SECRET_NAME'})); + expect(errors.where((e) => e.contains('secrets')), isEmpty); + }); + + test('secrets key with leading underscore produces error (must start with uppercase letter)', () { + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'_API_KEY': 
'MY_SECRET'})); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('secrets key with lowercase produces error (uppercase only)', () { + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'api_key': 'MY_SECRET'})); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('secrets value with lowercase produces error (uppercase only)', () { + final errors = WorkflowGenerator.validate(_validConfig(secrets: {'API_KEY': 'my_secret'})); + expect(errors, anyElement(contains('safe secret name'))); + }); }); // ---- personal_access_token_secret ---- @@ -269,6 +295,31 @@ void main() { final errors = WorkflowGenerator.validate(_validConfig()); expect(errors.where((e) => e.contains('personal_access_token_secret')), isEmpty); }); + + test('pat with hyphen produces error (unsafe identifier)', () { + final errors = WorkflowGenerator.validate(_validConfig(pat: 'MY-PAT')); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('pat with special chars produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(pat: r'MY_PAT$')); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('pat GITHUB_TOKEN passes', () { + final errors = WorkflowGenerator.validate(_validConfig(pat: 'GITHUB_TOKEN')); + expect(errors.where((e) => e.contains('personal_access_token_secret')), isEmpty); + }); + + test('pat with leading underscore produces error (must start with uppercase letter)', () { + final errors = WorkflowGenerator.validate(_validConfig(pat: '_MY_PAT')); + expect(errors, anyElement(contains('safe identifier'))); + }); + + test('pat with lowercase produces error (uppercase only)', () { + final errors = WorkflowGenerator.validate(_validConfig(pat: 'my_pat')); + expect(errors, anyElement(contains('safe identifier'))); + }); }); // ---- line_length ---- @@ -292,6 +343,83 @@ void main() { final errors = WorkflowGenerator.validate(_validConfig()); expect(errors.where((e) => 
e.contains('line_length')), isEmpty); }); + + test('string line_length "abc" produces error (must be digits only)', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: 'abc')); + expect(errors, anyElement(contains('digits only'))); + }); + + test('string line_length empty string produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '')); + expect(errors, anyElement(contains('must not be empty'))); + }); + + test('string line_length "+120" produces error (digits only, no sign)', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '+120')); + expect(errors, anyElement(contains('digits only'))); + }); + + test('string line_length "-120" produces error (digits only, no sign)', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '-120')); + expect(errors, anyElement(contains('digits only'))); + }); + + test('string line_length with leading/trailing whitespace produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: ' 120 ')); + expect(errors, anyElement(contains('whitespace'))); + }); + + test('string line_length with embedded newline produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '12\n0')); + expect(errors, anyElement(contains('newlines or control'))); + }); + + test('string line_length "0" produces error (out of range)', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '0')); + expect(errors, anyElement(contains('between 1 and 10000'))); + }); + + test('string line_length "10001" produces error (out of range)', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: '10001')); + expect(errors, anyElement(contains('between 1 and 10000'))); + }); + + test('int line_length 0 produces error (out of range)', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: 0)); + expect(errors, anyElement(contains('between 1 and 10000'))); + }); 
+ + test('int line_length 10001 produces error (out of range)', () { + final errors = WorkflowGenerator.validate(_validConfig(lineLength: 10001)); + expect(errors, anyElement(contains('between 1 and 10000'))); + }); + }); + + // ---- artifact_retention_days ---- + group('artifact_retention_days', () { + test('int artifact_retention_days passes', () { + final config = _validConfig()..['artifact_retention_days'] = 14; + final errors = WorkflowGenerator.validate(config); + expect(errors.where((e) => e.contains('artifact_retention_days')), isEmpty); + }); + + test('string artifact_retention_days passes', () { + final config = _validConfig()..['artifact_retention_days'] = '30'; + final errors = WorkflowGenerator.validate(config); + expect(errors.where((e) => e.contains('artifact_retention_days')), isEmpty); + }); + + test('artifact_retention_days empty string produces error', () { + final config = _validConfig()..['artifact_retention_days'] = ''; + final errors = WorkflowGenerator.validate(config); + expect(errors, anyElement(contains('artifact_retention_days string must not be empty'))); + }); + + test('artifact_retention_days above 90 produces error', () { + final config = _validConfig()..['artifact_retention_days'] = 91; + final errors = WorkflowGenerator.validate(config); + expect(errors, anyElement(contains('between 1 and 90'))); + }); }); // ---- sub_packages (Issue #9 validation) ---- @@ -304,117 +432,183 @@ void main() { }); test('sub_packages entry that is not a map produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(subPackages: ['just_a_string']), - ); + final errors = WorkflowGenerator.validate(_validConfig(subPackages: ['just_a_string'])); expect(errors, anyElement(contains('sub_packages entries must be objects'))); }); test('sub_packages with missing name produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'path': 'packages/foo'}, - ]), + _validConfig( + subPackages: [ + {'path': 
'packages/foo'}, + ], + ), ); expect(errors, anyElement(contains('name must be a non-empty string'))); }); test('sub_packages with empty name produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': '', 'path': 'packages/foo'}, - ]), + _validConfig( + subPackages: [ + {'name': '', 'path': 'packages/foo'}, + ], + ), ); expect(errors, anyElement(contains('name must be a non-empty string'))); }); + test('sub_packages with name containing unsupported characters produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo bar', 'path': 'packages/foo'}, + ], + ), + ); + expect(errors, anyElement(contains('name contains unsupported characters'))); + }); + test('sub_packages with missing path produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo'}, + ], + ), ); expect(errors, anyElement(contains('path must be a non-empty string'))); }); test('sub_packages with empty path produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': ''}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': ''}, + ], + ), ); expect(errors, anyElement(contains('path must be a non-empty string'))); }); test('sub_packages path with directory traversal (..) 
produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': '../../../etc/passwd'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '../../../etc/passwd'}, + ], + ), ); expect(errors, anyElement(contains('must not traverse outside the repo'))); }); test('sub_packages path with embedded traversal produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/../../../etc'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/../../../etc'}, + ], + ), ); expect(errors, anyElement(contains('must not traverse outside the repo'))); }); test('sub_packages absolute path produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': '/usr/local/bin'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '/usr/local/bin'}, + ], + ), ); expect(errors, anyElement(contains('must be a relative repo path'))); }); test('sub_packages path starting with ~ produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': '~/evil'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '~/evil'}, + ], + ), ); expect(errors, anyElement(contains('must be a relative repo path'))); }); + test('sub_packages path "." 
(repo root) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '.'}, + ], + ), + ); + expect(errors, anyElement(contains('must not be repo root'))); + }); + + test('sub_packages path starting with "-" produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': 'foo', 'path': '--help'}, + ], + ), + ); + expect(errors, anyElement(contains('must not start with "-"'))); + }); + test('sub_packages path with backslashes produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': r'packages\foo'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': r'packages\foo'}, + ], + ), ); expect(errors, anyElement(contains('forward slashes'))); }); test('sub_packages path with unsupported characters produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/foo bar'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/foo bar'}, + ], + ), ); expect(errors, anyElement(contains('unsupported characters'))); }); test('sub_packages path with leading/trailing whitespace produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': ' packages/foo '}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': ' packages/foo '}, + ], + ), ); expect(errors, anyElement(contains('whitespace'))); }); + test('sub_packages name with leading/trailing whitespace produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + subPackages: [ + {'name': ' foo ', 'path': 'packages/foo'}, + ], + ), + ); + expect(errors, anyElement(contains('name must not have leading/trailing whitespace'))); + }); + test('sub_packages path with trailing tab triggers whitespace error', () { // Trailing \t means trimmed != value, so the whitespace 
check fires first. final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/foo\t'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/foo\t'}, + ], + ), ); expect(errors, anyElement(contains('whitespace'))); }); @@ -422,39 +616,47 @@ void main() { test('sub_packages path with embedded tab triggers newlines/tabs error', () { // Embedded tab: trim() is identity, so newlines/tabs check catches it. final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/f\too'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/f\too'}, + ], + ), ); expect(errors, anyElement(contains('newlines/tabs'))); }); test('sub_packages duplicate name produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/foo'}, - {'name': 'foo', 'path': 'packages/bar'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/foo'}, + {'name': 'foo', 'path': 'packages/bar'}, + ], + ), ); expect(errors, anyElement(contains('duplicate name "foo"'))); }); test('sub_packages duplicate path (after normalization) produces error', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'foo', 'path': 'packages/foo'}, - {'name': 'bar', 'path': 'packages/./foo'}, - ]), + _validConfig( + subPackages: [ + {'name': 'foo', 'path': 'packages/foo'}, + {'name': 'bar', 'path': 'packages/./foo'}, + ], + ), ); expect(errors, anyElement(contains('duplicate path'))); }); test('valid sub_packages passes', () { final errors = WorkflowGenerator.validate( - _validConfig(subPackages: [ - {'name': 'core', 'path': 'packages/core'}, - {'name': 'api', 'path': 'packages/api'}, - ]), + _validConfig( + subPackages: [ + {'name': 'core', 'path': 'packages/core'}, + {'name': 'api', 'path': 'packages/api'}, + ], + ), ); expect(errors.where((e) => 
e.contains('sub_packages')), isEmpty); }); @@ -475,41 +677,468 @@ void main() { }); test('runner_overrides with invalid platform key produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'solaris': 'my-runner'}), - ); + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'solaris': 'my-runner'})); expect(errors, anyElement(contains('invalid platform key "solaris"'))); }); test('runner_overrides with empty string value produces error', () { - final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'ubuntu': ''}), - ); + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': ''})); expect(errors, anyElement(contains('must be a non-empty string'))); }); + test('runner_overrides value with surrounding whitespace produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': ' custom-runner '})); + expect(errors, anyElement(contains('leading/trailing whitespace'))); + }); + test('valid runner_overrides passes', () { + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': 'custom-runner-label'})); + expect(errors.where((e) => e.contains('runner_overrides')), isEmpty); + }); + + test('runner_overrides value with newline produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': 'runner\nlabel'})); + expect(errors, anyElement(contains('newlines, control chars'))); + }); + + test('runner_overrides value with tab produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': 'runner\tlabel'})); + expect(errors, anyElement(contains('newlines, control chars'))); + }); + + test('runner_overrides value with YAML-injection char produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': 'runner:label'})); + expect(errors, anyElement(contains('unsafe YAML 
chars'))); + }); + + test('runner_overrides value with dollar sign produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(runnerOverrides: {'ubuntu': r'runner$label'})); + expect(errors, anyElement(contains('unsafe YAML chars'))); + }); + + test('runner_overrides value with hyphen and dot passes', () { final errors = WorkflowGenerator.validate( - _validConfig(runnerOverrides: {'ubuntu': 'custom-runner-label'}), + _validConfig(runnerOverrides: {'ubuntu': 'runtime-ubuntu-24.04-x64-256gb'}), ); expect(errors.where((e) => e.contains('runner_overrides')), isEmpty); }); }); + // ---- web_test ---- + group('web_test', () { + test('non-map web_test produces error', () { + final config = _validConfig(); + config['web_test'] = 'not_a_map'; + final errors = WorkflowGenerator.validate(config); + expect(errors, anyElement(contains('web_test must be an object'))); + }); + + test('null web_test is fine (optional)', () { + final errors = WorkflowGenerator.validate(_validConfig()); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('web_test.concurrency non-int produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': 'fast'})); + expect(errors, anyElement(contains('concurrency must be an integer'))); + }); + + test('web_test.concurrency zero produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': 0})); + expect(errors, anyElement(contains('between 1 and 32'))); + }); + + test('web_test.concurrency negative produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': -1})); + expect(errors, anyElement(contains('between 1 and 32'))); + }); + + test('web_test.concurrency exceeds upper bound produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': 33})); + expect(errors, anyElement(contains('between 1 and 32'))); + }); + + test('web_test.concurrency 
double/float produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': 3.14})); + expect(errors, anyElement(contains('concurrency must be an integer'))); + }); + + test('web_test.concurrency valid int passes', () { + final errors = WorkflowGenerator.validate( + _validConfig(features: {'proto': false, 'lfs': false, 'web_test': true}, webTest: {'concurrency': 4}), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('web_test.concurrency at upper bound (32) passes', () { + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'concurrency': 32})); + expect(errors.where((e) => e.contains('concurrency')), isEmpty); + }); + + test('web_test.concurrency null is fine (defaults to 1)', () { + final errors = WorkflowGenerator.validate(_validConfig(webTest: {})); + expect(errors.where((e) => e.contains('concurrency')), isEmpty); + }); + + test('web_test.paths non-list produces error', () { + final errors = WorkflowGenerator.validate(_validConfig(webTest: {'paths': 'not_a_list'})); + expect(errors, anyElement(contains('paths must be an array'))); + }); + + test('web_test.paths with empty string produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': [''], + }, + ), + ); + expect(errors, anyElement(contains('must be a non-empty string'))); + }); + + test('web_test.paths with absolute path produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['/etc/passwd'], + }, + ), + ); + expect(errors, anyElement(contains('must be a relative repo path'))); + }); + + test('web_test.paths with traversal produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['../../../etc/passwd'], + }, + ), + ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); + }); + + test('web_test.paths with embedded traversal 
(test/web/../../../etc/passwd) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'paths': ['test/web/../../../etc/passwd'], + }, + ), + ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); + }); + + test('web_test.paths with shell metacharacters (\$(curl evil)) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': [r'$(curl evil)'], + }, + ), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths with shell metacharacters (; rm -rf /) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['; rm -rf /'], + }, + ), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths with single quote produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ["test/web/foo'bar_test.dart"], + }, + ), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths duplicate (after normalization) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'paths': ['test/web/foo_test.dart', 'test/web/./foo_test.dart'], + }, + ), + ); + expect(errors, anyElement(contains('duplicate path'))); + }); + + test('web_test.paths with backslashes produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': [r'test\web\foo_test.dart'], + }, + ), + ); + expect(errors, anyElement(contains('forward slashes'))); + }); + + test('web_test.paths with unsupported characters produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['test/web test/foo.dart'], + }, + ), + ); + expect(errors, 
anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths with leading whitespace produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': [' test/web/foo_test.dart'], + }, + ), + ); + expect(errors, anyElement(contains('whitespace'))); + }); + + test('web_test.paths with tilde produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['~/test/foo.dart'], + }, + ), + ); + expect(errors, anyElement(contains('must be a relative repo path'))); + }); + + test('web_test.paths "." (repo root) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['.'], + }, + ), + ); + expect(errors, anyElement(contains('must not be repo root'))); + }); + + test('web_test.paths starting with "-" produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['--help'], + }, + ), + ); + expect(errors, anyElement(contains('must not start with "-"'))); + }); + + test('web_test.paths with newline produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['test/foo\nbar.dart'], + }, + ), + ); + expect(errors, anyElement(contains('newlines/tabs'))); + }); + + test('web_test.paths with embedded traversal that escapes repo produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['test/../../../etc/passwd'], + }, + ), + ); + expect(errors, anyElement(contains('must not traverse outside the repo'))); + }); + + test('web_test.paths with embedded .. 
that stays in repo is fine', () { + // test/web/../../etc/passwd normalizes to etc/passwd (still inside repo) + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'paths': ['test/web/../../etc/passwd'], + }, + ), + ); + expect(errors.where((e) => e.contains('traverse')), isEmpty); + }); + + test('web_test.paths with shell metacharacter \$ produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': [r'$(curl evil.com)'], + }, + ), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths with shell metacharacter ; produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['test/foo; rm -rf /'], + }, + ), + ); + expect(errors, anyElement(contains('unsupported characters'))); + }); + + test('web_test.paths with duplicate paths produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['test/web/foo_test.dart', 'test/web/foo_test.dart'], + }, + ), + ); + expect(errors, anyElement(contains('duplicate path'))); + }); + + test('web_test.paths with duplicate normalized paths produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['test/web/./foo_test.dart', 'test/web/foo_test.dart'], + }, + ), + ); + expect(errors, anyElement(contains('duplicate path'))); + }); + + test('web_test.paths with trailing whitespace produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['test/web/foo_test.dart '], + }, + ), + ); + expect(errors, anyElement(contains('whitespace'))); + }); + + test('web_test.paths with tab produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + webTest: { + 'paths': ['test/web/\tfoo_test.dart'], + }, + ), + ); + expect(errors, 
anyElement(contains('newlines/tabs'))); + }); + + test('valid web_test.paths passes', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'paths': ['test/web/foo_test.dart', 'test/web/bar_test.dart'], + }, + ), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('empty web_test.paths list is fine', () { + final errors = WorkflowGenerator.validate( + _validConfig(features: {'proto': false, 'lfs': false, 'web_test': true}, webTest: {'paths': []}), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('valid full web_test config passes', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + webTest: { + 'concurrency': 2, + 'paths': ['test/web/'], + }, + ), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('web_test with unknown key (typo) produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig(webTest: {'concurreny': 2}), // typo: concurreny + ); + expect(errors, anyElement(contains('unknown key "concurreny"'))); + }); + + test('cross-validation: web_test config present but feature disabled produces error', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': false}, + webTest: { + 'concurrency': 2, + 'paths': ['test/web/'], + }, + ), + ); + expect(errors, anyElement(contains('web_test config is present but ci.features.web_test is not enabled'))); + }); + + test('cross-validation: web_test feature enabled but config wrong type produces error', () { + final config = _validConfig(features: {'proto': false, 'lfs': false, 'web_test': true}); + config['web_test'] = 'yes'; + final errors = WorkflowGenerator.validate(config); + expect(errors, anyElement(contains('web_test must be an object'))); + }); + + 
test('cross-validation: web_test feature enabled with no config object (null) is allowed, uses defaults', () { + final errors = WorkflowGenerator.validate( + _validConfig( + features: {'proto': false, 'lfs': false, 'web_test': true}, + // webTest: null (omitted) — config is optional when feature is enabled + ), + ); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + + test('cross-validation: web_test feature enabled with explicit null config is allowed', () { + final config = _validConfig(features: {'proto': false, 'lfs': false, 'web_test': true}); + config['web_test'] = null; + final errors = WorkflowGenerator.validate(config); + expect(errors.where((e) => e.contains('web_test')), isEmpty); + }); + }); + // ---- fully valid config produces no errors ---- test('fully valid config produces no errors', () { - final errors = WorkflowGenerator.validate(_validConfig( - dartSdk: '3.9.2', - features: {'proto': true, 'lfs': false}, - platforms: ['ubuntu', 'macos'], - secrets: {'API_KEY': 'MY_SECRET'}, - pat: 'MY_PAT', - lineLength: 120, - subPackages: [ - {'name': 'core', 'path': 'packages/core'}, - ], - runnerOverrides: {'ubuntu': 'custom-runner'}, - )); + final errors = WorkflowGenerator.validate( + _validConfig( + dartSdk: '3.9.2', + features: {'proto': true, 'lfs': false, 'web_test': true}, + platforms: ['ubuntu', 'macos'], + secrets: {'API_KEY': 'MY_SECRET'}, + pat: 'MY_PAT', + lineLength: 120, + subPackages: [ + {'name': 'core', 'path': 'packages/core'}, + ], + runnerOverrides: {'ubuntu': 'custom-runner'}, + webTest: { + 'concurrency': 2, + 'paths': ['test/web/'], + }, + ), + ); expect(errors, isEmpty); }); @@ -518,7 +1147,7 @@ void main() { final errors = WorkflowGenerator.validate({ // missing dart_sdk, missing features }); - expect(errors.length, greaterThanOrEqualTo(2)); + expect(errors.length, equals(2)); expect(errors, anyElement(contains('dart_sdk'))); expect(errors, anyElement(contains('features'))); }); @@ -545,21 +1174,21 @@ void main() 
{ test('returns null when config.json exists but has no "ci" key', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File('${configDir.path}/config.json').writeAsStringSync(json.encode({ - 'repo_name': 'test_repo', - })); + File('${configDir.path}/config.json').writeAsStringSync(json.encode({'repo_name': 'test_repo'})); final result = WorkflowGenerator.loadCiConfig(tempDir.path); expect(result, isNull); }); test('returns the ci map when config.json has a valid "ci" section', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File('${configDir.path}/config.json').writeAsStringSync(json.encode({ - 'ci': { - 'dart_sdk': '3.9.2', - 'features': {'proto': true}, - }, - })); + File('${configDir.path}/config.json').writeAsStringSync( + json.encode({ + 'ci': { + 'dart_sdk': '3.9.2', + 'features': {'proto': true}, + }, + }), + ); final result = WorkflowGenerator.loadCiConfig(tempDir.path); expect(result, isNotNull); expect(result, isA>()); @@ -578,9 +1207,7 @@ void main() { test('throws StateError when "ci" is not a Map', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File('${configDir.path}/config.json').writeAsStringSync(json.encode({ - 'ci': 'not_a_map', - })); + File('${configDir.path}/config.json').writeAsStringSync(json.encode({'ci': 'not_a_map'})); expect( () => WorkflowGenerator.loadCiConfig(tempDir.path), throwsA(isA().having((e) => e.message, 'message', contains('object'))), @@ -589,13 +1216,703 @@ void main() { test('throws StateError when "ci" is a list instead of a map', () { final configDir = Directory('${tempDir.path}/.runtime_ci')..createSync(); - File('${configDir.path}/config.json').writeAsStringSync(json.encode({ - 'ci': [1, 2, 3], - })); + File('${configDir.path}/config.json').writeAsStringSync( + json.encode({ + 'ci': [1, 2, 3], + }), + ); + expect(() => WorkflowGenerator.loadCiConfig(tempDir.path), throwsA(isA())); + }); + }); + + // 
=========================================================================== + // P0: render() — validation guard and web_test output integration tests + // =========================================================================== + group('WorkflowGenerator.render()', () { + Map _minimalValidConfig({ + bool webTest = false, + Map? webTestConfig, + Map? featureOverrides, + List? platforms, + }) { + final features = { + 'proto': false, + 'lfs': false, + 'format_check': false, + 'analysis_cache': false, + 'managed_analyze': false, + 'managed_test': false, + 'build_runner': false, + 'web_test': webTest, + }; + if (featureOverrides != null) { + features.addAll(featureOverrides); + } + features['web_test'] = webTest; + return _validConfig( + dartSdk: '3.9.2', + features: features, + platforms: platforms ?? ['ubuntu'], + webTest: webTestConfig, + ); + } + + // ---- render() validation guard (defense-in-depth) ---- + test('render throws StateError when config is invalid (missing dart_sdk)', () { + final gen = WorkflowGenerator(ciConfig: {'features': {}}, toolingVersion: '0.0.0-test'); expect( - () => WorkflowGenerator.loadCiConfig(tempDir.path), - throwsA(isA()), + () => gen.render(), + throwsA( + isA().having( + (e) => e.message, + 'message', + allOf(contains('Cannot render with invalid config'), contains('dart_sdk')), + ), + ), + ); + }); + + test('render throws StateError when config has multiple validation errors', () { + final gen = WorkflowGenerator(ciConfig: {}, toolingVersion: '0.0.0-test'); + expect( + () => gen.render(), + throwsA( + isA().having( + (e) => e.message, + 'message', + allOf(contains('Cannot render with invalid config'), contains('dart_sdk'), contains('features')), + ), + ), + ); + }); + + test('render throws StateError when config has invalid web_test type', () { + final gen = WorkflowGenerator( + ciConfig: _validConfig(features: {'proto': false, 'lfs': false, 'web_test': true})..['web_test'] = 'yes', + toolingVersion: '0.0.0-test', + ); + expect( 
+ () => gen.render(), + throwsA( + isA().having( + (e) => e.message, + 'message', + allOf(contains('Cannot render with invalid config'), contains('web_test must be an object')), + ), + ), + ); + }); + + test('render succeeds on valid config', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(webTest: false), toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + expect(rendered, isNotEmpty); + final parsed = loadYaml(rendered) as YamlMap; + expect(parsed, isA()); + expect(parsed.containsKey('name'), isTrue); + expect(parsed['name'], equals('CI')); + expect(parsed.containsKey('jobs'), isTrue); + final jobs = parsed['jobs'] as YamlMap; + expect(jobs, isA()); + expect(jobs.containsKey('pre-check'), isTrue); + expect(jobs.containsKey('analyze-and-test'), isTrue); + }); + + test('web_test=false: rendered output does not contain web-test job', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(webTest: false), toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + expect(rendered, isNot(contains('web-test:'))); + expect(rendered, isNot(contains('dart test -p chrome'))); + }); + + test('web_test=true with omitted config uses default concurrency and no explicit paths', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(webTest: true), toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + expect(rendered, contains('web-test:')); + expect(rendered, contains('dart test -p chrome')); + expect(rendered, contains('--concurrency=1')); + expect(rendered, isNot(contains("'test/"))); + }); + + test('web_test=true with paths: rendered output includes path args', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + webTestConfig: { + 'paths': ['test/web/foo_test.dart'], + 'concurrency': 2, + }, + ), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains("'test/web/foo_test.dart'")); + expect(rendered, 
contains('--concurrency=2')); + expect(rendered, contains('-- \'test/web/foo_test.dart\'')); + }); + + test('web_test=true with concurrency at upper bound (32): rendered output uses 32', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true, webTestConfig: {'concurrency': 32}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('--concurrency=32')); + }); + + test('rendered output parses as valid YAML with jobs map', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + + expect(parsed.containsKey('name'), isTrue); + expect(parsed['name'], equals('CI')); + + final on = parsed['on'] as YamlMap; + expect(on.containsKey('push'), isTrue); + expect(on.containsKey('pull_request'), isTrue); + final pushBranches = (on['push'] as YamlMap)['branches'] as YamlList; + expect(pushBranches, contains('main')); + + final jobs = parsed['jobs'] as YamlMap; + expect(jobs.containsKey('pre-check'), isTrue); + + final preCheck = jobs['pre-check'] as YamlMap; + expect(preCheck['runs-on'], equals('ubuntu-latest')); + final steps = preCheck['steps'] as YamlList; + expect(steps.length, greaterThanOrEqualTo(2)); + final firstStep = steps[0] as YamlMap; + expect('${firstStep['uses']}', contains('actions/checkout')); + }); + + test('rendered workflow stays in sync with committed .github/workflows/ci.yaml', () { + final ciConfig = WorkflowGenerator.loadCiConfig(Directory.current.path); + expect(ciConfig, isNotNull, reason: 'Repository CI config must be present'); + + final goldenPath = '.github/workflows/ci.yaml'; + final goldenFile = File(goldenPath); + expect(goldenFile.existsSync(), isTrue, reason: 'Committed workflow golden must exist'); + + final existingContent = goldenFile.readAsStringSync(); + final toolingVersion = _readToolingVersionFromPubspec(); + final rendered = 
WorkflowGenerator( + ciConfig: ciConfig!, + toolingVersion: toolingVersion, + ).render(existingContent: existingContent); + + String normalize(String input) => '${input.replaceAll('\r\n', '\n').trimRight()}\n'; + + expect( + normalize(rendered), + equals(normalize(existingContent)), + reason: 'Generated workflow drifted from committed file. Re-run workflow generation and commit updated output.', + ); + }); + + test('managed_test: upload step uses success() || failure() not cancelled', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}), + toolingVersion: '0.0.0-test', ); + final rendered = gen.render(); + expect(rendered, contains('success() || failure()')); + expect(rendered, isNot(contains('always()'))); + }); + + test('managed_test: Test step has pipefail and tee for correct exit propagation', () { + // Single-platform and multi-platform must share identical test step + // structure: pipefail ensures test exit code propagates through tee. 
+ final single = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}, platforms: ['ubuntu']), + toolingVersion: '0.0.0-test', + ).render(); + final multi = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}, platforms: ['ubuntu', 'macos']), + toolingVersion: '0.0.0-test', + ).render(); + for (final rendered in [single, multi]) { + expect(rendered, contains('set -o pipefail')); + expect(rendered, contains('tee "')); + expect(rendered, contains('console.log"')); + expect(rendered, contains('manage_cicd test 2>&1')); + } + }); + + test('feature flags render expected snippets', () { + final cases = >[ + {'feature': 'proto', 'snippet': 'Install protoc'}, + {'feature': 'lfs', 'snippet': 'lfs: true'}, + {'feature': 'format_check', 'snippet': 'auto-format:'}, + {'feature': 'analysis_cache', 'snippet': 'Cache Dart analysis'}, + {'feature': 'managed_analyze', 'snippet': 'runtime_ci_tooling:manage_cicd analyze'}, + {'feature': 'managed_test', 'snippet': 'runtime_ci_tooling:manage_cicd test'}, + {'feature': 'build_runner', 'snippet': 'Run build_runner'}, + ]; + + for (final c in cases) { + final feature = c['feature']!; + final snippet = c['snippet']!; + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {feature: true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains(snippet), reason: 'Feature "$feature" should render "$snippet".'); + } + }); + + test('build_runner=false omits build_runner step', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'build_runner': false}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, isNot(contains('Run build_runner'))); + }); + + test('multi-platform render emits analyze + matrix test jobs', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(platforms: ['ubuntu', 'macos']), + 
toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final jobs = parsed['jobs'] as YamlMap; + + expect(jobs.containsKey('analyze'), isTrue); + expect(jobs.containsKey('test'), isTrue); + expect(jobs.containsKey('analyze-and-test'), isFalse); + + final testJob = jobs['test'] as YamlMap; + final strategy = testJob['strategy'] as YamlMap; + final matrix = strategy['matrix'] as YamlMap; + final include = matrix['include'] as YamlList; + expect(include.length, equals(2)); + }); + + // ---- render(existingContent) / _preserveUserSections ---- + group('render(existingContent) preserves user sections', () { + test('user section content is preserved when existingContent has custom lines in a user block', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final base = gen.render(); + // Append a user block with content so extraction finds it (first occurrence is empty) + const customBlock = ''' +# --- BEGIN USER: pre-test --- + - name: Custom pre-test step + run: echo "user-added" +# --- END USER: pre-test --- +'''; + final existing = base + customBlock; + final rendered = gen.render(existingContent: existing); + expect(rendered, contains('Custom pre-test step')); + expect(rendered, contains('user-added')); + expect(rendered, contains('# --- BEGIN USER: pre-test ---')); + expect(rendered, contains('# --- END USER: pre-test ---')); + }); + + test('CRLF normalization: existing content with \\r\\n still preserves sections', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final base = gen.render(); + const customContent = '\r\n - run: echo "crlf-test"\r\n'; + final existing = base.replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---$customContent# --- END USER: pre-test ---', + ); + final rendered = gen.render(existingContent: existing); + 
expect(rendered, contains('crlf-test')); + expect(rendered, contains('# --- BEGIN USER: pre-test ---')); + }); + + test('multiple user sections preserve independently', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final base = gen.render(); + var existing = base; + existing = existing.replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---\n - run: echo pre\n# --- END USER: pre-test ---', + ); + existing = existing.replaceFirst( + '# --- BEGIN USER: post-test ---\n# --- END USER: post-test ---', + '# --- BEGIN USER: post-test ---\n - run: echo post\n# --- END USER: post-test ---', + ); + existing = existing.replaceFirst( + '# --- BEGIN USER: extra-jobs ---\n# --- END USER: extra-jobs ---', + '# --- BEGIN USER: extra-jobs ---\n custom-job:\n runs-on: ubuntu-latest\n# --- END USER: extra-jobs ---', + ); + final rendered = gen.render(existingContent: existing); + expect(rendered, contains('echo pre')); + expect(rendered, contains('echo post')); + expect(rendered, contains('custom-job:')); + expect(rendered, contains('runs-on: ubuntu-latest')); + }); + + test('empty/whitespace-only existing user section does not overwrite rendered section', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final base = gen.render(); + // Existing has pre-test with only whitespace; post-test has real content + final existing = base + .replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---\n \n \t \n# --- END USER: pre-test ---', + ) + .replaceFirst( + '# --- BEGIN USER: post-test ---\n# --- END USER: post-test ---', + '# --- BEGIN USER: post-test ---\n - run: echo kept\n# --- END USER: post-test ---', + ); + final rendered = gen.render(existingContent: existing); + // pre-test: whitespace-only was skipped, so rendered keeps empty placeholder + expect(rendered, 
contains('# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---')); + // post-test: real content was preserved + expect(rendered, contains('echo kept')); + }); + + test('unknown section name in existing content is silently ignored', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final base = gen.render(); + // Add a user section that doesn't exist in the skeleton + final existing = '$base\n# --- BEGIN USER: nonexistent ---\n custom: stuff\n# --- END USER: nonexistent ---\n'; + final rendered = gen.render(existingContent: existing); + // The unknown section content should not appear in the rendered output + // (there's no matching placeholder to insert it into) + expect(rendered, isNot(contains('custom: stuff'))); + // Known sections still render correctly + expect(rendered, contains('# --- BEGIN USER: pre-test ---')); + }); + + test('malformed section markers (missing END) are ignored', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final base = gen.render(); + // Inject a BEGIN without matching END — regex won't match, so it's ignored + final existing = base.replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---\n - run: echo orphan\n', + ); + final rendered = gen.render(existingContent: existing); + // The orphaned content won't be extracted (regex requires matched pair) + expect(rendered, isNot(contains('echo orphan'))); + }); + + test('mismatched section names (BEGIN X / END Y) are ignored', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final base = gen.render(); + final existing = base.replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---\n - run: echo mismatch\n# --- END USER: post-test ---', + ); + final rendered = gen.render(existingContent: existing); + // Mismatched 
names: regex backreference \1 won't match, so nothing extracted + expect(rendered, isNot(contains('echo mismatch'))); + }); + + test('section content with regex-special characters is preserved verbatim', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final base = gen.render(); + // Content with regex special chars: $, (), *, +, ?, |, ^, {, } + const specialContent = r' - run: echo "${{ matrix.os }}" && test [[ "$(whoami)" == "ci" ]]'; + final existing = base.replaceFirst( + '# --- BEGIN USER: pre-test ---\n# --- END USER: pre-test ---', + '# --- BEGIN USER: pre-test ---\n$specialContent\n# --- END USER: pre-test ---', + ); + final rendered = gen.render(existingContent: existing); + expect(rendered, contains(specialContent)); + }); + + test('duplicate user section markers in existing content: last matched section wins', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final base = gen.render(); + final existing = + ''' +$base +# --- BEGIN USER: pre-test --- + - run: echo first +# --- END USER: pre-test --- +# --- BEGIN USER: pre-test --- + - run: echo second +# --- END USER: pre-test --- +'''; + final rendered = gen.render(existingContent: existing); + expect(rendered, contains('echo second')); + expect(rendered, isNot(contains('echo first'))); + }); + + test('null existingContent produces same output as no existingContent', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final withoutExisting = gen.render(); + final withNull = gen.render(existingContent: null); + expect(withNull, equals(withoutExisting)); + }); + + test('existingContent with no user sections produces same output as fresh render', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final fresh = gen.render(); + // Use a completely unrelated string as existing content + final rendered = 
gen.render(existingContent: 'name: SomeOtherWorkflow\non: push'); + expect(rendered, equals(fresh)); + }); + }); + + // ---- render() feature flag combinations ---- + group('feature flag combinations', () { + test('format_check + web_test: web-test needs includes auto-format', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true, featureOverrides: {'format_check': true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final webTestJob = parsed['jobs']['web-test'] as YamlMap; + final needs = (webTestJob['needs'] as YamlList).toList(); + expect(needs, contains('pre-check')); + expect(needs, contains('auto-format')); + }); + + test('format_check renders repo-wide dart format command (.)', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'format_check': true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('run: dart format --line-length 120 .')); + }); + + test('git-config steps use env indirection (GH_PAT) instead of inline secrets in run', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + expect(rendered, contains('GH_PAT: \${{ secrets.')); + expect(rendered, contains('echo "::add-mask::\${GH_PAT}"')); + expect(rendered, isNot(contains('TOKEN="\${{ secrets.'))); + }); + + test('web_test without format_check: web-test needs omits auto-format', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(webTest: true), toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final webTestJob = parsed['jobs']['web-test'] as YamlMap; + final needs = (webTestJob['needs'] as YamlList).toList(); + expect(needs, contains('pre-check')); + expect(needs, isNot(contains('auto-format'))); + }); + + test('build_runner + web_test: web-test 
job contains build_runner step', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true, featureOverrides: {'build_runner': true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + // Find the web-test job section and check it contains build_runner + final webTestStart = rendered.indexOf('web-test:'); + expect(webTestStart, isNot(-1)); + final afterWebTest = rendered.substring(webTestStart); + expect(afterWebTest, contains('Run build_runner')); + }); + + test('proto + web_test: web-test job contains proto steps', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true, featureOverrides: {'proto': true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final webTestStart = rendered.indexOf('web-test:'); + expect(webTestStart, isNot(-1)); + final afterWebTest = rendered.substring(webTestStart); + expect(afterWebTest, contains('Install protoc')); + expect(afterWebTest, contains('Verify proto files')); + }); + + test('multi-platform + web_test: web-test depends on analyze (not test)', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true, platforms: ['ubuntu', 'macos']), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final webTestJob = parsed['jobs']['web-test'] as YamlMap; + final needs = (webTestJob['needs'] as YamlList).toList(); + expect(needs, contains('analyze')); + expect(needs, isNot(contains('test'))); + expect(needs, isNot(contains('analyze-and-test'))); + }); + + test('single-platform + web_test: web-test depends on analyze-and-test', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true, platforms: ['ubuntu']), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final webTestJob = parsed['jobs']['web-test'] as YamlMap; + final needs = (webTestJob['needs'] as 
YamlList).toList(); + expect(needs, contains('pre-check')); + expect(needs, contains('analyze-and-test')); + }); + + test('single-platform uses explicit PLATFORM_ID from single_platform_id context', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}, platforms: ['windows-x64']), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final job = parsed['jobs']['analyze-and-test'] as YamlMap; + final steps = (job['steps'] as YamlList).toList(); + final testStep = steps.firstWhere((s) => s is YamlMap && s['name'] == 'Test', orElse: () => null); + expect(testStep, isNotNull); + final env = (testStep as YamlMap)['env'] as YamlMap; + expect(env['PLATFORM_ID'], equals('windows-x64')); + }); + + test('secrets render in web-test job env block', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true)..['secrets'] = {'API_KEY': 'MY_SECRET'}, + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final webTestStart = rendered.indexOf('web-test:'); + expect(webTestStart, isNot(-1)); + final afterWebTest = rendered.substring(webTestStart); + expect(afterWebTest, contains('API_KEY')); + expect(afterWebTest, contains('MY_SECRET')); + }); + + test('lfs + web_test: web-test checkout has lfs: true', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(webTest: true, featureOverrides: {'lfs': true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final webTestStart = rendered.indexOf('web-test:'); + expect(webTestStart, isNot(-1)); + final afterWebTest = rendered.substring(webTestStart); + expect(afterWebTest, contains('lfs: true')); + }); + + test('managed_test in multi-platform: test job uses managed test command', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}, platforms: ['ubuntu', 'macos']), + 
toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final testJob = parsed['jobs']['test'] as YamlMap; + final steps = (testJob['steps'] as YamlList).toList(); + final testStep = steps.firstWhere((s) => s is YamlMap && s['name'] == 'Test', orElse: () => null); + expect(testStep, isNotNull); + expect((testStep as YamlMap)['run'], contains('manage_cicd test')); + }); + + test('all features enabled renders valid YAML', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig( + webTest: true, + webTestConfig: { + 'concurrency': 4, + 'paths': ['test/web/'], + }, + featureOverrides: { + 'proto': true, + 'lfs': true, + 'format_check': true, + 'analysis_cache': true, + 'managed_analyze': true, + 'managed_test': true, + 'build_runner': true, + }, + platforms: ['ubuntu', 'macos'], + )..['secrets'] = {'API_KEY': 'MY_SECRET'}, + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + // Must parse as valid YAML + final parsed = loadYaml(rendered) as YamlMap; + final jobs = parsed['jobs'] as YamlMap; + expect(jobs.containsKey('pre-check'), isTrue); + expect(jobs.containsKey('auto-format'), isTrue); + expect(jobs.containsKey('analyze'), isTrue); + expect(jobs.containsKey('test'), isTrue); + expect(jobs.containsKey('web-test'), isTrue); + // web-test should have Chrome setup + final webTestSteps = (jobs['web-test']['steps'] as YamlList).toList(); + final chromeStep = webTestSteps.firstWhere( + (s) => s is YamlMap && s['name'] == 'Setup Chrome', + orElse: () => null, + ); + expect(chromeStep, isNotNull); + }); + + test('no features enabled (all false) renders minimal valid YAML', () { + final gen = WorkflowGenerator(ciConfig: _minimalValidConfig(), toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final jobs = parsed['jobs'] as YamlMap; + expect(jobs.containsKey('pre-check'), isTrue); + 
expect(jobs.containsKey('analyze-and-test'), isTrue); + expect(jobs.containsKey('auto-format'), isFalse); + expect(jobs.containsKey('web-test'), isFalse); + // Should NOT contain feature-gated content + expect(rendered, isNot(contains('Install protoc'))); + expect(rendered, isNot(contains('lfs: true'))); + expect(rendered, isNot(contains('auto-format'))); + expect(rendered, isNot(contains('Run build_runner'))); + }); + + test('sub_packages render in single-platform job', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig() + ..['sub_packages'] = [ + {'name': 'core', 'path': 'packages/core'}, + {'name': 'api', 'path': 'packages/api'}, + ], + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('Analyze (core)')); + expect(rendered, contains('Analyze (api)')); + expect(rendered, contains('working-directory: packages/core')); + expect(rendered, contains('working-directory: packages/api')); + }); + + test('runner_overrides change runs-on in single-platform', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig()..['runner_overrides'] = {'ubuntu': 'my-custom-runner'}, + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + final parsed = loadYaml(rendered) as YamlMap; + final job = parsed['jobs']['analyze-and-test'] as YamlMap; + expect(job['runs-on'], equals('my-custom-runner')); + }); + + test('artifact retention-days policy applied consistently (7 days)', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(featureOverrides: {'managed_test': true}), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('retention-days: 7')); + expect(rendered, contains('Policy: test artifact retention-days = 7')); + }); + + test('artifact retention-days can be overridden via ci.artifact_retention_days', () { + final ci = _minimalValidConfig(featureOverrides: {'managed_test': true}); + ci['artifact_retention_days'] = 14; + final 
gen = WorkflowGenerator(ciConfig: ci, toolingVersion: '0.0.0-test'); + final rendered = gen.render(); + expect(rendered, contains('retention-days: 14')); + expect(rendered, contains('Policy: test artifact retention-days = 14')); + }); + + test('Windows pub-cache path uses format for Dart default (%LOCALAPPDATA%\\Pub\\Cache)', () { + final gen = WorkflowGenerator( + ciConfig: _minimalValidConfig(platforms: ['windows']), + toolingVersion: '0.0.0-test', + ); + final rendered = gen.render(); + expect(rendered, contains('Pub')); + expect(rendered, contains('Cache')); + expect(rendered, contains('env.LOCALAPPDATA')); + expect(rendered, contains("'~/.pub-cache'")); + expect(rendered, contains("runner.os == 'Windows'")); + }); }); }); }