Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
94 changes: 91 additions & 3 deletions .github/workflows/avalanche-self-hosted-serial.yml
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
name: Avalanche Self-Hosted Serial
name: ChainSmith CI Tests for Avalanche

on:
workflow_dispatch:
inputs:
test_profile:
description: 'Which Avalanche suite set to run serially'
description: 'Which Avalanche tests to run'
required: true
default: full
type: choice
Expand All @@ -22,7 +22,7 @@ concurrency:

jobs:
avalanche-self-hosted:
name: Avalanche Serial (${{ inputs.test_profile || 'full' }})
name: Avalanche Tests (${{ inputs.test_profile || 'full' }})
runs-on:
- self-hosted
- linux
Expand Down Expand Up @@ -122,12 +122,15 @@ jobs:
printf 'CHAIN_ENV=%s\n' "${CHAIN_ENV}" > .env

- name: Run Avalanche platform smoke test
if: inputs.test_profile == 'smoke' || inputs.test_profile == 'full'
run: pnpm test:avalanche:platform

- name: Run basic smoke test
if: inputs.test_profile == 'smoke' || inputs.test_profile == 'full'
run: pnpm test:basic

- name: Run EVM RPC smoke test
if: inputs.test_profile == 'smoke' || inputs.test_profile == 'full'
run: pnpm test:rpc:evm

- name: Run PoA lifecycle test
Expand Down Expand Up @@ -185,3 +188,88 @@ jobs:
set -euo pipefail
chmod +x chains/avalanche-cli-local/stop-multinode.sh
chains/avalanche-cli-local/stop-multinode.sh clean || true

deploy-report:
name: Deploy Avalanche Report to GitHub Pages
needs: avalanche-self-hosted
runs-on: ubuntu-latest
concurrency:
group: deploy-pages
cancel-in-progress: false
permissions:
contents: write
env:
CHAIN_ENV: avalanche-local
TEST_PROFILE: ${{ inputs.test_profile || 'full' }}

steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Download test report artifact
uses: actions/download-artifact@v4
with:
name: avalanche-self-hosted-report
path: ./report

- name: Deploy to gh-pages branch
shell: bash
run: |
set -euo pipefail
REPORT_DIR="${CHAIN_ENV}/${{ github.run_id }}"
MAX_REPORTS=20

git config --global user.name "github-actions[bot]"
git config --global user.email "github-actions[bot]@users.noreply.github.com"

git clone --branch gh-pages --single-branch \
"https://x-access-token:${{ github.token }}@github.com/${{ github.repository }}.git" \
gh-pages 2>/dev/null || {
mkdir gh-pages
cd gh-pages
git init
git checkout -b gh-pages
git remote add origin "https://x-access-token:${{ github.token }}@github.com/${{ github.repository }}.git"
cd ..
}

mkdir -p "gh-pages/${REPORT_DIR}"
cp -r ./report/* "gh-pages/${REPORT_DIR}/"
echo "$(date -u '+%Y-%m-%d %H:%M')" > "gh-pages/${REPORT_DIR}/timestamp.txt"
echo "${TEST_PROFILE}" > "gh-pages/${REPORT_DIR}/profile.txt"
echo "✅ Report copied to ${REPORT_DIR}/"

cd gh-pages
if [ -d "${CHAIN_ENV}" ]; then
REPORT_COUNT=$(ls -1d ${CHAIN_ENV}/*/ 2>/dev/null | wc -l)
if [ "${REPORT_COUNT}" -gt "${MAX_REPORTS}" ]; then
REMOVE_COUNT=$((REPORT_COUNT - MAX_REPORTS))
ls -1d ${CHAIN_ENV}/*/ | head -n "${REMOVE_COUNT}" | while read -r dir; do
echo "🗑️ Removing old report: ${dir}"
rm -rf "${dir}"
done
fi
fi

echo "📑 Generating test report index..."
chmod +x "$GITHUB_WORKSPACE/scripts/generate-report-index.sh"
bash "$GITHUB_WORKSPACE/scripts/generate-report-index.sh"

git add -A
git commit -m "Deploy Avalanche self-hosted report: ${CHAIN_ENV}/#${{ github.run_id }}" || echo "No changes to commit"
git push origin gh-pages

REPO_NAME=$(echo "${{ github.repository }}" | cut -d'/' -f2)
PAGES_BASE="https://${{ github.repository_owner }}.github.io/${REPO_NAME}"
REPORT_URL="${PAGES_BASE}/${REPORT_DIR}/"
INDEX_URL="${PAGES_BASE}/"

echo "## 📊 Avalanche Report Deployed" >> "$GITHUB_STEP_SUMMARY"
echo "" >> "$GITHUB_STEP_SUMMARY"
echo "| | |" >> "$GITHUB_STEP_SUMMARY"
echo "|---|---|" >> "$GITHUB_STEP_SUMMARY"
echo "| **Chain** | \`${CHAIN_ENV}\` |" >> "$GITHUB_STEP_SUMMARY"
echo "| **Profile** | \`${TEST_PROFILE}\` |" >> "$GITHUB_STEP_SUMMARY"
echo "| **Run ID** | \`${{ github.run_id }}\` |" >> "$GITHUB_STEP_SUMMARY"
echo "| **Report** | [${REPORT_DIR}](${REPORT_URL}) |" >> "$GITHUB_STEP_SUMMARY"
echo "| **Index** | [View All Reports](${INDEX_URL}) |" >> "$GITHUB_STEP_SUMMARY"
100 changes: 77 additions & 23 deletions src/utils/performance-utils.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import fs from 'fs';
import { expect } from 'chai';
import type { PerformanceRunMetrics } from '../blockchain/test-library/PerformanceTestBuilder';

export interface PerformanceExpectConfig {
tokenTransfer: {
Expand All @@ -9,6 +10,27 @@ export interface PerformanceExpectConfig {
};
}

export type PerformanceResultEntry = PerformanceRunMetrics | null;

/**
 * Extracts one numeric metric from every successful (non-null) run entry.
 *
 * @param results - Per-run results; `null` entries represent failed runs and are skipped.
 * @param selector - Maps a successful run's metrics to the numeric value of interest.
 * @returns The selected values in original run order, with failed runs omitted.
 */
function collectMetricValues(
  results: PerformanceResultEntry[],
  selector: (entry: PerformanceRunMetrics) => number
): number[] {
  const values: number[] = [];
  for (const entry of results) {
    if (entry !== null) {
      values.push(selector(entry));
    }
  }
  return values;
}

/**
 * Computes summary statistics (average, minimum, maximum) for a list of
 * metric samples.
 *
 * Implemented as a single pass rather than `Math.min(...values)` /
 * `Math.max(...values)`: spreading a very large array into a function call
 * can exceed the engine's argument limit and throw a RangeError, and the
 * single pass also avoids iterating the data three times.
 *
 * @param values - Numeric metric samples (e.g. latencies in milliseconds).
 * @returns `{ avg, min, max }`; all three are 0 when `values` is empty.
 */
function summarizeMetric(values: number[]): { avg: number; min: number; max: number } {
  if (values.length === 0) {
    return { avg: 0, min: 0, max: 0 };
  }

  let sum = 0;
  let min = Infinity;
  let max = -Infinity;
  for (const value of values) {
    sum += value;
    if (value < min) min = value;
    if (value > max) max = value;
  }

  return { avg: sum / values.length, min, max };
}

/**
* Get configured performance run count.
*
Expand Down Expand Up @@ -37,20 +59,24 @@ export function logPerformanceExpect(expectData: PerformanceExpectConfig): void
* @param performanceExpectations - Performance expectations configuration
* @returns Analysis data including statistics and success rate
*/
export function analyzePerformanceResults(results: number[], performanceExpectations: any): any {
export function analyzePerformanceResults(results: PerformanceResultEntry[], performanceExpectations: any): any {
// Get threshold for proper categorization
const threshold = performanceExpectations?.tokenTransfer?.threshold ?? 10000;
const expectedPercentage = performanceExpectations?.tokenTransfer?.percentage ?? 90;

// Categorize runs based on threshold
const successfulRuns = results.filter(r => r > 0 && r <= threshold);
const failedRuns = results.filter(r => r === -1 || r > threshold);
const successfulRuns = results.filter(
(entry): entry is PerformanceRunMetrics => entry !== null && entry.endToEndLatencyMs <= threshold
);
const failedRuns = results.filter(entry => entry === null || entry.endToEndLatencyMs > threshold);

// Calculate statistics from all valid results (excluding -1 for failed runs)
const validResults = results.filter(r => r > 0);
const avgTime = validResults.length > 0 ? validResults.reduce((a, b) => a + b, 0) / validResults.length : 0;
const minTime = validResults.length > 0 ? Math.min(...validResults) : 0;
const maxTime = validResults.length > 0 ? Math.max(...validResults) : 0;
const endToEndValues = collectMetricValues(results, entry => entry.endToEndLatencyMs);
const submissionValues = collectMetricValues(results, entry => entry.submissionLatencyMs);
const confirmationValues = collectMetricValues(results, entry => entry.confirmationLatencyMs);

const endToEndStats = summarizeMetric(endToEndValues);
const submissionStats = summarizeMetric(submissionValues);
const confirmationStats = summarizeMetric(confirmationValues);

// Calculate success rate
const successRate = (successfulRuns.length / results.length) * 100;
Expand All @@ -60,9 +86,14 @@ export function analyzePerformanceResults(results: number[], performanceExpectat
expectedPercentage,
successfulRuns,
failedRuns,
avgTime,
minTime,
maxTime,
avgTime: endToEndStats.avg,
minTime: endToEndStats.min,
maxTime: endToEndStats.max,
metrics: {
endToEnd: endToEndStats,
submission: submissionStats,
confirmation: confirmationStats,
},
successRate,
totalRuns: results.length,
};
Expand All @@ -75,7 +106,7 @@ export function analyzePerformanceResults(results: number[], performanceExpectat
* @param performanceExpectations - Performance expectations configuration
* @returns Analysis data for further use
*/
export function assertPerformanceResults(results: number[], performanceExpectations: any): any {
export function assertPerformanceResults(results: PerformanceResultEntry[], performanceExpectations: any): any {
const analysis = analyzePerformanceResults(results, performanceExpectations);

console.log(`\n📊 Performance Test Results (${analysis.totalRuns} runs):`);
Expand All @@ -85,10 +116,29 @@ export function assertPerformanceResults(results: number[], performanceExpectati
console.log(
` ❌ Failed runs (>${analysis.threshold}ms or error): ${analysis.failedRuns.length}/${analysis.totalRuns}`
);
console.log(` 📈 Average time: ${analysis.avgTime.toFixed(2)}ms`);
console.log(` 🏃 Min time: ${analysis.minTime}ms`);
console.log(` 🐌 Max time: ${analysis.maxTime}ms`);
console.log(` 📋 All times: [${results.map(r => (r === -1 ? 'FAILED' : r)).join(', ')}]`);
console.log(` 📈 End-to-end average: ${analysis.metrics.endToEnd.avg.toFixed(2)}ms`);
console.log(` 🏃 End-to-end min: ${analysis.metrics.endToEnd.min}ms`);
console.log(` 🐌 End-to-end max: ${analysis.metrics.endToEnd.max}ms`);
console.log(
` 📤 Submission latency: avg=${analysis.metrics.submission.avg.toFixed(2)}ms, ` +
`min=${analysis.metrics.submission.min}ms, max=${analysis.metrics.submission.max}ms`
);
console.log(
` ⛓️ Confirmation latency: avg=${analysis.metrics.confirmation.avg.toFixed(2)}ms, ` +
`min=${analysis.metrics.confirmation.min}ms, max=${analysis.metrics.confirmation.max}ms`
);
console.log(
` 📋 End-to-end times: [` +
`${results.map(entry => (entry === null ? 'FAILED' : entry.endToEndLatencyMs)).join(', ')}]`
);
console.log(
` 📋 Submission times: [` +
`${results.map(entry => (entry === null ? 'FAILED' : entry.submissionLatencyMs)).join(', ')}]`
);
console.log(
` 📋 Confirmation times: [` +
`${results.map(entry => (entry === null ? 'FAILED' : entry.confirmationLatencyMs)).join(', ')}]`
);

console.log(`\n📈 Success Rate Analysis:`);
console.log(` Target: ${analysis.expectedPercentage}% of runs should complete within ${analysis.threshold}ms`);
Expand All @@ -114,7 +164,7 @@ export function assertPerformanceResults(results: number[], performanceExpectati
* @returns The detailed performance result object
*/
export function recordPerformanceResults(
results: number[],
results: PerformanceResultEntry[],
analysis: any,
outputPath: string = 'tests/performanceResult.json'
): any {
Expand All @@ -123,15 +173,19 @@ export function recordPerformanceResults(
totalRuns: analysis.totalRuns,
successfulRuns: analysis.successfulRuns.length,
failedRuns: analysis.failedRuns.length,
averageTime: analysis.avgTime,
minTime: analysis.minTime,
maxTime: analysis.maxTime,
endToEnd: analysis.metrics.endToEnd,
submission: analysis.metrics.submission,
confirmation: analysis.metrics.confirmation,
threshold: analysis.threshold,
},
individualRuns: results.map((time, index) => ({
individualRuns: results.map((entry, index) => ({
run: index + 1,
time: time === -1 ? 'FAILED' : time,
status: time === -1 ? 'FAILED' : time <= analysis.threshold ? 'SUCCESS' : 'TIMEOUT',
endToEndLatencyMs: entry === null ? 'FAILED' : entry.endToEndLatencyMs,
submissionLatencyMs: entry === null ? 'FAILED' : entry.submissionLatencyMs,
confirmationLatencyMs: entry === null ? 'FAILED' : entry.confirmationLatencyMs,
transactionHash: entry === null ? undefined : entry.transactionHash,
blockNumber: entry === null ? undefined : entry.blockNumber,
status: entry === null ? 'FAILED' : entry.endToEndLatencyMs <= analysis.threshold ? 'SUCCESS' : 'TIMEOUT',
})),
};

Expand Down
Loading