diff --git a/.github/workflows/checkE2ETestCode.yml b/.github/workflows/checkE2ETestCode.yml index 090b7a7f23e46..79fe28ac3118a 100644 --- a/.github/workflows/checkE2ETestCode.yml +++ b/.github/workflows/checkE2ETestCode.yml @@ -18,6 +18,9 @@ jobs: - name: Setup Node uses: ./.github/actions/composite/setupNode + with: + IS_DESKTOP_BUILD: 'true' - name: Verify e2e tests compile correctly - run: npm run e2e-test-runner-build \ No newline at end of file + run: npm run e2e-test-runner-build + diff --git a/.github/workflows/e2ePerformanceTests.yml b/.github/workflows/e2ePerformanceTests.yml index 380b874c6c953..6ad886e179932 100644 --- a/.github/workflows/e2ePerformanceTests.yml +++ b/.github/workflows/e2ePerformanceTests.yml @@ -215,17 +215,29 @@ jobs: - name: Print AWS Device Farm run results if: always() - run: cat "./Host_Machine_Files/\$WORKING_DIRECTORY/output.md" + run: cat "./Host_Machine_Files/\$WORKING_DIRECTORY/output1.md" - name: Check if test failed, if so post the results and add the DeployBlocker label id: checkIfRegressionDetected run: | - if grep -q '🔴' "./Host_Machine_Files/\$WORKING_DIRECTORY/output.md"; then + if grep -q '🔴' "./Host_Machine_Files/\$WORKING_DIRECTORY/output1.md"; then # Create an output to the GH action that the test failed: echo "performanceRegressionDetected=true" >> "$GITHUB_OUTPUT" gh pr edit ${{ inputs.PR_NUMBER }} --add-label DeployBlockerCash - gh pr comment ${{ inputs.PR_NUMBER }} -F "./Host_Machine_Files/\$WORKING_DIRECTORY/output.md" + + # Check if there are any split files + if ls "./Host_Machine_Files/\$WORKING_DIRECTORY"/output2.md 1> /dev/null 2>&1; then + # Post each split file as a separate comment + for file in "./Host_Machine_Files/\$WORKING_DIRECTORY/output"*; do + if [ -f "$file" ]; then + gh pr comment ${{ inputs.PR_NUMBER }} -F "$file" + fi + done + else + gh pr comment ${{ inputs.PR_NUMBER }} -F "./Host_Machine_Files/\$WORKING_DIRECTORY/output1.md" + fi + gh pr comment ${{ inputs.PR_NUMBER }} -b 
"@Expensify/mobile-deployers 📣 Please look into this performance regression as it's a deploy blocker." else echo "performanceRegressionDetected=false" >> "$GITHUB_OUTPUT" @@ -237,7 +249,7 @@ jobs: - name: Check if test has skipped tests id: checkIfSkippedTestsDetected run: | - if grep -q '⚠️' "./Host_Machine_Files/\$WORKING_DIRECTORY/output.md"; then + if grep -q '⚠️' "./Host_Machine_Files/\$WORKING_DIRECTORY/output1.md"; then # Create an output to the GH action that the tests were skipped: echo "skippedTestsDetected=true" >> "$GITHUB_OUTPUT" else diff --git a/contributingGuides/REASSURE_PERFORMANCE_TEST.md b/contributingGuides/REASSURE_PERFORMANCE_TEST.md index 0e3f856819a91..a41d49517303d 100644 --- a/contributingGuides/REASSURE_PERFORMANCE_TEST.md +++ b/contributingGuides/REASSURE_PERFORMANCE_TEST.md @@ -23,13 +23,13 @@ We use Reassure for monitoring performance regression. It helps us check if our - Identifying functions with heavy calculations. - Targeting functions that are frequently used throughout the app. -## Running tests locally +## Running tests locally - Checkout your base environment, eg. `git checkout main`. - Collect baseline metrics with `npm run perf-test -- --baseline`. - Apply any desired changes (for testing purposes you can eg. try to slow down a list). - Collect current metrics with `npm run perf-test`. -- Open up the resulting `output.md` / `output.json` (see console output) to compare the results. +- Open up the resulting `output1.md` (and possibly consecutive output files) / `output.json` (see console output) to compare the results. - With all that information, Reassure can present the render duration times as statistically significant or meaningless. 
## Metrics for Regression Detection diff --git a/tests/e2e/compare/compare.ts b/tests/e2e/compare/compare.ts index ad38c249bff3a..6e7c041293484 100644 --- a/tests/e2e/compare/compare.ts +++ b/tests/e2e/compare/compare.ts @@ -92,13 +92,13 @@ function compareResults(baselineEntries: Metric | string, compareEntries: Metric } type Options = { - outputFile: string; + outputDir: string; outputFormat: 'console' | 'markdown' | 'all'; metricForTest: Record; skippedTests: string[]; }; -export default (main: Metric | string, delta: Metric | string, {outputFile, outputFormat = 'all', metricForTest = {}, skippedTests}: Options) => { +export default (main: Metric | string, delta: Metric | string, {outputDir, outputFormat = 'all', metricForTest = {}, skippedTests}: Options) => { // IMPORTANT NOTE: make sure you are passing the main/baseline results first, then the delta/compare results: const outputData = compareResults(main, delta, metricForTest); @@ -107,7 +107,7 @@ export default (main: Metric | string, delta: Metric | string, {outputFile, outp } if (outputFormat === 'markdown' || outputFormat === 'all') { - return writeToMarkdown(outputFile, outputData, skippedTests); + return writeToMarkdown(outputDir, outputData, skippedTests); } }; export {compareResults}; diff --git a/tests/e2e/compare/output/markdown.ts b/tests/e2e/compare/output/markdown.ts index bd32d2d99ab23..4ddec22764f1d 100644 --- a/tests/e2e/compare/output/markdown.ts +++ b/tests/e2e/compare/output/markdown.ts @@ -7,6 +7,10 @@ import type {Data, Entry} from './console'; import * as format from './format'; import markdownTable from './markdownTable'; +const MAX_CHARACTERS_PER_FILE = 65536; +const FILE_SIZE_SAFETY_MARGIN = 1000; +const MAX_CHARACTERS_PER_FILE_WITH_SAFETY_MARGIN = MAX_CHARACTERS_PER_FILE - FILE_SIZE_SAFETY_MARGIN; + const tableHeader = ['Name', 'Duration']; const collapsibleSection = (title: string, content: string) => `
\n${title}\n\n${content}\n
\n\n`; @@ -45,15 +49,27 @@ const formatEntryDuration = (entry: Entry): string => { return ''; }; -const buildDetailsTable = (entries: Entry[]) => { +const buildDetailsTable = (entries: Entry[], numberOfTables = 1) => { if (!entries.length) { - return ''; + return ['']; } - const rows = entries.map((entry) => [entry.name, buildDurationDetailsEntry(entry)]); - const content = markdownTable([tableHeader, ...rows]); + const entriesPerTable = Math.floor(entries.length / numberOfTables); + const tables: string[] = []; + for (let i = 0; i < numberOfTables; i++) { + const start = i * entriesPerTable; + const end = i === numberOfTables - 1 ? entries.length : start + entriesPerTable; + const tableEntries = entries.slice(start, end); + + const rows = tableEntries.map((entry) => [entry.name, buildDurationDetailsEntry(entry)]); + const content = markdownTable([tableHeader, ...rows]); + + const tableMarkdown = collapsibleSection('Show details', content); - return collapsibleSection('Show details', content); + tables.push(tableMarkdown); + } + + return tables; }; const buildSummaryTable = (entries: Entry[], collapse = false) => { @@ -67,36 +83,75 @@ const buildSummaryTable = (entries: Entry[], collapse = false) => { return collapse ? collapsibleSection('Show entries', content) : content; }; -const buildMarkdown = (data: Data, skippedTests: string[]) => { - let result = '## Performance Comparison Report 📊'; +const buildMarkdown = (data: Data, skippedTests: string[], numberOfExtraFiles?: number): [string, ...string[]] => { + let singleFileOutput: string | undefined; + let nExtraFiles = numberOfExtraFiles ?? 0; + + // If the user didn't specify the number of extra files, calculate it based on the size of the single file + if (numberOfExtraFiles === undefined) { + singleFileOutput = buildMarkdown(data, skippedTests, 0)[0]; + const totalCharacters = singleFileOutput.length ?? 
0; + + // If the single file is small enough, return it + if (totalCharacters <= MAX_CHARACTERS_PER_FILE_WITH_SAFETY_MARGIN) { + return [singleFileOutput]; + } + + // Otherwise, calculate the number of extra files needed + nExtraFiles = Math.ceil(totalCharacters / MAX_CHARACTERS_PER_FILE_WITH_SAFETY_MARGIN); + } + + let mainFile = '## Performance Comparison Report 📊'; + mainFile += nExtraFiles > 0 ? ` (1/${nExtraFiles + 1})` : ''; if (data.errors?.length) { - result += '\n\n### Errors\n'; + mainFile += '\n\n### Errors\n'; data.errors.forEach((message) => { - result += ` 1. 🛑 ${message}\n`; + mainFile += ` 1. 🛑 ${message}\n`; }); } if (data.warnings?.length) { - result += '\n\n### Warnings\n'; + mainFile += '\n\n### Warnings\n'; data.warnings.forEach((message) => { - result += ` 1. 🟡 ${message}\n`; + mainFile += ` 1. 🟡 ${message}\n`; }); } - result += '\n\n### Significant Changes To Duration'; - result += `\n${buildSummaryTable(data.significance)}`; - result += `\n${buildDetailsTable(data.significance)}`; - result += '\n\n### Meaningless Changes To Duration'; - result += `\n${buildSummaryTable(data.meaningless, true)}`; - result += `\n${buildDetailsTable(data.meaningless)}`; - result += '\n'; - if (skippedTests.length > 0) { - result += `⚠️ Some tests did not pass successfully, so some results are omitted from final report: ${skippedTests.join(', ')}`; + mainFile += `⚠️ Some tests did not pass successfully, so some results are omitted from final report: ${skippedTests.join(', ')}`; } - return result; + mainFile += '\n\n### Significant Changes To Duration'; + mainFile += `\n${buildSummaryTable(data.significance)}`; + mainFile += `\n${buildDetailsTable(data.significance, 1).at(0)}`; + + const meaninglessDetailsTables = buildDetailsTable(data.meaningless, nExtraFiles); + + if (nExtraFiles === 0) { + mainFile += '\n\n### Meaningless Changes To Duration'; + mainFile += `\n${buildSummaryTable(data.meaningless, true)}`; + mainFile += `\n${meaninglessDetailsTables.at(0)}`; 
+ +        return [mainFile]; +    } + +    const extraFiles: string[] = []; +    for (let i = 0; i < nExtraFiles; i++) { +        let extraFile = '## Performance Comparison Report 📊'; +        extraFile += nExtraFiles > 0 ? ` (${i + 2}/${nExtraFiles + 1})` : ''; + +        extraFile += '\n\n### Meaningless Changes To Duration'; +        extraFile += nExtraFiles > 0 ? ` (${i + 1}/${nExtraFiles + 1})` : ''; + +        extraFile += `\n${buildSummaryTable(data.meaningless, true)}`; +        extraFile += `\n${meaninglessDetailsTables.at(i)}`; +        extraFile += '\n'; + +        extraFiles.push(extraFile); +    } + +    return [mainFile, ...extraFiles]; }; const writeToFile = (filePath: string, content: string) => @@ -113,13 +168,24 @@ const writeToFile = (filePath: string, content: string) => throw error; }); -const writeToMarkdown = (filePath: string, data: Data, skippedTests: string[]) => { -    const markdown = buildMarkdown(data, skippedTests); -    Logger.info('Markdown was built successfully, writing to file...', markdown); -    return writeToFile(filePath, markdown).catch((error) => { -        console.error(error); -        throw error; -    }); +const writeToMarkdown = (outputDir: string, data: Data, skippedTests: string[]) => { +    const markdownFiles = buildMarkdown(data, skippedTests); +    const filesString = markdownFiles.join('\n\n'); +    Logger.info('Markdown was built successfully, writing to file...', filesString); + +    if (markdownFiles.length === 1) { +        return writeToFile(path.join(outputDir, 'output1.md'), markdownFiles[0]); +    } + +    return Promise.all( +        markdownFiles.map((file, index) => { +            const filePath = `${outputDir}/output${index + 1}.md`; +            return writeToFile(filePath, file).catch((error) => { +                console.error(error); +                throw error; +            }); +        }), +    ); }; export default writeToMarkdown; diff --git a/tests/e2e/testRunner.ts b/tests/e2e/testRunner.ts index 7d55e0c81e488..c639834dc4dc1 100644 --- a/tests/e2e/testRunner.ts +++ b/tests/e2e/testRunner.ts @@ -357,7 +357,7 @@ const runTests = async (): Promise => { // Calculate statistics and write them to
our work file Logger.info('Calculating statics and writing results'); await compare(results.main, results.delta, { - outputFile: `${config.OUTPUT_DIR}/output.md`, + outputDir: config.OUTPUT_DIR, outputFormat: 'all', metricForTest, skippedTests, diff --git a/tests/unit/E2EMarkdownTest.ts b/tests/unit/E2EMarkdownTest.ts index 766ec708f31bc..bbf33cc60a245 100644 --- a/tests/unit/E2EMarkdownTest.ts +++ b/tests/unit/E2EMarkdownTest.ts @@ -13,6 +13,7 @@ const results = { describe('markdown formatter', () => { it('should format significant changes properly', () => { const data = compareResults(results.main, results.delta, {commentLinking: 'ms'}); - expect(buildMarkdown(data, [])).toMatchSnapshot(); + const markdown = buildMarkdown(data, []).join('\nEOF\n\n'); + expect(markdown).toMatchSnapshot(); }); }); diff --git a/tests/unit/__snapshots__/E2EMarkdownTest.ts.snap b/tests/unit/__snapshots__/E2EMarkdownTest.ts.snap index 9a66a0e2e823e..c82c0f9c045da 100644 --- a/tests/unit/__snapshots__/E2EMarkdownTest.ts.snap +++ b/tests/unit/__snapshots__/E2EMarkdownTest.ts.snap @@ -19,6 +19,5 @@ exports[`markdown formatter should format significant changes properly 1`] = ` ### Meaningless Changes To Duration _There are no entries_ - " `;