From d56dcd0bc1029ee7547c82f304d2c2282bf6a954 Mon Sep 17 00:00:00 2001 From: Ilya Vlasov Date: Wed, 4 Aug 2021 17:21:41 +0300 Subject: [PATCH 01/11] Code refactoring: 1) Renamed ReviewResult -> GeneralReviewResult. 2) Added ReviewResult from which GeneralReviewResult and FileReviewResult are now inherited. 3) Put common code when printing GeneralReviewResult in different formats in the convert_review_result_to_json_dict function. 4) Also prepared the code for further addition of difficulty levels. 5) Fixed the test: now the json schema is checked more strictly. --- src/python/review/reviewers/common.py | 27 +--- src/python/review/reviewers/perform_review.py | 6 +- src/python/review/reviewers/python.py | 4 +- src/python/review/reviewers/review_result.py | 20 +-- .../review/reviewers/utils/print_review.py | 140 ++++++++++-------- .../test_single_file_json_format.py | 4 + 6 files changed, 101 insertions(+), 100 deletions(-) diff --git a/src/python/review/reviewers/common.py b/src/python/review/reviewers/common.py index 1c7628fb..22a56f48 100644 --- a/src/python/review/reviewers/common.py +++ b/src/python/review/reviewers/common.py @@ -16,7 +16,7 @@ from src.python.review.quality.evaluate_quality import evaluate_quality from src.python.review.quality.model import Quality from src.python.review.quality.penalty import categorize, get_previous_issues_by_language, Punisher -from src.python.review.reviewers.review_result import FileReviewResult, ReviewResult +from src.python.review.reviewers.review_result import FileReviewResult, GeneralReviewResult from src.python.review.reviewers.utils.code_statistics import gather_code_statistics from src.python.review.reviewers.utils.issues_filter import filter_duplicate_issues, filter_low_measure_issues from src.python.review.reviewers.utils.metadata_exploration import FileMetadata, Metadata @@ -43,9 +43,7 @@ } -def perform_language_review(metadata: Metadata, - config: ApplicationConfig, - language: Language) -> ReviewResult: +def 
perform_language_review(metadata: Metadata, config: ApplicationConfig, language: Language) -> GeneralReviewResult: inspectors = LANGUAGE_TO_INSPECTORS[language] issues = inspect_in_parallel(metadata.path, config, inspectors) @@ -73,27 +71,18 @@ def perform_language_review(metadata: Metadata, file_review_results = [] for file_metadata in files_metadata: - issues = file_path_to_issues[file_metadata.path] - code_statistics = gather_code_statistics(issues, file_metadata.path) + file_issues = file_path_to_issues[file_metadata.path] + code_statistics = gather_code_statistics(file_issues, file_metadata.path) code_statistics.total_lines = min(code_statistics.total_lines, get_range_lines(config.start_line, config.end_line)) - punisher = Punisher(issues, previous_issues) + punisher = Punisher(file_issues, previous_issues) quality = evaluate_quality(code_statistics, language) general_quality = general_quality.merge(quality) - file_review_results.append(FileReviewResult( - file_metadata.path, - issues, - quality, - punisher, - )) - - return ReviewResult( - file_review_results, - general_quality, - general_punisher, - ) + file_review_results.append(FileReviewResult(quality, punisher, file_issues, file_metadata.path)) + + return GeneralReviewResult(general_quality, general_punisher, issues, file_review_results) def filter_out_of_range_issues(issues: List[BaseIssue], diff --git a/src/python/review/reviewers/perform_review.py b/src/python/review/reviewers/perform_review.py index fb44c9a0..a2561d05 100644 --- a/src/python/review/reviewers/perform_review.py +++ b/src/python/review/reviewers/perform_review.py @@ -9,7 +9,7 @@ from src.python.review.inspectors.issue import IssueType from src.python.review.reviewers.common import perform_language_review from src.python.review.reviewers.python import perform_python_review -from src.python.review.reviewers.review_result import ReviewResult +from src.python.review.reviewers.review_result import GeneralReviewResult from 
src.python.review.reviewers.utils.metadata_exploration import explore_file, explore_project from src.python.review.reviewers.utils.print_review import ( print_review_result_as_json, @@ -68,10 +68,10 @@ def perform_and_print_review(path: Path, print_review_result_as_text(review_result, path, config) # Don't count INFO issues too - return len(list(filter(lambda issue: issue.type != IssueType.INFO, review_result.all_issues))) + return len(list(filter(lambda issue: issue.type != IssueType.INFO, review_result.issues))) -def perform_review(path: Path, config: ApplicationConfig) -> ReviewResult: +def perform_review(path: Path, config: ApplicationConfig) -> GeneralReviewResult: if not path.exists(): raise PathNotExists diff --git a/src/python/review/reviewers/python.py b/src/python/review/reviewers/python.py index d51b526a..14d7b85e 100644 --- a/src/python/review/reviewers/python.py +++ b/src/python/review/reviewers/python.py @@ -5,11 +5,11 @@ from src.python.review.common.file_system import FileSystemItem, get_all_file_system_items from src.python.review.common.language import Language from src.python.review.reviewers.common import perform_language_review -from src.python.review.reviewers.review_result import ReviewResult +from src.python.review.reviewers.review_result import GeneralReviewResult from src.python.review.reviewers.utils.metadata_exploration import Metadata, ProjectMetadata -def perform_python_review(metadata: Metadata, config: ApplicationConfig) -> ReviewResult: +def perform_python_review(metadata: Metadata, config: ApplicationConfig) -> GeneralReviewResult: created_file_paths = [] if isinstance(metadata, ProjectMetadata): created_file_paths.extend(create_init_scripts_in_subdirs(metadata.path)) diff --git a/src/python/review/reviewers/review_result.py b/src/python/review/reviewers/review_result.py index 8ca9ff23..5853864f 100644 --- a/src/python/review/reviewers/review_result.py +++ b/src/python/review/reviewers/review_result.py @@ -8,23 +8,17 @@ @dataclass 
-class FileReviewResult: - file_path: Path - issues: List[BaseIssue] +class ReviewResult: quality: Quality punisher: Punisher + issues: List[BaseIssue] @dataclass -class ReviewResult: - file_review_results: List[FileReviewResult] - general_quality: Quality - general_punisher: Punisher +class FileReviewResult(ReviewResult): + file_path: Path - @property - def all_issues(self) -> List[BaseIssue]: - issues = [] - for file_review_result in self.file_review_results: - issues.extend(file_review_result.issues) - return issues +@dataclass +class GeneralReviewResult(ReviewResult): + file_review_results: List[FileReviewResult] diff --git a/src/python/review/reviewers/utils/print_review.py b/src/python/review/reviewers/utils/print_review.py index 66a5f2a3..68f1e805 100644 --- a/src/python/review/reviewers/utils/print_review.py +++ b/src/python/review/reviewers/utils/print_review.py @@ -9,16 +9,15 @@ from src.python.review.common.file_system import get_file_line from src.python.review.inspectors.inspector_type import InspectorType from src.python.review.inspectors.issue import BaseIssue, IssueType -from src.python.review.reviewers.review_result import ReviewResult +from src.python.review.quality.model import QualityType +from src.python.review.reviewers.review_result import FileReviewResult, GeneralReviewResult, ReviewResult -def print_review_result_as_text(review_result: ReviewResult, - path: Path, - config: ApplicationConfig) -> None: - heading = f'\nReview of {str(path)} ({len(review_result.all_issues)} violations)' +def print_review_result_as_text(review_result: GeneralReviewResult, path: Path, config: ApplicationConfig) -> None: + heading = f'\nReview of {str(path)} ({len(review_result.issues)} violations)' print(heading) - if len(review_result.all_issues) == 0: + if len(review_result.issues) == 0: print('There is no issues found') else: for file_review_result in review_result.file_review_results: @@ -51,73 +50,89 @@ def print_review_result_as_text(review_result: 
ReviewResult, print('*' * len(heading)) print('General quality:') - print(review_result.general_quality, end='') + print(review_result.quality, end='') -def print_review_result_as_json(review_result: ReviewResult, config: ApplicationConfig) -> None: - issues = review_result.all_issues +def _get_quality_without_penalty(review_result: ReviewResult): + return review_result.quality.quality_type + +def _get_quality_with_penalty(review_result: ReviewResult): + quality_without_penalty = _get_quality_without_penalty(review_result) + return review_result.punisher.get_quality_with_penalty(quality_without_penalty) + + +def get_quality_json_dict(quality: QualityType) -> Dict: + return { + OutputJsonFields.CODE.value: quality.value, + OutputJsonFields.TEXT.value: f'Code quality (beta): {quality.value}', + } + + +def get_influence_json_dict(origin_class: str, review_result: ReviewResult) -> int: + quality_without_penalty = _get_quality_without_penalty(review_result) + quality_with_penalty = _get_quality_with_penalty(review_result) + + if quality_with_penalty != quality_without_penalty: + return review_result.punisher.get_issue_influence_on_penalty(origin_class) + + return 0 + + +def convert_review_result_to_json_dict(review_result: ReviewResult, config: ApplicationConfig) -> Dict: + issues = review_result.issues issues.sort(key=lambda issue: issue.line_no) - quality_without_penalty = review_result.general_quality.quality_type - quality_with_penalty = review_result.general_punisher.get_quality_with_penalty(quality_without_penalty) - output_json = {'quality': { - 'code': quality_with_penalty.value, - 'text': f'Code quality (beta): {quality_with_penalty.value}', - }, 'issues': []} + quality_with_penalty = _get_quality_with_penalty(review_result) - for issue in issues: - influence_on_penalty = 0 - if quality_with_penalty != quality_without_penalty: - influence_on_penalty = review_result.general_punisher.get_issue_influence_on_penalty(issue.origin_class) + output_json = {} - 
output_json['issues'].append(convert_issue_to_json(issue, config, influence_on_penalty)) + if isinstance(review_result, FileReviewResult): + output_json[OutputJsonFields.FILE_NAME.value] = str(review_result.file_path) - print(json.dumps(output_json)) + output_json[OutputJsonFields.QUALITY.value] = get_quality_json_dict(quality_with_penalty) + output_json[OutputJsonFields.ISSUES.value] = [] + for issue in issues: + json_issue = convert_issue_to_json(issue, config) -def print_review_result_as_multi_file_json(review_result: ReviewResult, config: ApplicationConfig) -> None: - file_review_result_jsons = [] + json_issue[OutputJsonFields.INFLUENCE_ON_PENALTY.value] = get_influence_json_dict( + issue.origin_class, review_result, + ) - review_result.file_review_results.sort(key=lambda result: result.file_path) + output_json[OutputJsonFields.ISSUES.value].append(json_issue) - for file_review_result in review_result.file_review_results: - quality_without_penalty = file_review_result.quality.quality_type - quality_with_penalty = file_review_result.punisher.get_quality_with_penalty(quality_without_penalty) - file_review_result_json = { - 'file_name': str(file_review_result.file_path), - 'quality': { - 'code': quality_with_penalty.value, - 'text': f'Code quality (beta): {quality_with_penalty.value}', - }, - 'issues': [], - } + return output_json - file_review_result_jsons.append(file_review_result_json) - for issue in file_review_result.issues: - influence_on_penalty = 0 - if quality_with_penalty != quality_without_penalty: - influence_on_penalty = file_review_result.punisher.get_issue_influence_on_penalty(issue.origin_class) +def print_review_result_as_json(review_result: GeneralReviewResult, config: ApplicationConfig) -> None: + print(json.dumps(convert_review_result_to_json_dict(review_result, config))) - file_review_result_json['issues'].append(convert_issue_to_json(issue, config, influence_on_penalty)) - quality_without_penalty = review_result.general_quality.quality_type - 
quality_with_penalty = review_result.general_punisher.get_quality_with_penalty(quality_without_penalty) +def print_review_result_as_multi_file_json(review_result: GeneralReviewResult, config: ApplicationConfig) -> None: + review_result.file_review_results.sort(key=lambda result: result.file_path) + + file_review_result_jsons = [] + for file_review_result in review_result.file_review_results: + file_review_result_jsons.append(convert_review_result_to_json_dict(file_review_result, config)) + + quality_with_penalty = _get_quality_with_penalty(review_result) output_json = { - 'quality': { - 'code': quality_with_penalty.value, - 'text': f'Code quality (beta): {quality_with_penalty.value}', - }, - 'file_review_results': file_review_result_jsons, + OutputJsonFields.QUALITY.value: get_quality_json_dict(quality_with_penalty), + OutputJsonFields.FILE_REVIEW_RESULTS.value: file_review_result_jsons, } print(json.dumps(output_json)) @unique -class IssueJsonFields(Enum): +class OutputJsonFields(Enum): + QUALITY = 'quality' + ISSUES = 'issues' + FILE_REVIEW_RESULTS = 'file_review_results' + FILE_NAME = 'file_name' + CODE = 'code' TEXT = 'text' LINE = 'line' @@ -127,7 +142,7 @@ class IssueJsonFields(Enum): INFLUENCE_ON_PENALTY = 'influence_on_penalty' -def convert_issue_to_json(issue: BaseIssue, config: ApplicationConfig, influence_on_penalty: int = 0) -> Dict[str, Any]: +def convert_issue_to_json(issue: BaseIssue, config: ApplicationConfig) -> Dict[str, Any]: line_text = get_file_line(issue.file_path, issue.line_no) issue_type = issue.type @@ -135,13 +150,12 @@ def convert_issue_to_json(issue: BaseIssue, config: ApplicationConfig, influence issue_type = issue_type.to_main_type() return { - IssueJsonFields.CODE.value: issue.origin_class, - IssueJsonFields.TEXT.value: issue.description, - IssueJsonFields.LINE.value: line_text, - IssueJsonFields.LINE_NUMBER.value: issue.line_no, - IssueJsonFields.COLUMN_NUMBER.value: issue.column_no, - IssueJsonFields.CATEGORY.value: 
issue_type.value, - IssueJsonFields.INFLUENCE_ON_PENALTY.value: influence_on_penalty, + OutputJsonFields.CODE.value: issue.origin_class, + OutputJsonFields.TEXT.value: issue.description, + OutputJsonFields.LINE.value: line_text, + OutputJsonFields.LINE_NUMBER.value: issue.line_no, + OutputJsonFields.COLUMN_NUMBER.value: issue.column_no, + OutputJsonFields.CATEGORY.value: issue_type.value, } @@ -151,15 +165,15 @@ def convert_json_to_issues(issues_json: List[dict]) -> List[PenaltyIssue]: for issue in issues_json: issues.append( PenaltyIssue( - origin_class=issue[IssueJsonFields.CODE.value], - description=issue[IssueJsonFields.TEXT.value], - line_no=int(issue[IssueJsonFields.LINE_NUMBER.value]), - column_no=int(issue[IssueJsonFields.COLUMN_NUMBER.value]), - type=IssueType(issue[IssueJsonFields.CATEGORY.value]), + origin_class=issue[OutputJsonFields.CODE.value], + description=issue[OutputJsonFields.TEXT.value], + line_no=int(issue[OutputJsonFields.LINE_NUMBER.value]), + column_no=int(issue[OutputJsonFields.COLUMN_NUMBER.value]), + type=IssueType(issue[OutputJsonFields.CATEGORY.value]), file_path=Path(), inspector_type=InspectorType.UNDEFINED, - influence_on_penalty=issue.get(IssueJsonFields.INFLUENCE_ON_PENALTY.value, 0), + influence_on_penalty=issue.get(OutputJsonFields.INFLUENCE_ON_PENALTY.value, 0), ), ) return issues diff --git a/test/python/functional_tests/test_single_file_json_format.py b/test/python/functional_tests/test_single_file_json_format.py index 85616145..1a5bc8ca 100644 --- a/test/python/functional_tests/test_single_file_json_format.py +++ b/test/python/functional_tests/test_single_file_json_format.py @@ -13,6 +13,7 @@ 'code': {'type': 'string'}, 'text': {'type': 'string'}, }, + 'additionalProperties': False, }, 'issues': { 'type': 'array', @@ -25,10 +26,13 @@ 'line': {'type': 'string'}, 'line_number': {'type': 'number'}, 'text': {'type': 'string'}, + 'influence_on_penalty': {'type': 'number'}, }, + 'additionalProperties': False, }, }, }, + 
'additionalProperties': False, } From 9ff00f1cd59bbb412fa682febe294869072feb7a Mon Sep 17 00:00:00 2001 From: Ilya Vlasov Date: Thu, 5 Aug 2021 10:26:22 +0300 Subject: [PATCH 02/11] get_influence_json_dict -> get_influence_on_penalty_json_dict --- src/python/review/reviewers/utils/print_review.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/review/reviewers/utils/print_review.py b/src/python/review/reviewers/utils/print_review.py index 68f1e805..23c5b839 100644 --- a/src/python/review/reviewers/utils/print_review.py +++ b/src/python/review/reviewers/utils/print_review.py @@ -69,7 +69,7 @@ def get_quality_json_dict(quality: QualityType) -> Dict: } -def get_influence_json_dict(origin_class: str, review_result: ReviewResult) -> int: +def get_influence_on_penalty_json_dict(origin_class: str, review_result: ReviewResult) -> int: quality_without_penalty = _get_quality_without_penalty(review_result) quality_with_penalty = _get_quality_with_penalty(review_result) @@ -96,7 +96,7 @@ def convert_review_result_to_json_dict(review_result: ReviewResult, config: Appl for issue in issues: json_issue = convert_issue_to_json(issue, config) - json_issue[OutputJsonFields.INFLUENCE_ON_PENALTY.value] = get_influence_json_dict( + json_issue[OutputJsonFields.INFLUENCE_ON_PENALTY.value] = get_influence_on_penalty_json_dict( issue.origin_class, review_result, ) From 144b828a7dd863a40f6272647697fd463ea7d841 Mon Sep 17 00:00:00 2001 From: Ilya Vlasov Date: Thu, 5 Aug 2021 11:31:37 +0300 Subject: [PATCH 03/11] Fixed the old test and added a new one --- .../test_single_file_json_format.py | 82 ++++++++++++++++--- .../json_format/code_with_issues.py | 7 -- 2 files changed, 71 insertions(+), 18 deletions(-) delete mode 100644 test/resources/functional_tests/json_format/code_with_issues.py diff --git a/test/python/functional_tests/test_single_file_json_format.py b/test/python/functional_tests/test_single_file_json_format.py index 1a5bc8ca..704b6285 100644 --- 
a/test/python/functional_tests/test_single_file_json_format.py +++ b/test/python/functional_tests/test_single_file_json_format.py @@ -1,47 +1,107 @@ import json import subprocess -from test.python.functional_tests.conftest import DATA_PATH, LocalCommandBuilder +from test.Ашч.functional_tests.conftest import DATA_PATH, LocalCommandBuilder +import pytest from jsonschema import validate -schema = { +OLD_FORMAT_SCHEMA = { 'type': 'object', + 'additionalProperties': False, 'properties': { 'quality': { 'type': 'object', + 'additionalProperties': False, 'properties': { 'code': {'type': 'string'}, 'text': {'type': 'string'}, }, - 'additionalProperties': False, }, 'issues': { 'type': 'array', + 'additionalItems': False, 'items': { 'type': 'object', + 'additionalProperties': False, 'properties': { 'category': {'type': 'string'}, 'code': {'type': 'string'}, - 'column_number': {'type': 'number'}, + 'column_number': {'type': 'integer'}, 'line': {'type': 'string'}, - 'line_number': {'type': 'number'}, + 'line_number': {'type': 'integer'}, 'text': {'type': 'string'}, - 'influence_on_penalty': {'type': 'number'}, + 'influence_on_penalty': {'type': 'integer'}, }, - 'additionalProperties': False, }, }, }, - 'additionalProperties': False, } +NEW_FORMAT_SCHEMA = { + "type": "object", + "additionalProperties": False, + "properties": { + "quality": { + "type": "object", + "additionalProperties": False, + "properties": { + "code": {"type": "string"}, + "text": {"type": "string"}, + }, + }, + "file_review_results": { + "type": "array", + "additionalItems": False, + "items": { + "type": "object", + "additionalProperties": False, + "properties": { + "file_name": {"type": "string"}, + "quality": { + "type": "object", + "additionalProperties": False, + "properties": { + "code": {"type": "string"}, + "text": {"type": "string"}, + }, + }, + "issues": { + "type": "array", + "additionalItems": False, + "items": { + "type": "object", + "additionalProperties": False, + "properties": { + "code": 
{"type": "string"}, + "text": {"type": "string"}, + "line": {"type": "string"}, + "line_number": {"type": "integer"}, + "column_number": {"type": "integer"}, + "category": {"type": "string"}, + "influence_on_penalty": {"type": "integer"}, + }, + }, + }, + }, + }, + }, + }, +} + +JSON_FORMAT_TEST_DATA = [ + (False, OLD_FORMAT_SCHEMA), + (True, NEW_FORMAT_SCHEMA), +] + -def test_json_format(local_command: LocalCommandBuilder): - file_path = DATA_PATH / 'json_format' / 'code_with_issues.py' +@pytest.mark.parametrize(('new_format', 'schema'), JSON_FORMAT_TEST_DATA) +def test_json_format(local_command: LocalCommandBuilder, new_format: bool, schema: str): + project_path = DATA_PATH / 'file_or_project' / 'project' local_command.verbosity = 0 local_command.format = 'json' - local_command.path = file_path + local_command.path = project_path + local_command.new_format = new_format process = subprocess.run( local_command.build(), diff --git a/test/resources/functional_tests/json_format/code_with_issues.py b/test/resources/functional_tests/json_format/code_with_issues.py deleted file mode 100644 index ff6359b1..00000000 --- a/test/resources/functional_tests/json_format/code_with_issues.py +++ /dev/null @@ -1,7 +0,0 @@ -a, b, c = (input(),input(),input()) -print(f'{a} {b} {c}') - -print('1', '2', '3', sep = '|') - -result = int(a)+10 -print(result) \ No newline at end of file From d55f794f2511fac98f3ea83e512c61ef7564a3b6 Mon Sep 17 00:00:00 2001 From: Ilya Vlasov Date: Thu, 5 Aug 2021 11:31:54 +0300 Subject: [PATCH 04/11] typo fix --- test/python/functional_tests/test_single_file_json_format.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/python/functional_tests/test_single_file_json_format.py b/test/python/functional_tests/test_single_file_json_format.py index 704b6285..ee1caad4 100644 --- a/test/python/functional_tests/test_single_file_json_format.py +++ b/test/python/functional_tests/test_single_file_json_format.py @@ -1,6 +1,6 @@ import json import 
subprocess -from test.Ашч.functional_tests.conftest import DATA_PATH, LocalCommandBuilder +from test.python.functional_tests.conftest import DATA_PATH, LocalCommandBuilder import pytest from jsonschema import validate From fd71d5e9fda996a1cfd4b59c7d5982ab5e533159 Mon Sep 17 00:00:00 2001 From: Ilya Vlasov Date: Thu, 5 Aug 2021 11:32:42 +0300 Subject: [PATCH 05/11] Renamed file --- .../{test_single_file_json_format.py => test_json_format.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename test/python/functional_tests/{test_single_file_json_format.py => test_json_format.py} (100%) diff --git a/test/python/functional_tests/test_single_file_json_format.py b/test/python/functional_tests/test_json_format.py similarity index 100% rename from test/python/functional_tests/test_single_file_json_format.py rename to test/python/functional_tests/test_json_format.py From 3cde7098d996f01fb5dac456baf2c0511a746090 Mon Sep 17 00:00:00 2001 From: Ilya Vlasov Date: Thu, 5 Aug 2021 12:03:51 +0300 Subject: [PATCH 06/11] Fixed quotes --- .../functional_tests/test_json_format.py | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/test/python/functional_tests/test_json_format.py b/test/python/functional_tests/test_json_format.py index ee1caad4..65383336 100644 --- a/test/python/functional_tests/test_json_format.py +++ b/test/python/functional_tests/test_json_format.py @@ -38,47 +38,47 @@ } NEW_FORMAT_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "quality": { - "type": "object", - "additionalProperties": False, - "properties": { - "code": {"type": "string"}, - "text": {"type": "string"}, + 'type': 'object', + 'additionalProperties': False, + 'properties': { + 'quality': { + 'type': 'object', + 'additionalProperties': False, + 'properties': { + 'code': {'type': 'string'}, + 'text': {'type': 'string'}, }, }, - "file_review_results": { - "type": "array", - "additionalItems": False, - "items": { - "type": 
"object", - "additionalProperties": False, - "properties": { - "file_name": {"type": "string"}, - "quality": { - "type": "object", - "additionalProperties": False, - "properties": { - "code": {"type": "string"}, - "text": {"type": "string"}, + 'file_review_results': { + 'type': 'array', + 'additionalItems': False, + 'items': { + 'type': 'object', + 'additionalProperties': False, + 'properties': { + 'file_name': {'type': 'string'}, + 'quality': { + 'type': 'object', + 'additionalProperties': False, + 'properties': { + 'code': {'type': 'string'}, + 'text': {'type': 'string'}, }, }, - "issues": { - "type": "array", - "additionalItems": False, - "items": { - "type": "object", - "additionalProperties": False, - "properties": { - "code": {"type": "string"}, - "text": {"type": "string"}, - "line": {"type": "string"}, - "line_number": {"type": "integer"}, - "column_number": {"type": "integer"}, - "category": {"type": "string"}, - "influence_on_penalty": {"type": "integer"}, + 'issues': { + 'type': 'array', + 'additionalItems': False, + 'items': { + 'type': 'object', + 'additionalProperties': False, + 'properties': { + 'code': {'type': 'string'}, + 'text': {'type': 'string'}, + 'line': {'type': 'string'}, + 'line_number': {'type': 'integer'}, + 'column_number': {'type': 'integer'}, + 'category': {'type': 'string'}, + 'influence_on_penalty': {'type': 'integer'}, }, }, }, From 012456bcddfaec771e31280cf6488d5e51956368 Mon Sep 17 00:00:00 2001 From: Ilya Vlasov Date: Thu, 5 Aug 2021 13:51:57 +0300 Subject: [PATCH 07/11] Added a new test and refactored the old one --- .../functional_tests/test_json_format.py | 118 +++++++++++++++--- .../test_multi_file_project.py | 85 ------------- 2 files changed, 102 insertions(+), 101 deletions(-) delete mode 100644 test/python/functional_tests/test_multi_file_project.py diff --git a/test/python/functional_tests/test_json_format.py b/test/python/functional_tests/test_json_format.py index 65383336..ee5498d9 100644 --- 
a/test/python/functional_tests/test_json_format.py +++ b/test/python/functional_tests/test_json_format.py @@ -5,6 +5,25 @@ import pytest from jsonschema import validate + +def _get_output_json(local_command: LocalCommandBuilder, new_format: bool) -> str: + project_path = DATA_PATH / 'file_or_project' / 'project' + + local_command.verbosity = 0 + local_command.format = 'json' + local_command.path = project_path + local_command.new_format = new_format + + process = subprocess.run( + local_command.build(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout = process.stdout.decode() + + return json.loads(stdout) + + OLD_FORMAT_SCHEMA = { 'type': 'object', 'additionalProperties': False, @@ -88,27 +107,94 @@ }, } -JSON_FORMAT_TEST_DATA = [ +JSON_FORMAT_SCHEMA_TEST_DATA = [ (False, OLD_FORMAT_SCHEMA), (True, NEW_FORMAT_SCHEMA), ] -@pytest.mark.parametrize(('new_format', 'schema'), JSON_FORMAT_TEST_DATA) -def test_json_format(local_command: LocalCommandBuilder, new_format: bool, schema: str): - project_path = DATA_PATH / 'file_or_project' / 'project' +@pytest.mark.parametrize(('new_format', 'schema'), JSON_FORMAT_SCHEMA_TEST_DATA) +def test_json_format_schema(local_command: LocalCommandBuilder, new_format: bool, schema: str): + validate(_get_output_json(local_command, new_format), schema) - local_command.verbosity = 0 - local_command.format = 'json' - local_command.path = project_path - local_command.new_format = new_format - process = subprocess.run( - local_command.build(), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - stdout = process.stdout.decode() +ISSUES = [ + { + 'code': 'W0612', + 'text': 'Unused variable \'a\'', + 'line': 'a = 1', + 'line_number': 2, + 'column_number': 5, + 'category': 'BEST_PRACTICES', + 'influence_on_penalty': 0, + }, + { + 'code': 'W0612', + 'text': 'Unused variable \'b\'', + 'line': 'b = 2', + 'line_number': 3, + 'column_number': 5, + 'category': 'BEST_PRACTICES', + 'influence_on_penalty': 0, + }, + { + 'code': 
'W0612', + 'text': 'Unused variable \'c\'', + 'line': 'c = 3', + 'line_number': 4, + 'column_number': 5, + 'category': 'BEST_PRACTICES', + 'influence_on_penalty': 0, + }, +] + +OLD_FORMAT_EXPECTED_JSON = { + 'quality': { + 'code': 'EXCELLENT', + 'text': 'Code quality (beta): EXCELLENT', + }, + 'issues': ISSUES, +} + +NEW_FORMAT_EXPECTED_JSON = { + 'quality': { + 'code': 'EXCELLENT', + 'text': 'Code quality (beta): EXCELLENT', + }, + 'file_review_results': [ + { + 'file_name': '__init__.py', + 'quality': { + 'code': 'EXCELLENT', + 'text': 'Code quality (beta): EXCELLENT', + }, + 'issues': [], + }, + { + 'file_name': 'one.py', + 'quality': { + 'code': 'EXCELLENT', + 'text': 'Code quality (beta): EXCELLENT', + }, + 'issues': [], + }, + { + 'file_name': 'other.py', + 'quality': { + 'code': 'GOOD', + 'text': 'Code quality (beta): GOOD', + }, + 'issues': ISSUES, + }, + ], +} + +JSON_FORMAT_TEST_DATA = [ + (False, OLD_FORMAT_EXPECTED_JSON), + (True, NEW_FORMAT_EXPECTED_JSON), +] + - output_json = json.loads(stdout) - validate(output_json, schema) +@pytest.mark.parametrize(('new_format', 'expected_json'), JSON_FORMAT_TEST_DATA) +def test_json_format(local_command: LocalCommandBuilder, new_format: bool, expected_json: str): + assert _get_output_json(local_command, new_format) == expected_json diff --git a/test/python/functional_tests/test_multi_file_project.py b/test/python/functional_tests/test_multi_file_project.py deleted file mode 100644 index 3f4178d5..00000000 --- a/test/python/functional_tests/test_multi_file_project.py +++ /dev/null @@ -1,85 +0,0 @@ -import json -import subprocess -from test.python.functional_tests.conftest import DATA_PATH, LocalCommandBuilder - -EXPECTED_JSON = { - 'quality': { - 'code': 'EXCELLENT', - 'text': 'Code quality (beta): EXCELLENT', - }, - 'file_review_results': [ - { - 'file_name': '__init__.py', - 'quality': { - 'code': 'EXCELLENT', - 'text': 'Code quality (beta): EXCELLENT', - }, - 'issues': [], - }, - { - 'file_name': 'one.py', - 
'quality': { - 'code': 'EXCELLENT', - 'text': 'Code quality (beta): EXCELLENT', - }, - 'issues': [], - }, - { - 'file_name': 'other.py', - 'quality': { - 'code': 'GOOD', - 'text': 'Code quality (beta): GOOD', - }, - 'issues': [ - { - 'code': 'W0612', - 'text': 'Unused variable \'a\'', - 'line': 'a = 1', - 'line_number': 2, - 'column_number': 5, - 'category': 'BEST_PRACTICES', - 'influence_on_penalty': 0, - }, - { - 'code': 'W0612', - 'text': 'Unused variable \'b\'', - 'line': 'b = 2', - 'line_number': 3, - 'column_number': 5, - 'category': 'BEST_PRACTICES', - 'influence_on_penalty': 0, - }, - { - 'code': 'W0612', - 'text': 'Unused variable \'c\'', - 'line': 'c = 3', - 'line_number': 4, - 'column_number': 5, - 'category': 'BEST_PRACTICES', - 'influence_on_penalty': 0, - }, - ], - }, - ], -} - - -def test_json_format(local_command: LocalCommandBuilder): - file_path = DATA_PATH / 'file_or_project' / 'project' - - local_command.verbosity = 0 - local_command.format = 'json' - local_command.path = file_path - local_command.new_format = True - - process = subprocess.run( - local_command.build(), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - stdout = process.stdout.decode() - print(stdout) - - output_json = json.loads(stdout) - - assert output_json == EXPECTED_JSON From 433e9fb4d3315ea9cf2fbf898ae0b920749baed3 Mon Sep 17 00:00:00 2001 From: Ilya Vlasov Date: Thu, 5 Aug 2021 17:04:21 +0300 Subject: [PATCH 08/11] Added typing --- src/python/review/reviewers/utils/print_review.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/review/reviewers/utils/print_review.py b/src/python/review/reviewers/utils/print_review.py index 23c5b839..66f1a67f 100644 --- a/src/python/review/reviewers/utils/print_review.py +++ b/src/python/review/reviewers/utils/print_review.py @@ -53,11 +53,11 @@ def print_review_result_as_text(review_result: GeneralReviewResult, path: Path, print(review_result.quality, end='') -def 
_get_quality_without_penalty(review_result: ReviewResult): +def _get_quality_without_penalty(review_result: ReviewResult) -> QualityType: return review_result.quality.quality_type -def _get_quality_with_penalty(review_result: ReviewResult): +def _get_quality_with_penalty(review_result: ReviewResult) -> QualityType: quality_without_penalty = _get_quality_without_penalty(review_result) return review_result.punisher.get_quality_with_penalty(quality_without_penalty) From b497cb852ea4a2251750553cd53fe119b84eff23 Mon Sep 17 00:00:00 2001 From: Ilya Vlasov <55441714+GirZ0n@users.noreply.github.com> Date: Tue, 10 Aug 2021 11:53:23 +0300 Subject: [PATCH 09/11] Update README.md --- src/python/evaluation/plots/README.md | 58 ++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/src/python/evaluation/plots/README.md b/src/python/evaluation/plots/README.md index 6c8c3f11..3608e757 100644 --- a/src/python/evaluation/plots/README.md +++ b/src/python/evaluation/plots/README.md @@ -1,7 +1,7 @@ # Hyperstyle evaluation: plots -This module allows you to visualize the data obtained with the [inspectors](../inspectors) module +This module allows you to visualize the data. -## [diffs_plotter.py](diffs_plotter.py) +## Diffs plotter This script allows you to visualize a dataset obtained with [diffs_between_df.py](../inspectors/diffs_between_df.py). The script can build the following charts: @@ -83,3 +83,57 @@ The result will be four graphs (`unique_issues_by_category`, `unique_penalty_iss #### Distribution of influence on penalty by category + +## Raw issues statistics plotter +This script allows you to visualize a dataset obtained with [get_raw_issues_statistics.py](../issues_statistics/get_raw_issues_statistics.py). 
+
+The script can build the following charts:
+* Line chart ([Example](#number-of-unique-issues-by-category)) # TODO
+* Box plot ([Example](#number-of-issues-by-category)) # TODO
+* Histogram ([Example](#number-of-unique-penalty-issues-by-category)) # TODO
+
+### Usage
+Run the [diffs_plotter.py](diffs_plotter.py) with the arguments from command line.
+
+**Required arguments**:
+1. `stats` — path to a file with stats that were found by [get_raw_issues_statistics.py](../issues_statistics/get_raw_issues_statistics.py).
+2. `save_dir` — directory where the plotted charts will be saved.
+3. `config_path` — path to the yaml file containing information about the charts to be plotted. A description of the config and its example is provided in [this section](#config).
+
+**Optional arguments**:
+
+Argument | Description
+--- | ---
+**‑‑file‑extension** | Allows you to select the extension of output files. Available extensions: `.png`, `.jpg`, `.jpeg`, `.webp`, `.svg`, `.pdf`, `.eps`, `.json`. Default is `.svg`.
+
+### Config
+The configuration file is a dictionary in yaml format, where for each column of the original dataset the types of graphs to be plotted are specified. You can also put the common parameters when plotting multiple graphs for one column in a separate `common` group.
+
+**Possible values of the charts**:
+* `line_chart`
+* `histogram`
+* `box_plot`
+
+**Possible parameters**:
+Parameter | Description
+---|---
+**x_axis_name** | Name of the x-axis. The default value depends on the type of chart.
+**y_axis_name** | Name of the y-axis. The default value depends on the type of chart.
+**boundaries** | Dictionary consisting of pairs `boundary value`: `boundary name` (boundary name may not exist). Allows to draw vertical or horizontal lines on graphs (depending on the type of plot). By default, the boundaries are not drawn.
+**range_of_values** | Allows you to filter the values. It is an array of two values: a and b. 
Only values that belong to the range [a, b) are taken into account when plotting. By default, all values are taken into account when plotting.
+**margin** | Defines the outer margin on all four sides of the chart. The available values are specified in the Enum class `MARGIN` from [plots const file](./common/plotly_consts.py). If not specified, the default value provided by Plotly is used.
+**sort_order** | Defines the sorting order of the chart. The available values are specified in the Enum class `SORT_ORDER` from [plots const file](./common/plotly_consts.py). If not specified, the default value provided by Plotly is used.
+**color** | Defines the color of the chart. The available values are specified in the Enum class `COLOR` from [plots const file](./common/plotly_consts.py). If not specified, the default value provided by Plotly is used.
+**n_bins** | Allows you to adjust the number of bins when plotting a box plot. By default, this value is set by Plotly.
+
+#### Example of config
+```yaml
+CYCLOMATIC_COMPLEXITY:
+  line_chart:
+    x_axis_name: Cyclomatic complexity value
+  histogram:
+    common:
+      range_of_values: [0, 20]
+```
+
+The result will be two graphs: line chart and histogram. The values in both charts will be between 0 and 19 inclusive. In the line chart the x-axis will be named "Cyclomatic complexity value". From c1cb0412c341395a39ec995888b6ce12f7e2effa Mon Sep 17 00:00:00 2001 From: Ilya Vlasov Date: Tue, 10 Aug 2021 12:00:30 +0300 Subject: [PATCH 10/11] Undo Update README.md --- src/python/evaluation/plots/README.md | 58 +-------------------------- 1 file changed, 2 insertions(+), 56 deletions(-) diff --git a/src/python/evaluation/plots/README.md b/src/python/evaluation/plots/README.md index 3608e757..6c8c3f11 100644 --- a/src/python/evaluation/plots/README.md +++ b/src/python/evaluation/plots/README.md @@ -1,7 +1,7 @@ # Hyperstyle evaluation: plots -This module allows you to visualize the data. 
+
+This module allows you to visualize the data obtained with the [inspectors](../inspectors) module
-## Diffs plotter
+## [diffs_plotter.py](diffs_plotter.py)
 This script allows you to visualize a dataset obtained with [diffs_between_df.py](../inspectors/diffs_between_df.py). The script can build the following charts: @@ -83,3 +83,57 @@ The result will be four graphs (`unique_issues_by_category`, `unique_penalty_iss #### Distribution of influence on penalty by category
-
-## Raw issues statistics plotter
-This script allows you to visualize a dataset obtained with [get_raw_issues_statistics.py](../issues_statistics/get_raw_issues_statistics.py).
-
-The script can build the following charts:
-* Line chart ([Example](#number-of-unique-issues-by-category)) # TODO
-* Box plot ([Example](#number-of-issues-by-category)) # TODO
-* Histogram ([Example](#number-of-unique-penalty-issues-by-category)) # TODO
-
-### Usage
-Run the [diffs_plotter.py](diffs_plotter.py) with the arguments from command line.
-
-**Required arguments**:
-1. `stats` — path to a file with stats that were found by [get_raw_issues_statistics.py](../issues_statistics/get_raw_issues_statistics.py).
-2. `save_dir` — directory where the plotted charts will be saved.
-3. `config_path` — path to the yaml file containing information about the charts to be plotted. A description of the config and its example is provided in [this section](#config).
-
-**Optional arguments**:
-
-Argument | Description
---- | ---
-**‑‑file‑extension** | Allows you to select the extension of output files. Available extensions: `.png`, `.jpg`, `.jpeg`, `.webp`, `.svg`, `.pdf`, `.eps`, `.json`. Default is `.svg`.
-
-### Config
-The configuration file is a dictionary in yaml format, where for each column of the original dataset the types of graphs to be plotted are specified. You can also put the common parameters when plotting multiple graphs for one column in a separate `common` group. 
-
-**Possible values of the charts**:
-* `line_chart`
-* `histogram`
-* `box_plot`
-
-**Possible parameters**:
-Parameter | Description
----|---
-**x_axis_name** | Name of the x-axis. The default value depends on the type of chart.
-**y_axis_name** | Name of the y-axis. The default value depends on the type of chart.
-**boundaries** | Dictionary consisting of pairs `boundary value`: `boundary name` (boundary name may not exist). Allows to draw vertical or horizontal lines on graphs (depending on the type of plot). By default, the boundaries are not drawn.
-**range_of_values** | Allows you to filter the values. It is an array of two values: a and b. Only values that belong to the range [a, b) are taken into account when plotting. By default, all values are taken into account when plotting.
-**margin** | Defines the outer margin on all four sides of the chart. The available values are specified in the Enum class `MARGIN` from [plots const file](./common/plotly_consts.py). If not specified, the default value provided by Plotly is used.
-**sort_order** | Defines the sorting order of the chart. The available values are specified in the Enum class `SORT_ORDER` from [plots const file](./common/plotly_consts.py). If not specified, the default value provided by Plotly is used.
-**color** | Defines the color of the chart. The available values are specified in the Enum class `COLOR` from [plots const file](./common/plotly_consts.py). If not specified, the default value provided by Plotly is used.
-**n_bins** | Allows you to adjust the number of bins when plotting a box plot. By default, this value is set by Plotly.
-
-#### Example of config
-```yaml
-CYCLOMATIC_COMPLEXITY:
-  line_chart:
-    x_axis_name: Cyclomatic complexity value
-  histogram:
-    common:
-      range_of_values: [0, 20]
-```
-
-The result will be two graphs: line chart and histogram. The values in both charts will be between 0 and 19 inclusive. In the line chart the x-axis will be named "Cyclomatic complexity value". 
From 1eebc0aaa0d03ef848f3ca65360b6888ba406fba Mon Sep 17 00:00:00 2001 From: Ilya Vlasov Date: Tue, 10 Aug 2021 13:11:27 +0300 Subject: [PATCH 11/11] Fixed PR issues --- src/python/review/reviewers/review_result.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/python/review/reviewers/review_result.py b/src/python/review/reviewers/review_result.py index 5853864f..93a6e9e4 100644 --- a/src/python/review/reviewers/review_result.py +++ b/src/python/review/reviewers/review_result.py @@ -9,6 +9,9 @@ @dataclass class ReviewResult: + """ + ReviewResult contains a list of issues, as well as quality and punisher obtained with these issues. + """ quality: Quality punisher: Punisher issues: List[BaseIssue] @@ -16,9 +19,15 @@ class ReviewResult: @dataclass class FileReviewResult(ReviewResult): + """ + FileReviewResult contains the information needed to output about a particular inspected file. + """ file_path: Path @dataclass class GeneralReviewResult(ReviewResult): + """ + GeneralReviewResult contains the information needed to output about the entire inspected project. + """ file_review_results: List[FileReviewResult]