diff --git a/.dotstop.dot b/.dotstop.dot index 0fba9e5fb3..c6ae574271 100644 --- a/.dotstop.dot +++ b/.dotstop.dot @@ -56,6 +56,7 @@ digraph G { "JLS-24" [sha=b16224d3ab676c00b313ae91760072d92aed9f20da99b363621effa3e033e012]; "JLS-25" [sha="8bb517191450f370679dbafd85342e1bbcf797cc84f2a6f1fc119568b534d5e0"]; "JLS-26" [sha=cf1b73b375697ee56d9788aab79ed01b2730b126a2cc4d7041c9525113e7ed7c]; +"JLS-27" [sha="efd4b438331c155eebaec96cd1eda337567794f8696b327562aaaed5fa8ded69"]; "NJF-01" [sha="548dc86014e093974f68660942daa231271496a471885bbed092a375b3079bd8"]; "NJF-02" [sha="6ea015646d696e3f014390ff41612eab66ac940f20cf27ce933cbadf8482d526"]; "NJF-03" [sha="4bd1f8210b7bba9a248055a437f377d9da0b7576c5e3ed053606cf8b5b2febe3"]; @@ -356,6 +357,7 @@ digraph G { "TA-BEHAVIOURS" -> "AOU-23" [sha=""]; "TA-BEHAVIOURS" -> "AOU-24" [sha=""]; "TA-BEHAVIOURS" -> "AOU-25" [sha=""]; +"TA-BEHAVIOURS" -> "JLS-27" [sha="9c3e4b4a4677ca22f28e2bb0cbc39c31676efa4c17ddc1fe66599589b83ef643"]; "TA-CONFIDENCE" -> "JLS-08" [sha="506164051180023c8533ea1f6dedf1bad894c3ee6020ff16b002e33b109c2791"]; "TA-CONFIDENCE" -> "JLS-09" [sha="80bbde95fc14f89acf3dad10b3831bc751943fe4a1d79d5cbf4702416c27530f"]; "TA-CONFIDENCE" -> "AOU-10_COMBINED" [sha="5e5d7dc606d53423fbb1f2d5755780c98839bdc2d108704af5ee1aed50403f5e"]; diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index ca0cb612a7..ae14fb01ad 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -14,9 +14,9 @@ Custom references are defined in `references.py`. A (custom) reference is used b ## CPPTestReference -The content of a `CPPTestReference` is given by the lines of code corresponding to a test-case or a section of a test-case in the unit-tests given in tests/src and TSF/tests. +The content of a `CPPTestReference` is given by the lines of code corresponding to a test-case or a section of a test-case in a specified unit-test-file. 
The sections are identified in the value of "name", where the nested sections are separated by semicolons. -For the `CPPTestReference` an example is: +For the `CPPTestReference` the expected configuration is: ``` --- ... @@ -30,9 +30,12 @@ references: ## JSONTestsuiteReference -The content of a `JSONTestsuiteReference` is given by the lines of code corresponding to a test-case or a section of a test-case in the unit tests, where a (list of) specified test-file(s) located on an external test-repository is utilized, and the content of these test-files. +The `JSONTestsuiteReference` is a variant of the function reference, which is augmented by an external file containing test-data in the form of well- or ill-formed JSON candidate data. +A `JSONTestsuiteReference` is therefore given by the data of a `CPPTestReference` together with a list containing the paths to these external files. +The external files are stored in a separate branch of the repository, and their text is loaded via call to github. +The content of a `JSONTestsuiteReference` is given by the content of the underlying `CPPTestReference` together with the sum of the contents of the external test-suite files. -For the `JSONTestsuiteReference` an example is: +For the `JSONTestsuiteReference` the expected configuration is: ``` --- ... @@ -65,7 +68,8 @@ references: --- ``` -Since functions may be overloaded, a `FunctionReference` can be initialised with an optional overload-parameter; additionally, it is possible to give a description. The full example is: +Since functions may be overloaded, a `FunctionReference` can be initialised with an optional overload-parameter. +The overload-parameter specifies which implementation of the function is referred to, i.e. 
if the overload-parameter for the function ``class::function()`` is set to _n_, then the _n_-th implementation when counting the occurrences from top to bottom of ``function()`` within the class ``class`` is used, if it exists; otherwise, an error is thrown. Additionally, it is possible, but not mandatory, to give a description. The full example is: ``` --- ... @@ -140,14 +144,6 @@ The content of a `TimeVaryingWebReference` is given by the content of a changelo An example of the complete configuration for `TimeVaryingWebReference` is -in case of a custom description. - -## TimeVaryingWebReference - -The content of a `TimeVaryingWebReference` is given by the content of a changelog, whose default value is `ChangeLog.md`, which mirrors the changelog of nlohmann/json. This reference is intended for websites, whose content is constantly changing, so that a `WebContentReference` makes the item un-reviewable, but whose content at the time of an update influences the trustability. An example is `https://github.com/nlohmann/json/pulse/monthly`, which can be used to demonstrate that nlohmann/json is *up to the most recent version* under active development. - -An example of the complete configuration for `TimeVaryingWebReference` is - ``` --- ... @@ -192,6 +188,26 @@ references: --- ``` +## workflow_failures + +This reference queries `https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure+branch%3A{self._branch}` and collects the number of failed workflow runs as its content. +Here, owner, repo and branch are the arguments given to the constructor of the reference. +If no branch is specified, then all failures are collected, i.e. `https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure` is queried. +In case the website is un-reachable, or the github layout changes drastically so that the number of failed workflow runs does not exist at the expected location, an error is thrown. + +The expected configuration is + +``` +--- +... 
+references: +- type: workflow_failures + owner: "eclipse-score" + repo: "inc_nlohmann_json" + branch: "json_version_3_12_0" +--- +``` + ## ItemReference Some references support every (directly or indirectly) supporting item of an item. @@ -281,7 +297,7 @@ The test-files are called unit-FILE_NAME.cpp. In the configuration, FILE_NAME is For each test specified in test-files, the number of passed and failed test-cases is calculated, while the number of skipped test-cases is ignored. The score of each test is then the ratio of passed test-cases compared to all non-skipped test-cases; the total score is the mean of the individual scores. -## issue_checker +## check_issues The automatic validator `check_issues` is intended to evaluate the json-lists `raw_open_issues.json` and `raw_closed_issues.json` and compare with the list of known issues of nlohmann/json labelled as bug opened since the release of the version of nlohmann/json that is documented. The json lists are generated in the publish_documentation-Workflow, and not persistently stored. @@ -304,6 +320,100 @@ From `raw_closed_issues.json`, all issue IDs are collected, which are labelled a If for any of these IDs, it is not explicitly indicated in the list of known misbehaviours that this issue does not apply to Eclipse S-CORE, then the score 0.0 is returned. Otherwise, the score 1.0 is assigned. +## did_workflows_fail + +The automatic validator `did_workflows_fail` queries the web-site `https://github.com/{owner}/{repo}/actions?query=event%3A{event}+is%3Afailure+branch%3A{branch}` and looks on the number of workflow run results which is printed at the head of the table. +In case that this number is not zero, a score of 0.0 is returned, and 1.0 otherwise. 
+ +The expected configuration is given as follows: + +``` +evidence: + type: did_workflows_fail + configuration: + owner: "eclipse-score" # owner of the repository + repo: "inc_nlohmann_json" # name of the repository + branch: "json_version_3_12_0" # name of the branch + action: "push" # optional, default is push +``` + +It is of utmost importance that the arguments come with quotation marks. Otherwise, the update helper does not work as intended. + +## coveralls_reporter + +The automatic validator `coveralls_reporter` queries the [coveralls](https://coveralls.io/) api to get the line and branch coverages calculated by the service, which is running on the repository. +Unless the version of `nlohmann/json` documented in this repository changes, it is expected that both coverage numbers remain constant. +When initialising the reference, the current code coverage is given as a parameter, to which the fetched coverages are compared. +If no branch is specified, then the most recently calculated coverage is fetched, so that it is generally recommended to specify a branch. +Moreover, it is possible to specify the number of decimal digits, which is defaulted to three, when not specified. +The validator returns a score of 1.0 if both fetched coverages rounded to the specified number of decimal digits coincide with the specified ones, and a score of 0.0 otherwise. + +The expected configuration is the following: + +``` +evidence: + type: coveralls_reporter + configuration: + owner: "score-json" + repo: "json" + branch: "main" + line_coverage: 99.186 + branch_coverage: 93.865 + digits: 3 +``` + +## combinator + +The trudag tool does currently not support the use of multiple custom validators for one single TSF item. To work around this, the validator `combinator` is implemented as a meta-validator that executes multiple validators and combines their scores using a weighted average. 
This enables the validation of complex trustable items that require evidence from multiple sources or validation methods. + +The combinator accepts a list of validators, each with its own configuration and optional weight. Each validator is executed independently, and their scores are combined using the formula: `(score1 * weight1 + score2 * weight2 + ...) / (weight1 + weight2 + ...)`. If no weights are specified, all validators are treated with equal weight (weight = 1.0). + +The combinator supports the following validator types: +- `check_artifact_exists` +- `https_response_time` +- `check_test_results` +- `file_exists` +- `sha_checker` +- `check_issues` +- `did_workflows_fail` +- `coveralls_reporter` + +The expected configuration is as follows: + +``` +evidence: + type: combinator + configuration: + validators: + - type: "check_test_results" + weight: 2.0 # optional, defaults to 1.0 + configuration: + tests: + - class_lexer + - unicode1 + - type: "https_response_time" + weight: 1.0 # optional, defaults to 1.0 + configuration: + target_seconds: 2 + urls: + - "https://github.com/nlohmann/json/issues" + - type: "coveralls_reporter" + weight: 1.5 # optional, defaults to 1.0 + configuration: + owner: "score-json" + repo: "json" + branch: "main" + line_coverage: 99.186 + branch_coverage: 93.865 + digits: 3 + - type: "did_workflows_fail" + configuration: + owner: "eclipse-score" + repo: "inc_nlohmann_json" + branch: "json_version_3_12_0" +``` + +All weights must be non-negative. If the sum of all weights is zero, the combinator returns a score of 0.0. The combinator aggregates all exceptions and warnings from the individual validators and returns them alongside the combined score. 
# Data store interface diff --git a/.dotstop_extensions/references.py b/.dotstop_extensions/references.py index 3865fbb0e4..80fb96d76e 100644 --- a/.dotstop_extensions/references.py +++ b/.dotstop_extensions/references.py @@ -3,6 +3,7 @@ from trudag.dotstop.core.reference.references import SourceSpanReference import requests import sqlite3 +import re # Constants MAX_JSON_LINES_FOR_DISPLAY = 25 @@ -545,7 +546,270 @@ def __str__(self) -> str: # this is used as a title in the trudag report return f"function: [{self._name}]\n({str(self.path)})" -from trudag.dotstop.core.reference.references import LocalFileReference as LFR +class ListOfTestCases(BaseReference): + + def __init__(self, test_files: list[str], recent_result_database: str = "artifacts/MemoryEfficientTestResults.db", recent_result_table: str = "test_results") -> None: + self._test_files = test_files + self._database = recent_result_database + self._table = recent_result_table + + @staticmethod + def compile_string(items: list[str]) -> str: + # input: list of strings representing the structure of TEST_CASE, SECTION etc., + # e.g. items = ["lexer class", "scan", "literal names"] + # output: the last item of the list, representing the most recent SECTION, + # indented as in the source code + # throws error if input is empty + if len(items) == 0: + raise RuntimeError("Received empty structural list; nonempty list expected.") + result = "" + for _ in range(1, len(items)): + result += " " + if items: + result += "* " + items[-1] + return result + + @staticmethod + def extract_quotation(s: str) -> str: + # input: string containing at least one quoted substring, e.g. s = "my \"input\"" + # output: the first quoted substring of the input + # throws error if no quoted substring can be found. 
+ first = s.find('"') + if first == -1: + raise RuntimeError("Expected quotation mark; none were detected.") + second = s.find('"', first + 1) + if second == -1: + raise RuntimeError("Expected quotation marks; only one was detected.") + return s[first + 1 : second] + + @staticmethod + def remove_and_count_indent(s: str) -> tuple[int, str]: + # input: string with possibly leading whitespace (space of horizontal tab) + # output: the number of leading spaces and the string with leading whitespace removed; + # tab counted as four spaces + cnt = 0 + i = 0 + n = len(s) + while i < n and (s[i] == " " or s[i] == "\t"): + if s[i] == " ": + cnt += 1 + elif s[i] == "\t": + cnt += 4 + i += 1 + return (cnt, s[i:]) + + @staticmethod + def head_of_list() -> str: + return """## List of all unit-tests with test environments + +This list contains all unit-tests possibly running in this project. +These tests are compiled from the source-code, where the individual unit-tests are arranged in TEST_CASEs containing possibly nested SECTIONs. +To reflect the structure of the nested sections, nested lists are utilised, where the top-level list represents the list of TEST_CASEs. + +It should be noted that not all unit-tests in a test-file are executed with every compiler-configuration. 
+""" + + @staticmethod + def transform_test_file_to_test_name(test_file: str) -> str: + return "test-"+"-".join((test_file.split('.')[0]).split('-')[1:]) + + @classmethod + def type(cls) -> str: + return "list_of_test_cases" + + def extract_test_structure(self, file_path: Path) -> str: + # input: path to a file potentially containing unit-tests + # output: the extracted arrangement of TEST_CASE and SECTION + # in the form of nested markdown lists + + indent = 0 # the indent of the currently read line + current_indent = 0 # the indent of the last TEST_CASE or SECTION + current_path = [] # the current path + lines_out = [] # the collection of lines to be outputted + + # open file_path as read-only, and process line by line + with file_path.open("r", encoding="utf-8", errors="replace") as source: + for line in source: + # count and remove leading whitespace + indent, trimmed = self.remove_and_count_indent(str(line)) + + # check whether we have found a TEST_CASE + if trimmed.startswith("TEST_CASE(") or trimmed.startswith("TEST_CASE_TEMPLATE(") or trimmed.startswith("TEST_CASE_TEMPLATE_DEFINE("): + # remember the current indent + current_indent = indent + # TEST_CASE is always the head of a new arrangement-structure + # remove stored structure + current_path.clear() + # extract name of TEST_CASE and append path + current_path.append(self.extract_quotation(trimmed)) + lines_out.append(self.compile_string(current_path)) + + # check whether we have found a SECTION + if trimmed.startswith("SECTION("): + # update path to reflect arrangement of current section + while indent <= current_indent and current_path: + current_path.pop() + current_indent -= 4 + # remember the current indent + current_indent = indent + # extract name of SECTION and append path + current_path.append(self.extract_quotation(trimmed)) + lines_out.append(self.compile_string(current_path)) + + # process extracted lines + return ("\n".join(lines_out) + "\n") if lines_out else "" + + def 
extract_recent_test_environments(self) -> dict: + """ + Extract recent test environment information from the test results database. + + This method connects to the SQLite database specified in self._database and queries + the table specified in self._table to retrieve information about test environments + where unit tests were executed. It categorizes the results into tests that ran + without skipping any test cases ('noskip') and tests where some cases were skipped ('skip'). + + The database is expected to have a table with columns: + - name: test file name (e.g., "test-example") + - compiler: compiler used (e.g., "gcc", "clang") + - cpp_standard: C++ standard used (e.g., "c++17", "c++20") + - skipped_cases: number of test cases that were skipped (0 means no skips) + + Returns: + dict: A dictionary where keys are test case names and values are dictionaries containing: + - "noskip": list of environments where all tests ran (no skipped cases) + - "skip": list of environments where some tests were skipped + Each environment entry contains compiler, standard, and (for skip) skipped count. 
+ + Raises: + RuntimeError: If the database cannot be accessed or the expected table doesn't exist + """ + fetched_data = dict() + connector = None + try: + # initialise connection to test result database + connector = sqlite3.connect(self._database) + cursor = connector.cursor() + # verify that the expected table does exist + cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name = ?;",(self._table,)) + if cursor.fetchone() is None: + raise RuntimeError(f"Fatal Error: Could not find table {self._table} in database {self._database}.") + + # get all test-files from recent test executions + command = f"SELECT name FROM {self._table};" + cursor.execute(command) + raw_cases = cursor.fetchall() + cases = set([raw_case[0] for raw_case in raw_cases]) + # for each test-file + for case in cases: + case_data = dict() + # get the test-environments + command = f"SELECT compiler, cpp_standard FROM {self._table} WHERE name = ? and skipped_cases == 0" + cursor.execute(command,(case,)) + results = cursor.fetchall() + case_data["noskip"] = [{"compiler":result[0], "standard":result[1]} for result in results] + # some test-cases are skipped with certain environments + # It is unclear from the log, which cases are skipped; + # we leave this to the interested reader + command = f"SELECT compiler, cpp_standard, skipped_cases FROM {self._table} WHERE name = ? and skipped_cases != 0" + cursor.execute(command, (case,)) + results = cursor.fetchall() + case_data["skip"] = [{"compiler": result[0], "standard": result[1], "skipped": result[2]} for result in results] + fetched_data[case] = case_data + except sqlite3.Error as e: + raise RuntimeError(f"Fatal Error accessing database {self._database}: {e}") + finally: + if connector: + connector.close() + return fetched_data + + def fetch_all_test_data(self, input: list[str]): + """ + Extract and compile test structure information from C++ test files along with execution environment data. 
+ + This method processes a list of file or directory paths to find C++ unit test files (matching pattern + "unit-*.cpp"), extracts their TEST_CASE and SECTION structure, and combines this with recent test + execution environment information from the database to generate a comprehensive markdown report. + + The method recursively searches directories for test files, parses each file to extract the hierarchical + test structure (TEST_CASE containing nested SECTIONs), and correlates this with historical execution + data to show which compiler/standard combinations were used to run the tests. + + Args: + input: List of file or directory paths to process. Files must match "unit-*.cpp" pattern. + Directories are recursively searched for matching test files. + + Returns: + str: A markdown-formatted report containing: + - Header explaining the test structure format + - For each test file: nested bullet lists showing TEST_CASE and SECTION hierarchy + - Execution environment information showing which compiler/standard combinations + successfully ran all tests vs. which had some test cases skipped + - Notes about files that appear to have no recent execution history + + Note: + The method relies on extract_recent_test_environments() to get database information + and extract_test_structure() to parse individual test files. Test file names are + transformed using transform_test_file_to_test_name() to match database entries. 
+ """ + # inputs: path(s) to directory potentially containing some test-data + extracted_test_data = [] + recent_test_data = self.extract_recent_test_environments() + for arg in input: + p = Path(arg) + if p.is_file() and p.suffix == ".cpp" and p.name.startswith("unit-"): + extracted_test_data.append((p.name,self.extract_test_structure(p))) + elif p.is_dir(): + for entry in p.rglob("*"): + if entry.is_file() and entry.suffix == ".cpp" and entry.name.startswith("unit-"): + extracted_test_data.append((entry.name,self.extract_test_structure(entry))) + extracted_test_data.sort(key= lambda x: x[0]) + result = self.head_of_list() + for test_file, list_of_tests in extracted_test_data: + result += f"\n\n### List of tests in file {test_file}\n\n" + result += list_of_tests + result += "\n\n" + if recent_test_data.get(self.transform_test_file_to_test_name(test_file), None) is None: + result += "Unfortunately, none of the following tests seems to have been executed. Very strange indeed!\n\n" + else: + if recent_test_data.get(self.transform_test_file_to_test_name(test_file)).get("noskip",None) is not None: + if len(recent_test_data.get(self.transform_test_file_to_test_name(test_file)).get("noskip")) != 0: + result += "\nAll tests in this file were run in the following configurations:\n\n" + for datum in recent_test_data.get(self.transform_test_file_to_test_name(test_file)).get("noskip"): + result += "* " + result += datum.get("compiler",None) + result += " with standard " + result += datum.get("standard",None) + result += "\n" + if recent_test_data.get(self.transform_test_file_to_test_name(test_file)).get("skip",None) is not None: + if len(recent_test_data.get(self.transform_test_file_to_test_name(test_file)).get("skip")) != 0: + result += "\nIn the following configuration, however, some test-cases were skipped:\n\n" + for datum in recent_test_data.get(self.transform_test_file_to_test_name(test_file)).get("skip"): + result += "* " + how_many = datum.get("skipped",None) + result 
+= str(how_many) + if how_many == 1: + result += " test case was skipped when using " + else: + result += " test cases were skipped when using " + result += datum.get("compiler",None) + result += " with standard " + result += datum.get("standard",None) + result += "\n" + return result + + @property + def content(self) -> bytes: + # encoding is necessary since content will be hashed + return self.fetch_all_test_data(self._test_files).encode('utf-8') + + def as_markdown(self, filepath: None | str = None) -> str: + return self.content.decode('utf-8') + + def __str__(self) -> str: + # this is used as a title in the trudag report + return "List of all unit-tests" + +from trudag.dotstop.core.reference.references import LocalFileReference as LFR class VerboseFileReference(LFR): def __init__(self, path: str, description: str = "", **kwargs) -> None: @@ -574,7 +838,73 @@ def as_markdown(self, filepath: None | str = None) -> str: def __str__(self) -> str: return str(self._path) +class Checklist(LFR): + def __init__(self, path: str, **kwargs) -> None: + self._path = Path(path) + + @classmethod + def type(cls) -> str: + return "checklist" + + @property + def content(self) -> bytes: + if not self._path.is_file(): + raise ReferenceError( + f"Cannot get non-existent or non-regular file {self._path}" + ) + with self._path.open("rb") as reference_content: + return reference_content.read() + + def as_markdown(self, filepath: None | str = None) -> str: + return self.content.decode('utf-8') + + def __str__(self) -> str: + return str(self._path) + del LFR + +class workflow_failures(BaseReference): + def __init__(self, owner: str, repo: str, branch: str | None = None) -> None: + self._owner = owner + self._repo = repo + self._branch = branch + + @classmethod + def type(cls) -> str: + return "workflow_failures" + + @property + def content(self) -> bytes: + # build the url + url = f"https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure" + if self._branch is not None: + 
url += f"+branch%3A{self._branch}" + # ask the website + res = requests.get(url) + # if call is not successful, raise an error + if res.status_code != 200: + candidate = f"The url {url} is not reachable, so that the number of failed workflows can not be fetched!" + raise RuntimeError(candidate) + # otherwise fetch the number printed in the head of the table + m = re.search(r'(\d+)\s+workflow run results', res.text, flags=re.I) + if m is None: + candidate = f"The number of failed workflows can not be found, please check that the table head contains \"XX workflow run results\"!" + raise RuntimeError(candidate) + return m.group(1).encode('utf-8') + + def as_markdown(self, filepath: None | str = None) -> str: + if self._branch is None: + return f"{self.content.decode('utf-8')} workflows failed on {self._owner}/{self._repo}" + else: + return f"{self.content.decode('utf-8')} workflows failed on branch {self._branch} of {self._owner}/{self._repo}" + + def __str__(self) -> str: + # this is used as a title in the trudag report + if self._branch is not None: + result = f"failures on branch {self._branch} of {self._owner}/{self._repo}" + else: + result = f"failures on {self._owner}/{self._repo}" + return result class ItemReference(BaseReference): def __init__(self, items: list[str]) -> None: @@ -621,7 +951,3 @@ def __str__(self): title += "items " title += ", ".join(self._items) return title - - - - diff --git a/.dotstop_extensions/test_references.py b/.dotstop_extensions/test_references.py index f558fc80f8..71f8ce06a5 100644 --- a/.dotstop_extensions/test_references.py +++ b/.dotstop_extensions/test_references.py @@ -2,7 +2,7 @@ import tempfile from pathlib import Path from unittest.mock import patch -from references import CPPTestReference, JSONTestsuiteReference, FunctionReference, ItemReference +from references import CPPTestReference, JSONTestsuiteReference, FunctionReference, ItemReference, ListOfTestCases from validators import file_exists @@ -697,3 +697,162 @@ def 
test_file_exists(tmp_path): assert score == 2/4 assert any(isinstance(exception,Warning) for exception in exceptions) assert any(isinstance(exception,RuntimeError) for exception in exceptions) + +# ListOfTestCases tests +@pytest.fixture +def sample_unit_test_content(): + """Sample unit test file content for testing ListOfTestCases.""" + return '''TEST_CASE("basic arithmetic") +{ + SECTION("addition") + { + CHECK(1 + 1 == 2); + + SECTION("positive numbers") + { + CHECK(5 + 3 == 8); + } + } + + SECTION("multiplication") + { + CHECK(2 * 3 == 6); + } +} + +TEST_CASE("another test") +{ + CHECK(true); +} +''' + +@pytest.fixture +def temp_unit_test_file(sample_unit_test_content, tmp_path): + """Create a temporary unit test file.""" + test_file = tmp_path / "unit-sample.cpp" + test_file.write_text(sample_unit_test_content) + return test_file + +def test_list_of_test_cases_type_classmethod(): + """Test the type class method.""" + assert ListOfTestCases.type() == "list_of_test_cases" + +def test_compile_string(): + """Test compile_string static method.""" + # Test single item + result = ListOfTestCases.compile_string(["test_case"]) + assert result == "* test_case" + + # Test nested items + result = ListOfTestCases.compile_string(["test_case", "section1", "section2"]) + assert result == " * section2" + + # Test proper indentation + result = ListOfTestCases.compile_string(["test_case", "section"]) + assert result == " * section" + +def test_compile_string_empty_list(): + """Test compile_string with empty list raises error.""" + with pytest.raises(RuntimeError, match="Received empty structural list; nonempty list expected."): + ListOfTestCases.compile_string([]) + +def test_extract_quotation(): + """Test extract_quotation static method.""" + # Test basic extraction + result = ListOfTestCases.extract_quotation('TEST_CASE("my test")') + assert result == "my test" + + # Test with multiple quotes (should return first) + result = 
ListOfTestCases.extract_quotation('SECTION("section1") and "section2"') + assert result == "section1" + +def test_extract_quotation_no_quotes(): + """Test extract_quotation with no quotes raises error.""" + with pytest.raises(RuntimeError, match="Expected quotation mark; none were detected."): + ListOfTestCases.extract_quotation("no quotes here") + +def test_extract_quotation_single_quote(): + """Test extract_quotation with single quote raises error.""" + with pytest.raises(RuntimeError, match="Expected quotation marks; only one was detected."): + ListOfTestCases.extract_quotation('single quote"') + +def test_remove_and_count_indent(): + """Test remove_and_count_indent static method.""" + # Test spaces + count, text = ListOfTestCases.remove_and_count_indent(" hello") + assert count == 4 + assert text == "hello" + + # Test tabs (4 spaces each) + count, text = ListOfTestCases.remove_and_count_indent("\t\thello") + assert count == 8 + assert text == "hello" + + # Test mixed tabs and spaces + count, text = ListOfTestCases.remove_and_count_indent("\t hello") + assert count == 6 + assert text == "hello" + + # Test no indentation + count, text = ListOfTestCases.remove_and_count_indent("hello") + assert count == 0 + assert text == "hello" + +def test_head_of_list(): + """Test head_of_list static method.""" + result = ListOfTestCases.head_of_list() + assert "## List of all unit-tests with test environments" in result + assert "TEST_CASEs" in result + assert "SECTIONs" in result + +def test_transform_test_file_to_test_name(): + """Test transform_test_file_to_test_name static method.""" + result = ListOfTestCases.transform_test_file_to_test_name("unit-example-test.cpp") + assert result == "test-example-test" + + result = ListOfTestCases.transform_test_file_to_test_name("unit-simple.cpp") + assert result == "test-simple" + +def test_extract_test_structure(temp_unit_test_file): + """Test extract_test_structure method.""" + list_ref = ListOfTestCases([]) + result = 
list_ref.extract_test_structure(temp_unit_test_file) + + # Should contain test cases and sections + assert "* basic arithmetic" in result + assert " * addition" in result + assert " * positive numbers" in result + assert " * multiplication" in result + assert "* another test" in result + +def test_extract_test_structure_empty_file(tmp_path): + """Test extract_test_structure with empty file.""" + empty_file = tmp_path / "empty.cpp" + empty_file.write_text("") + + list_ref = ListOfTestCases([]) + result = list_ref.extract_test_structure(empty_file) + assert result == "" + +def test_list_of_test_cases_init(): + """Test ListOfTestCases initialization.""" + test_files = ["tests/unit-test1.cpp", "tests/unit-test2.cpp"] + list_ref = ListOfTestCases(test_files, "custom.db", "custom_table") + + assert list_ref._test_files == test_files + assert list_ref._database == "custom.db" + assert list_ref._table == "custom_table" + +def test_list_of_test_cases_init_defaults(): + """Test ListOfTestCases initialization with default parameters.""" + test_files = ["tests/unit-test1.cpp"] + list_ref = ListOfTestCases(test_files) + + assert list_ref._test_files == test_files + assert list_ref._database == "artifacts/MemoryEfficientTestResults.db" + assert list_ref._table == "test_results" + +def test_str_method(): + """Test __str__ method.""" + list_ref = ListOfTestCases(["test_file"]) + assert str(list_ref) == "List of all unit-tests" diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py index 8567e20be3..2f5321cfef 100644 --- a/.dotstop_extensions/validators.py +++ b/.dotstop_extensions/validators.py @@ -11,6 +11,8 @@ from TSF.scripts.generate_list_of_tests import ListOfTestsGenerator import hashlib import json +import re +import subprocess yaml: TypeAlias = str | int | float | list["yaml"] | dict[str, "yaml"] @@ -146,7 +148,7 @@ def check_test_results(configuration: dict[str, yaml]) -> tuple[float, list[Exce # get the test-names raw_tests = 
configuration.get("tests",None) if raw_tests is None: - return(1.0, Warning("Warning: No tests specified! Assuming absolute trustability!")) + return(1.0, [Warning("Warning: No tests specified! Assuming absolute trustability!")]) # process test-names tests = [] for test in raw_tests: @@ -292,12 +294,12 @@ def check_issues(configuration: dict[str, yaml]) -> tuple[float, list[Exception # get relevant release date release_date = configuration.get("release_date",None) if release_date is None: - return (0.0, RuntimeError("The release date of the most recent version of nlohmann/json is not specified.")) + return (0.0, [RuntimeError("The release date of the most recent version of nlohmann/json is not specified.")]) else: try: release_time = datetime.strptime(release_date,"%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc).timestamp() except: - return(0.0, RuntimeError("The format of the release date is to be %Y-%m-%dT%H:%M:%SZ")) + return(0.0, [RuntimeError("The format of the release date is to be %Y-%m-%dT%H:%M:%SZ")]) # get path to static list of misbehaviours raw_known_misbehaviours = configuration.get("list_of_known_misbehaviours",None) # parse list of inapplicable misbehaviours @@ -332,7 +334,7 @@ def check_issues(configuration: dict[str, yaml]) -> tuple[float, list[Exception return(0.0, [RuntimeError("The list of open issues could not be extracted.")]) for issue in relevant_open_issues: if issue not in inapplicable_misbehaviours and issue is not None: - return(0.0,[]) + return(0.0, []) # parse raw list of closed misbehaviours try: with open("raw_closed_issues.json") as list_2: @@ -350,6 +352,144 @@ def check_issues(configuration: dict[str, yaml]) -> tuple[float, list[Exception return(0.0, [RuntimeError("The list of closed issues could not be extracted.")]) for issue in relevant_closed_issues: if issue not in inapplicable_misbehaviours and issue is not None: - return(0.0,[]) + return(0.0, []) # If you are here, then there are no applicable misbehaviours. 
return (1.0, []) + +def did_workflows_fail(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + owner = configuration.get("owner",None) + if owner is None: + return (0.0, [RuntimeError("The owner is not specified in the configuration of did_workflows_fail.")]) + repo = configuration.get("repo",None) + if repo is None: + return (0.0, [RuntimeError("The repository is not specified in the configuration of did_workflows_fail.")]) + event = configuration.get("event","push") + url = f"https://github.com/{owner}/{repo}/actions?query=event%3A{event}+is%3Afailure" + branch = configuration.get("branch",None) + if branch is not None: + url += f"+branch%3A{branch}" + + try: + res = requests.get(url, timeout=30) # Add timeout to prevent hanging + except requests.exceptions.ConnectionError as e: + return (0.0, [RuntimeError(f"Connection error when accessing {url}: {e}")]) + except requests.exceptions.Timeout as e: + return (0.0, [RuntimeError(f"Timeout error when accessing {url}: {e}")]) + except requests.exceptions.RequestException as e: + return (0.0, [RuntimeError(f"Request error when accessing {url}: {e}")]) + + if res.status_code != 200: + return (0.0, [RuntimeError(f"The website {url} can not be successfully reached! 
Status code: {res.status_code}")]) + m = re.search(r'(\d+)\s+workflow run results', res.text, flags=re.I) + if m is None: + return (0.0, [RuntimeError("The number of failed workflows can not be found.")]) + if m.group(1).strip() != "0": + return (0.0, [Warning("There are failed workflows!")]) + return (1.0, []) + +def coveralls_reporter(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + owner = configuration.get("owner",None) + if owner is None: + return (0.0, [ValueError("The owner needs to be specified in the configuration for coveralls_reporter.")]) + repo = configuration.get("repo",None) + if repo is None: + return (0.0, [ValueError("The repository needs to be specified in the configuration for coveralls_reporter.")]) + branch = configuration.get("branch",None) + if branch is not None: + url = f"coveralls.io/github/{owner}/{repo}?branch={branch}.json" + else: + url = f"coveralls.io/github/{owner}/{repo}.json" + res = requests.get(url) + if res.status_code != 200: + return (0.0, [RuntimeError(f"Can not reach {url} to fetch the code coverage!")]) + res = json.loads(res.text) + try: + covered_lines = int(res.get("covered_lines","0")) + relevant_lines = int(res.get("relevant_lines","1")) + except ValueError: + return (0.0, [RuntimeError("Critical error in the coveralls api: Expecting integer values for lines!")]) + try: + expected_line_coverage = float(configuration.get("line_coverage","0.0")) + except ValueError: + return (0.0, [ValueError("line_coverage needs to be a floating point value!")]) + try: + digits = int(configuration.get("significant_decimal_digits","3")) + except ValueError: + return (0.0, [ValueError("significant_decimal_digits needs to be an integer value!")]) + if round(expected_line_coverage, digits) != round(covered_lines/relevant_lines * 100, digits): + return (0.0, [Warning("The line coverage has changed!")]) + try: + covered_branches = int(res.get("covered_branches","0")) + relevant_branches = 
int(res.get("relevant_branches","1")) + except ValueError: + return (0.0, [RuntimeError("Critical error in the coveralls api: Expecting integer values for branches!")]) + try: + expected_branch_coverage = float(configuration.get("branch_coverage","0.0")) + except ValueError: + return (0.0, [ValueError("branch_coverage needs to be a floating point value!")]) + if round(expected_branch_coverage, digits) != round(covered_branches/relevant_branches * 100, digits): + return (0.0, [Warning("The branch coverage has changed!")]) + return (1.0, []) + + + +def combinator(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + validators = configuration.get("validators",None) + if validators is None: + return (1.0, [Warning("No validators were given, returning the void-validator.")]) + elif not isinstance(validators,list): + return (0.0, [TypeError("The list of validators must be given as list.")]) + scores = [] + exceptions = [] + weights = [] + for validator in validators: + # fetch configuration + validator_configuration = validator.get("configuration", None) + if not isinstance(validator_configuration,dict[str, yaml]): + return (0.0, [TypeError("Validator configuration must be an object.")]) + # fetch weight + weight = float(validator.get("weight",1.0)) + if weight<0: + return (0.0, [TypeError("Validator weights must be non-negative.")]) + weights.append(weight) + # fetch type + validator_type = validator.get("type", None) + if validator_type is None: + return (0.0, [TypeError("Missing validator type declaration.")]) + # execute validator + if validator_type == "check_artifact_exists": + validator_score, validator_errors = check_artifact_exists(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "https_response_time": + validator_score, validator_errors = https_response_time(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif 
validator_type == "check_test_results": + validator_score, validator_errors = check_test_results(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "file_exists": + validator_score, validator_errors = file_exists(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "sha_checker": + validator_score, validator_errors = sha_checker(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "check_issues": + validator_score, validator_errors = check_issues(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "did_workflows_fail": + validator_score, validator_errors = did_workflows_fail(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "coveralls_reporter": + validator_score, validator_errors = coveralls_reporter(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + if sum(weights) == 0.0: + return (0.0, exceptions) + else: + return (sum(list(map(lambda x,y: x*y, scores, weights)))/sum(weights),exceptions) diff --git a/.github/workflows/publish_documentation.yml b/.github/workflows/publish_documentation.yml index 63106da7d2..2fa89d4ac8 100644 --- a/.github/workflows/publish_documentation.yml +++ b/.github/workflows/publish_documentation.yml @@ -111,52 +111,8 @@ jobs: # Fetch closed issues from the nlohmann/json repository gh issue list --repo "$REPO" --state closed --limit 10000 --json number,title,state,createdAt,url,labels > raw_closed_issues.json - # Add title to the output file - echo "# Misbehaviours Report" > $OUTPUT_FILE - echo "" >> $OUTPUT_FILE - echo "This report lists known misbehaviours or bugs of v3.12.0 of the nlohmann/json repository. 
The misbehaviours are compiled from github issues of the nlohmann/json repository, and link to each corresponding issue." >> $OUTPUT_FILE - echo "" >> $OUTPUT_FILE - - # Add subtitle for open issues - echo "## Open Issues" >> $OUTPUT_FILE - echo "" >> $OUTPUT_FILE - - # Filter raw open issues for labels containing "bug" and convert to output .md file - jq -r ' - map(select(.labels[]?.name | test("bug"; "i"))) | - map("### [#\(.number)](\(.url))\n- **Title:** \(.title)\n- **State:** \(.state)\n- **Created At:** \(.createdAt)\n") | - .[] - ' raw_open_issues.json >> $OUTPUT_FILE - - jq -c ' - .[] - | select(.labels[]?; .name | test("bug"; "i")) - | { n: .number - , md: "### [#\(.number)](\(.url))\n- **Title:** \(.title)\n- **State:** \(.state)\n- **Created At:** \(.createdAt)\n" - } - ' raw_open_issues.json | - while IFS= read -r rec; do - num=$(jq -r '.n' <<< "$rec") - md=$(jq -r '.md' <<< "$rec") - py_out=$(python3 ./TSF/scripts/identify_nlohmann_issue.py "$num") - { - printf "%s\n" "$md" - printf "%s\n" "$py_out" - } >> "$OUTPUT_FILE" - done - - # Add subtitle for closed issues - echo "" >> $OUTPUT_FILE - echo "## Closed Issues (since v3.12.0)" >> $OUTPUT_FILE - echo "" >> $OUTPUT_FILE - - # Filter raw closed issues for labels containing "bug", created after release date of nlohmann/json version in use, and convert to output .md file - jq -r ' - map(select(.labels[]?.name | test("bug"; "i"))) | - map(select(.createdAt > "2025-04-11T00:00:00Z")) | # Adjust date as needed, 2025-04-11 corresponds to release v3.12.0 of nlohmann/json - map("### [#\(.number)](\(.url))\n- **Title:** \(.title)\n- **State:** \(.state)\n- **Created At:** \(.createdAt)\n") | - .[] - ' raw_closed_issues.json >> $OUTPUT_FILE + # print the list of misbehaviours + python3 TSF/scripts/generate_list_of_misbehaviours.py > $OUTPUT_FILE # Push misbehaviours file to save_historical_data branch git add TSF/misbehaviours.md diff --git a/TSF/README.md b/TSF/README.md index 38fb723d70..0dc6967d66 100644 
--- a/TSF/README.md +++ b/TSF/README.md @@ -43,4 +43,197 @@ The releases process of this repository shall conform to the [release management - To indicate the version of nlohmann/json in use, the nlohmann/json release tag shall always be clearly included in the release notes of this repository. - The release notes of this repository shall always indicate whether the release includes changes to only TSF documentation, only the version of nlohmann/json, or both. -To update either the version of nlohmann/json within S-CORE or TSF documentation, please refer to the respective Update Concepts below. \ No newline at end of file +To update either the version of nlohmann/json within S-CORE or TSF documentation, please refer to the respective Update Concepts below. + +# Update Concept for the version of nlohmann/json within S-CORE + +## Assumptions of use + +This description of an update process is based on the following structure of the repository WHICH IS NOT THE CASE YET. +It is assumed that the repository possesses a default branch called ``main`` containing the most recent documented version of ``nlohmann/json`` together with its documentation. +Additionally, there is a branch ``develop``, which is **not** intended to mirror the branch of ``nlohmann/json`` with the same name, but instead serves as an in-repository testing ground for changes to either the library or its documentation. +The releases of the documented version are identified by tags on ``main``. +Moreover, the branch protection rules for ``main`` are set as described in the description of the forking process in ``TSF/README.md`` (WIP). + +Note that there is **no automatic information** on the existence of a new release in the original ``nlohmann/json``; instead the possibility to update is detected **manually**. +Note further that, due to the currently relatively limited use of nlohmann/json within S-CORE, there appears currently no inherent need to keep the version up to date. 
+ +## Update process of the original nlohmann/json + +The releases of ``nlohmann/json`` are collected on the [Release site](https://github.com/nlohmann/json/releases) of the repository ``nlohmann/json``. +Each release announcement is expected to contain the release date, SHA-256 values for json.hpp, include.zip and json.tar.xz, and a brief list containing bug fixes, improvements, further changes and deprecated functions. +The new release is expected to be located within the branch **master**, from where the most recent version can be drawn. + +## Update process of the S-CORE version + +In the following, we shall describe the intricacies of updating the version of ``nlohmann/json`` within Eclipse S-CORE. +This version is not a mere fork of the original master branch of ``nlohmann/json``, but instead enriched with the documentation following the Trustable Software Framework (TSF). + +The enrichment with the documentation necessitates some changes to the fork of the original repository. +For the most part, these changes are in-obtrusive, and mere additions. +In particular, the folders ``include`` and ``single-include`` remain unchanged, and should be updated without further adaptations. +In some cases, however, additional tests are run and data are generated and collected, which were not run or generated in the original ``nlohmann/json``, so that obtrusive changes of files were necessary. +For these files, and in particular the workflow files, caution must be exercised, as to not disturb the documentation. +Moreover, some parts of the documentation must be adapted to the new version. + + +### What can not be updated without further precautions? + +* ``cmake/ci.cmake`` + This file defines, in particular, the various custom cmake targets; in particular, the various configurations for the execution of the unit- and integration-tests are defined. + The TSF requires, or, at the very least, strongly encourages us to collect test-results. 
+ In order to do this efficiently, the ctest command is adapted to automatically generate the junit-logs of each test-run. + For this, the option ``--output-junit`` is set with output path ``../my_logs/TARGETNAME_junit.xml``, where TARGETNAME is replaced by the name of the respective cmake target; in case that this convention is insufficient to uniquely identify the logs, TARGETNAME is amended by a number. + When updating, it must be ensured that these adaptations are preserved. + Moreover, if the update introduces new cmake targets or new executions of ctest, it must be ensured, that the junit-log is generated and stored with a similar naming convention in the folder "../my_logs/". + Otherwise, it can not be ensured that the test data are accurately captured. + +* ``cmake/download_test_data.cmake`` + This file is modified to ensure that the test-data are not downloaded from the original test-data repository, but instead from the copy of that repository within the Eclipse S-CORE organisation. + It must be ensured that this change is preserved. + +* ``tests/CMakeLists.txt`` + This file collects, in particular, the files containing the unit- and integration-tests in a list, which is given to cmake. + Custom tests were added in TSF/tests to document the fulfillment of the expectations. + To ensure that these tests are run, the file tests/CMakeLists.txt has been modified. + During the update, it must be ensured, that the custom tests are still being executed. + +* ``.github/workflows/parent-workflow.yml`` + To ensure a specific execution order for the individual github workflows, their execution is orchestrated by the parent-workflow. + To guarantee that this order is respected, it must be ensured that every other workflow except for ``docs-cleanup.yml``, ``scorecards.yml`` and ``stale.yml`` runs ``on workflow_call``, only. + For the three exceptions, it is recommended to keep the execution scheduled as currently the case. 
+ +* ``.github/workflows/ubuntu.yml`` + The ubuntu workflow orchestrates the parallel execution of various cmake targets with varying configurations running on the latest version of ubuntu. + The first adaptation is that every step, in which a junit-report is generated, generates an artifact. + It must be ensured, that these artifacts are still generated after the update. + The second adaptation is that the test-results are captured, processed and persistently stored or stored in the ubuntu-artifact. + Therefore, it must be ensured that the jobs ``publish_test_data_success``, ``publish_test_data_failure``, ``publish_test_data_cancellation`` and ``ubuntu_artifact`` are executed. + Moreover, in case that any further job is added by nlohmann, it must be ensured that this job is added to the list of jobs required before the latter workflows are executed. + If any further job added by nlohmann generates a junit-log, it must be ensured that this job generates an artifact containing its junit-logs. + +* ``.github/workflows/cifuzz.yml`` + This workflow uses Google's oss-fuzz, which is not available to the copy within Eclipse S-CORE. + Therefore, this workflow needs to be disabled in the copy. + Currently, this is done by removing it altogether, which we recommend to do so that no confusion as to why this workflow is not executed arises. + +* ``.github/workflows/publish_documentation.yml`` + This workflow is replaced with a completely customised version, which reflects the use of trudag and the integration into the Eclipse S-CORE organisation. + Therefore, it is recommended to not change this workflow. + In particular, the version of publish_documentation.yml in the original repository nlohmann/json must not replace the publish_documentation.yml of the present repository. 
+ +* ``.github/workflows/test_trudag_extensions.yml`` + This workflow is not present in the original nlohmann/json and must not be removed, or modified (besides updating the versions of tools, if necessary) by the update. + +* Other entries of ``.github/workflows`` + For every workflow, it must be ensured that the conditions of their execution are unchanged. + The workflows ``check_amalgamation``, ``codeql``, ``dependency_review``, ``labeler`` and ``test_trudag_extensions`` generate an artifact, which must not be changed. + New workflows should be carefully reviewed. + If it is determined that their execution within the project is beneficial, and that they do not interfere with, then they should be integrated within the parent workflow at an appropriate place and their execution condition should be set to on ``workflow``, or their execution should be scheduled appropriately. + It is strongly recommended that the new workflow produces an artifact on success, and that the validator ``check_artifact_exists`` is adapted accordingly. + If nlohmann deletes any of the currently executed workflows, in particular ``check_amalgamation.yml``, ``codeql.yml``, ``dependency_review.yml``, ``labeler.yml``, ``test_trudag_extensions.yml`` and ``ubuntu.yml``, then it is strongly recommended to keep the currently executed version, since the automatic validator ``check_artifact_exists`` depends on the existence of these workflows. + In case that it is determined that these workflows should be deleted also in the documented copy of ``nlohmann/json``, then the validator ``check_artifact_exists`` and all its occurrences must be adapted accordingly. + +* ``ChangeLog.md`` + It must be ensured that the changes of the update are properly described in the file ``ChangeLog.md``. + + +### Necessary adaptations + +The following adaptation is recommended, and has, unfortunately, not been automated. 
+ +* ``TSF/trustable/statements/JLS-02.md`` + It must be carefully ensured that this statement and its references are still valid. In particular, it is strongly recommended to refer to a fuzz testing result running on the version that is updated to. + + +The following adaptations to the documentation have been automated; the python-script TSF/scripts/update_helper.py may be used to assist with these changes. +For the error-free execution is it necessary, however, to adhere to the naming scheme json_version_X_XX_X, and to not change the structure of the directories. + +* ``TSF/Trustable/statements/JLS-11.md`` + It must be ensured that the correct release date is used. + +* ``TSF/trustable/statements/JLS-14.md`` + It must be ensured that the release of the correct version is referenced. + Furthermore, the sha-value of the evidence must be adapted to the one provided in that announcement post. + +* ``TSF/trustable/docs/introduction/index.rst`` + In this file, the version of ``nlohmann/json`` that is documented is explicitly mentioned at two places. + This version must be updated. + +* ``TSF/scripts/generate_list_of_misbehaviours.py`` + This script contains version and release date hard-coded. Both must be updated. + + +### Recommended procedure VERY MUCH WIP + +Based on the above observations, the following recommendations are derived. + +1. Ensure that the content of the branch ``develop`` is identical to the branch ``main``. + Since it is intended to not change the library itself, in particular the folders ``include`` and ``single_include``, this should be possible by updating the documentation. +2. Merge branch master from the original nlohmann/json into ``develop``, e.g. ``git checkout -b json_version_X_XX_X && git merge --no-commit nlohmann/master`` +3. Confirm the deletion of cifuzz.yml, macos.yml and windows.yml. +4. Resolve the potential merge conflict in publish-documentation.yml by rejecting the incoming changes. 
+ Update the versions of the github actions, if necessary. +5. Resolve the potential merge conflicts in check_amalgamation.yml, codeql.yml, dependency_review.yml, labeler.yml, ``test_trudag_extensions.yml`` to ensure that the artifacts are generated, i.e. the jobs ``Generate XXX artifact`` and ``Upload XXX artifact`` are retained. +6. Resolve the potential merge conflict in ubuntu.yml following the above instructions. +7. Resolve the potential merge conflicts in cmake/download_test_data.cmake and cmake/ci.cmake following the above instructions. +8. Carefully examine the automatically merged changes. If no interference is to be expected, complete the merge. +9. In case any additional workflow has been added, carefully examine and integrate into the parent-workflow or schedule appropriately. +10. Adapt the documentation as described above. +11. Generate the documentation locally and carefully investigate any change in the trustable score(s). + If any relevant behaviour of the library changes, adapt the documentation. + Additionally, if any additional tests were added, or existing tests were changed, carefully investigate whether these warrant an amendment of the documentation. +12. Merge into the ``main``. +13. Create a new release under the tag FIXME + +# Update concept for the TSF documentation + +## Assumptions of use + +The documentation follows the Trustable Software Framework (TSF), which is documented [here](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html). +Furthermore, the automatic compilation of the documentation and the tracking of changes to the core functionalities of the library uses _trudag_, which is developed by Codethink and located [here](https://gitlab.com/CodethinkLabs/trustable/trustable). + + +## Version of trudag + +The documentation is currently built using trudag version 2025.8.5. 
+In case a major change of the trudag happens in the future, this might break some features of the documentation, or change some intended behaviours. +Thus, it is recommended to not change the version of trudag. +In case that it appears wise or necessary to change the version of trudag (e.g. when trudag is eventually certified), the following should be considered: + +* How has the algorithm for the accumulation of the trustable score changed? Ideally, it does not change, otherwise the necessity for a new review arises. +* How has the data store interface changed? Ideally, it has not changed, but historical data and the documentation indicate that a change of the data store interface happened at some time. +* How has the the expected configuration for the items changed? It is known that this configuration changed (at least) once before. What does the potential change mean? +* Do all custom references and validators as well as the data store interface work as before? +* Has the algorithm for the hashing changed, or are there any changes to the trustable scores? If so, investigate carefully! + + +## Subject-Matter-Expert-scores + +The intention with the SME scores is to find the _true_ trustable score by means of a heuristic law-of-large-numbers argument. +Therefore, it is very much welcome if contributors add their SME scores to statements for which they feel confident to do so. +While the committer may check SME scores for plausibility, it is recommended to not question SME scores as this interferes with the assumed independence of the SME! +It is recommended that changes to SME scores are accumulated in the branch ``develop`` before the release of a new version of the documentation as to not clutter the release history. +It is highly recommended to not delete SME scores under usual circumstances; most certainly, the SME scores should never be changed by anybody except the original SME. 
+The following unusual circumstances can, after careful consideration, justify the removal or (much preferably!) the request for re-evaluation by the original SMEs: + +* change of references: + If, e.g. due to an update of ``nlohmann/json``, the references of any items (be it tests or code) changes, then this should trigger a re-evaluation of the statement. + In particular if the behaviour changed significantly, it can be justifiable to assume that the old SME scores do not reflect the statement anymore. +* addition of automatic validators: + Recall that the SME-scores have different meanings depending on whether or not an automatic validator is implemented. + In the absence of a validator, the SME shall assess their confidence in the statement based on linked artifacts (references) and their own knowledge. + In the presence of a validator, the SME shall assess only their confidence in the validator as an accurate measure of the truth of the statement. + +## Validators + +The automatic validators are intended to calculate a trustable score based on quantifiable data. +In particular the introduction of a validator changes the meaning of the (potential) SME scores associated to a statement. +Therefore, the change or introduction of an automatic validator is most critical. +It is highly recommended to urge the original SME to re-review the statement and adapt their scores, or (at the least) to enlist additional SME to judge the changed statement. +After careful consideration the highly critical decision to remove some SME scores no longer reflecting the statement could be made. + +## References + +References should be treated as validators, i.e. any update of a reference should trigger a re-review by the SME. 
+For references, however, the decision to remove a stale SME score is even more critical unless the reference reveals critical new information, which is highly unlikely, or the change of the reference is triggered by a significant change in the behaviour of the library, which heavily affected the statement. \ No newline at end of file diff --git a/TSF/scripts/generate_list_of_misbehaviours.py b/TSF/scripts/generate_list_of_misbehaviours.py new file mode 100644 index 0000000000..c6cac25d12 --- /dev/null +++ b/TSF/scripts/generate_list_of_misbehaviours.py @@ -0,0 +1,45 @@ +import json +from datetime import datetime, timezone +from identify_nlohmann_issue import comment_nlohmann_misbehaviours + +version = "3.12.0" +release_date = "2025-04-11T08:43:39Z" + +if __name__ == "__main__": + release_time = datetime.strptime(release_date,"%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc).timestamp() + + # fetch relevant issues + with open("raw_open_issues.json") as list_1: + all_open_issues = json.load(list_1) + relevant_open_issues = [all_open_issues[i] for i in range(0,len(all_open_issues)) + if len(all_open_issues[i].get("labels",[]))!=0 + and any(label.get("name") == "kind: bug" for label in all_open_issues[i].get("labels", [])) + ] + with open("raw_closed_issues.json") as list_2: + all_closed_issues = json.load(list_2) + relevant_closed_issues = [all_closed_issues[i] for i in range(0,len(all_closed_issues)) + if len(all_closed_issues[i].get("labels",[]))!=0 + and any(label.get("name") == "kind: bug" for label in all_closed_issues[i].get("labels", [])) + and datetime.strptime(all_closed_issues[i].get("createdAt","2000-01-01T00:00:00Z"),"%Y-%m-%dT%H:%M:%SZ") + .replace(tzinfo=timezone.utc) + .timestamp() + >=release_time + ] + + print("# Misbehaviours Report\n") + print(f"This report lists known misbehaviours or bugs of version {version} of the nlohmann/json repository.") + print("The misbehaviours are compiled from github issues of the nlohmann/json repository, and link to 
each corresponding issue.\n") + + + print("## Open issues\n") + for issue in relevant_open_issues: + print(f"### [#{issue.get('number')}]({issue.get('url')})\n- **Title:** {issue.get('title')}\n- **State:** {issue.get('state')}\n- **Created At:** {issue.get('createdAt')}\n") + comment_nlohmann_misbehaviours(int(issue.get("number"))) + print("\n") + + print(f"\n## Closed Issues (since version {version})\n") + for issue in relevant_closed_issues: + print(f"### [#{issue.get('number')}]({issue.get('url')})\n- **Title:** {issue.get('title')}\n- **State:** {issue.get('state')}\n- **Created At:** {issue.get('createdAt')}\n") + comment_nlohmann_misbehaviours(int(issue.get("number"))) + print("\n") + diff --git a/TSF/scripts/update_helper.py b/TSF/scripts/update_helper.py new file mode 100644 index 0000000000..a8472b99a2 --- /dev/null +++ b/TSF/scripts/update_helper.py @@ -0,0 +1,274 @@ +import argparse +import re +import requests +import hashlib +from pathlib import Path + +def main() -> None: + ap = argparse.ArgumentParser(description="little helper script automatically updating version numbers and release dates") + ap.add_argument("-v", + "--version", + help="version number to be updated to; if unspecified, most recent version is chosen", + default=None + ) + ap.add_argument("-c", + "--check", + help="checks whether current and specified (or most recent) version of single_include/nlohmann/json.hpp coincide; no other action is performed", + action=argparse.BooleanOptionalAction + ) + ap.add_argument("-a", + "--auto", + help="automatically updates all options", + action=argparse.BooleanOptionalAction + ) + ap.add_argument("-u", + "--update", + action='append', + choices=["JLS-01","JLS-06","JLS-07","JLS-11","JLS-14","introduction","misbehaviours"], + help="updates the specified file(s):" \ + " JLS-01 - TSF/trustable/JLS-01.md," \ + " JLS-06 - TSF/trustable/JLS-06.md," \ + " JLS-07 - TSF/trustable/JLS-07.md," \ + " JLS-11 - TSF/trustable/JLS-11.md," \ + " JLS-14 - 
TSF/trustable/JLS-14.md," \ + " introduction - TSF/docs/introduction/index.rst," \ + " misbehaviours - TSF/scripts/generate_list_of_misbehaviours.py", + default=None + ) + ap.add_argument("-b", + "--branch", + help="name of the branch to which the references for branch protection and workflow-failures point to", + default=None + ) + ap.add_argument("-bo", + "--branch_only", + help="adapts branch-names only", + action=argparse.BooleanOptionalAction + ) + args = ap.parse_args() + + root = Path(__file__).resolve().parent.parent.parent + + print(args) + + if (not args.check + and ( + (not args.auto + and args.update is None) + or (args.branch_only + and args.branch is None) + ) + ): + # do nothing + return None + + # Fetch the metadata + version, url, release_date, expected_sha = fetch_metadata(args.version) + + # if flag check is set, then the sha of single_include/nlohmann/json.hpp is cross-checked with the sha of the specified version + if args.check: + if not check(expected_sha,root): + if args.version is None: + print(f"The current version of single_include/nlohmann/json.hpp is not the most recent one, which is {version}.") + else: + print(f"The current version of single_include/nlohmann/json.hpp does not coincide with {version}.") + else: + if args.version is None: + print(f"The current version of single_include/nlohmann/json.hpp is the most recent one, which is {version}.") + else: + print(f"The current version of single_include/nlohmann/json.hpp coincides with {version}.") + # No other action is performed. + return None + if not check(expected_sha,root): + print(f"\nWARNING! The current version of single_include/nlohmann/json.hpp does not coincide with {version}.\n\nIf you proceed, then the documentation is expected to contain wrong data!") + user = input("Proceed anyway? 
[y/n] ").strip().lower() + if user != "y": + print("Aborting update ...") + return None + # if flag auto is set, then all is updated automatically + if args.auto: + if args.branch is not None: + update_JLS_01(args.branch,root) + update_JLS_06(args.branch,root) + update_JLS_07(args.branch,root) + if not args.branch_only: + update_JLS_11(release_date,root) + update_JLS_14(url,expected_sha,root) + update_intro(version,root) + update_misbehaviours(version,release_date,root) + # no other action is necessary + return None + if "JLS-01" in args.update: + update_JLS_01(args.branch,root) + if "JLS-06" in args.update: + update_JLS_06(args.branch,root) + if "JLS-07" in args.update: + update_JLS_07(args.branch,root) + if args.branch_only: + return None + if "JLS-11" in args.update: + update_JLS_11(release_date,root) + if "JLS-14" in args.update: + update_JLS_14(url,expected_sha,root) + if "introduction" in args.update: + update_intro(version,root) + if "misbehaviours" in args.update: + update_misbehaviours(version,release_date,root) + +def update_JLS_01(branch: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-01.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-01.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*branch:\s*")([^"]*)(")', r'\g<1>' + branch + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_06(branch: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-06.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-06.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*branch:\s*")([^"]*)(")', r'\g<1>' + branch + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_07(branch: str, root: Path | None = None) -> None: + if root is 
None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-07.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-07.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*branch:\s*")([^"]*)(")', r'\g<1>' + branch + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_11(release_date: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-11.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-11.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*release_date:\s*")([^"]*)(")', r'\g<1>' + release_date + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_14(url: str, sha: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_14 = root / "trustable/statements/JLS-14.md" + else: + path_to_jls_14 = root / "TSF/trustable/statements/JLS-14.md" + data = path_to_jls_14.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*sha:\s*")([^"]*)(")', r'\g<1>' + sha + r'\g<3>', data) + data = re.sub(r'(?m)^(\s*url:\s*")([^"]*)(")', r'\g<1>' + url + r'\g<3>', data) + path_to_jls_14.write_text(data) + +def update_intro(version: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_intro = root / "docs/introduction/index.rst" + else: + path_to_intro = root / "TSF/docs/introduction/index.rst" + data = path_to_intro.read_text(encoding='utf-8') + data = re.sub(r'(\(version\s+)([^)]*)(\))', + lambda m: f"{m.group(1)}{version}{m.group(3)}", + data) + path_to_intro.write_text(data) + +def update_misbehaviours(version: str, release_date: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent + path_to_script = root / 
"generate_list_of_misbehaviours.py" + else: + path_to_script = root / "TSF/scripts/generate_list_of_misbehaviours.py" + data = path_to_script.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*version\s*=\s*")([^"]*)(")', r'\g<1>' + version + r'\g<3>', data) + data = re.sub(r'(?m)^(\s*release_date\s*=\s*")([^"]*)(")', r'\g<1>' + release_date + r'\g<3>', data) + path_to_script.write_text(data) + + +def fetch_metadata(version = None) -> tuple[str,str,str,str]: + # This function fetches the metadata of the release of the version of nlohmann/json specified in the input. + # If the input is None, then the most recent version is fetched. + # The function returns the version number, the url of the release tag, the release date in the format %Y-%m-%dT%H:%M:%SZ + # and the sha256-value of the json.hpp of the released version + + if version is None: + version = "" + + # fetch the sha-value of most recent release + releases = requests.get("https://github.com/nlohmann/json/releases") + if releases.status_code != 200: + raise Warning("The release page of nlohmann/json appears to be currently not reachable.") + releases_by_the_line = releases.text.splitlines() + # releases is expected to be huge, delete to free up room + del releases + found_version = False + found_sha = False + found_release_date = False + found_tag = False + for line in releases_by_the_line: + # look for + if not found_version and f"JSON for Modern C++ version {version}" not in line: + continue + elif not found_version: + if version == "": + m = re.search(r'JSON for Modern C\+\+ version\s*([^<"]*)<',line) + if m is None: + raise RuntimeError("Critical Error: Can not find version number of most recent release!") + version = m.group(1) + found_version = True + continue + if not found_release_date and "datetime=" in line: + m = re.search(r'datetime\s*=\s*"([^"]*)"', line) + if m is None: + raise RuntimeError(f"Critical Error: Can not find release-date of version {version}!") + release_date = m.group(1) if m else None + found_release_date = 
True + if not found_sha and "SHA-256:" in line and "(json.hpp)" in line: + expected_sha = line.split("SHA-256:", 1)[1].split("(json.hpp)", 1)[0].strip() + found_sha = True + if not found_tag and "/nlohmann/json/tree" in line: + m = re.search(r'href\s*=\s*"([^"]*)"', line) + if m is None: + raise RuntimeError(f"Critical Error: Can not find link to release version {version}!") + url = "https://github.com" + m.group(1) + found_tag = True + if found_version and found_sha and found_release_date and found_tag: + return (version, url, release_date, expected_sha) + if "JSON for Modern C++ version" in line and f"JSON for Modern C++ version {version}" not in line: + if not found_version and not found_release_date and not found_tag: + error_message = "Could not find any metadata" + elif not found_sha: + error_message = "Could not find SHA-value for json.hpp" + if not found_release_date: + error_message += " and release-date" + elif not found_tag: + error_message += " and link to code" + elif not found_release_date: + error_message = "Could not find release-date" + if not found_tag: + error_message += " and link to code" + else: + error_message = "Could not find link to code" + error_message += f" of version {version}!" if version!="" else " of most recent version!" + raise RuntimeError(error_message) + # If ever the for-loop comes to its end, the specified version can not be found! 
+ raise RuntimeError(f"Can not locate the release of version {version}!") + + +def check(expected_sha: str, root: Path | None = None) -> bool: + # get the actual sha-value of the single_include.json + if root is None: + root = Path(__file__).resolve().parent.parent.parent + single_include_json_path = root / "single_include/nlohmann/json.hpp" + with single_include_json_path.open('rb') as f: + actual_sha = hashlib.file_digest(f, 'sha256').hexdigest() + return actual_sha == expected_sha + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/TSF/tests/unit-arrays.cpp b/TSF/tests/unit-arrays.cpp index 3e94f0c3df..8ce0f087a0 100644 --- a/TSF/tests/unit-arrays.cpp +++ b/TSF/tests/unit-arrays.cpp @@ -48,6 +48,26 @@ TEST_CASE("accept") // double check if rejection is not due to overflow CHECK_THROWS_AS(parser_helper(faulty_array.str()),json::parse_error&); } + SECTION("whitespace") + { + CHECK(json::accept(" [true] ")); + CHECK(json::accept(" [true]\t")); + CHECK(json::accept(" [true]\n")); + CHECK(json::accept(" [true]\u000d")); + CHECK(json::accept("\t[true] ")); + CHECK(json::accept("\t[true]\t")); + CHECK(json::accept("\t[true]\n")); + CHECK(json::accept("\t[true]\u000d")); + CHECK(json::accept("\n[true] ")); + CHECK(json::accept("\n[true]\t")); + CHECK(json::accept("\n[true]\n")); + CHECK(json::accept("\n[true]\u000d")); + CHECK(json::accept("\u000d[true] ")); + CHECK(json::accept("\u000d[true]\t")); + CHECK(json::accept("\u000d[true]\n")); + CHECK(json::accept("\u000d[true]\u000d")); + CHECK(json::accept(" \n\t\t\n \t\u000d[true] \n\n\n \t\t\u000d \n")); + } } TEST_CASE("parse") diff --git a/TSF/tests/unit-byte_order_mark.cpp b/TSF/tests/unit-byte_order_mark.cpp index e3a8e14ba9..b37aaa36f8 100644 --- a/TSF/tests/unit-byte_order_mark.cpp +++ b/TSF/tests/unit-byte_order_mark.cpp @@ -20,7 +20,7 @@ TEST_CASE("accept") { SECTION("single BOM") { - // a single byte order mark is treated as an empty token, which is not a valid json token. 
+ // a single byte order mark is treated as an empty token, which is not a valid json token. CHECK(!json::accept("\xEF\xBB\xBF")); CHECK(json::accept("\xEF\xBB\xBF\n\"foo\"")); CHECK(json::accept("\xEF\xBB\xBF\"foo\"")); @@ -72,7 +72,7 @@ TEST_CASE("parse") { SECTION("multiple BOM") { - // Whenever a fourth character of a BOM-candidate is read, an error is thrown. + // Whenever a fourth character of a BOM-candidate is read, an error is thrown. // This error does not depend on any trailing garbage. CHECK_THROWS_WITH_AS(parser_helper("\xEF\xBB\xBF\xEF\xBB\xBF"),"[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '\xEF\xBB\xBF\xEF'", json::parse_error&); CHECK_THROWS_WITH_AS(parser_helper("\xEF\xBB\xBF\xEF\xBB\xBF\xEF\xBB\xBF"),"[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '\xEF\xBB\xBF\xEF'", json::parse_error&); diff --git a/TSF/tests/unit-class_parser_core.cpp b/TSF/tests/unit-class_parser_core.cpp index 9bf171e76c..bb908f0a44 100644 --- a/TSF/tests/unit-class_parser_core.cpp +++ b/TSF/tests/unit-class_parser_core.cpp @@ -8,10 +8,10 @@ /* This file has been adapted from the original nlohmann/json library (tests/src/unit-class_parser.cpp) -to use the plain json::accept() and json::parse() functions instead of advanced helper functions, -which make many additional function calls (see the definitions of parse_helper and accept_helper in -tests/src/unit-class_parser.cpp). This allows to directly attribute a test result to the accept() or -parse() function, which is needed to use the test results as evidence for the well-formedness and parsing +to use the plain json::accept() and json::parse() functions instead of advanced helper functions, +which make many additional function calls (see the definitions of parse_helper and accept_helper in +tests/src/unit-class_parser.cpp). 
This allows to directly attribute a test result to the accept() or +parse() function, which is needed to use the test results as evidence for the well-formedness and parsing of JSON requirements. Unnecessary code and test sections have been removed. */ @@ -34,7 +34,7 @@ bool accept_helper(const std::string& s); json parser_helper(const std::string& s) { - return json::parse(s); + return json::parse(s); } bool accept_helper(const std::string& s) diff --git a/TSF/tests/unit-literals.cpp b/TSF/tests/unit-literals.cpp index 16f360ed64..defa7525df 100644 --- a/TSF/tests/unit-literals.cpp +++ b/TSF/tests/unit-literals.cpp @@ -62,7 +62,7 @@ TEST_CASE("accept") CHECK(!json::accept("NULL")); } SECTION("false") - { + { CHECK(!json::accept("False")); CHECK(!json::accept("fAlse")); CHECK(!json::accept("FAlse")); @@ -1134,7 +1134,7 @@ TEST_CASE("parse") CHECK(json::parse("\u000dtrue ")==json::parse("true")); CHECK(json::parse("\u000dtrue\t")==json::parse("true")); CHECK(json::parse("\u000dtrue\n")==json::parse("true")); - CHECK(json::parse("\u000dtrue\u000d")==json::parse("true")); + CHECK(json::parse("\u000dtrue\u000d")==json::parse("true")); } SECTION("capitalisation") { @@ -1175,7 +1175,7 @@ TEST_CASE("parse") CHECK_THROWS_AS(parser_helper("NULL"),json::parse_error&); } SECTION("false") - { + { CHECK_THROWS_AS(parser_helper("False"),json::parse_error&); CHECK_THROWS_AS(parser_helper("fAlse"),json::parse_error&); CHECK_THROWS_AS(parser_helper("FAlse"),json::parse_error&); diff --git a/TSF/tests/unit-numbers.cpp b/TSF/tests/unit-numbers.cpp index 12724fa44d..f32b7fb4f1 100644 --- a/TSF/tests/unit-numbers.cpp +++ b/TSF/tests/unit-numbers.cpp @@ -20,7 +20,7 @@ TEST_CASE("accept") // The only valid exponents are U+0065 and U+0045. // Their look-alikes, in particular U+0425 and U+0436 are forbidden. 
SECTION("U+0425") - { + { CHECK(!json::accept("0\u0425123")); CHECK(!json::accept("123\u04250")); CHECK(!json::accept("0.123\u0425123")); @@ -28,7 +28,7 @@ TEST_CASE("accept") CHECK(!json::accept("1.23\u04250")); } SECTION("U+0436") - { + { CHECK(!json::accept("0\u0436123")); CHECK(!json::accept("123\u04360")); CHECK(!json::accept("0.123\u0436123")); @@ -63,7 +63,6 @@ TEST_CASE("accept") } SECTION("minus") { - CHECK(!json::accept("1-1")); CHECK(!json::accept("0.1-1")); CHECK(!json::accept("0.1-1.0")); @@ -75,7 +74,6 @@ TEST_CASE("accept") } SECTION("brackets") { - CHECK(!json::accept("(145)")); CHECK(!json::accept("(34.32874)")); CHECK(!json::accept("42\u0045(134)")); @@ -83,12 +81,10 @@ TEST_CASE("accept") } SECTION("factorial") { - CHECK(!json::accept("13!")); } SECTION("multiplication") { - CHECK(!json::accept("1*1")); CHECK(!json::accept("1.45*5")); CHECK(!json::accept("154*23.76")); @@ -99,7 +95,6 @@ TEST_CASE("accept") } SECTION("division") { - CHECK(!json::accept("0/0")); CHECK(!json::accept("1.45/5")); CHECK(!json::accept("154/23.76")); @@ -110,7 +105,6 @@ TEST_CASE("accept") } SECTION("comma") { - CHECK(!json::accept("0,0")); CHECK(!json::accept("100,000")); CHECK(!json::accept("1,000.23")); @@ -283,7 +277,7 @@ TEST_CASE("accept") CHECK(!json::accept("-0000000000000000000000000000000000042")); } // According to RFC8259, only numbers in base ten are allowed. For bases lower than ten, this can - // not be checked using the numerical representation and checking the grammar, assuming that the + // not be checked using the numerical representation and checking the grammar, assuming that the // standard digits are used; instead, this is the job of the parser. // For bases exceeding ten, this can be checked. In particular hexadecimal can be tested for. 
// For base eight, this can also be tested assuming that one of the conventions for the @@ -322,14 +316,14 @@ TEST_CASE("accept") TEST_CASE("parse") { - // While leading zeroes are forbidden according to RFC8259, + // While leading zeroes are forbidden according to RFC8259, // leading zeroes in the exponent are allowed and ignored in the parsing. SECTION("exponents") { // The only valid exponents are U+0065 and U+0045. // Their look-alikes, in particular U+0425 and U+0436 are forbidden. SECTION("U+0425") - { + { CHECK_THROWS_AS(parser_helper("0\u0425123"),json::parse_error&); CHECK_THROWS_AS(parser_helper("123\u04250"),json::parse_error&); CHECK_THROWS_AS(parser_helper("0.123\u0425123"),json::parse_error&); @@ -337,7 +331,7 @@ TEST_CASE("parse") CHECK_THROWS_AS(parser_helper("1.23\u04250"),json::parse_error&); } SECTION("U+0436") - { + { CHECK_THROWS_AS(parser_helper("0\u0436123"),json::parse_error&); CHECK_THROWS_AS(parser_helper("123\u04360"),json::parse_error&); CHECK_THROWS_AS(parser_helper("0.123\u0436123"),json::parse_error&); @@ -384,7 +378,6 @@ TEST_CASE("parse") } SECTION("minus") { - CHECK_THROWS_AS(parser_helper("1-1"),json::parse_error&); CHECK_THROWS_AS(parser_helper("0.1-1"),json::parse_error&); CHECK_THROWS_AS(parser_helper("0.1-1.0"),json::parse_error&); @@ -396,7 +389,6 @@ TEST_CASE("parse") } SECTION("brackets") { - CHECK_THROWS_AS(parser_helper("(145)"),json::parse_error&); CHECK_THROWS_AS(parser_helper("(34.32874)"),json::parse_error&); CHECK_THROWS_AS(parser_helper("42\u0045(134)"),json::parse_error&); @@ -408,7 +400,6 @@ TEST_CASE("parse") } SECTION("multiplication") { - CHECK_THROWS_AS(parser_helper("1*1"),json::parse_error&); CHECK_THROWS_AS(parser_helper("1.45*5"),json::parse_error&); CHECK_THROWS_AS(parser_helper("154*23.76"),json::parse_error&); @@ -419,7 +410,6 @@ TEST_CASE("parse") } SECTION("division") { - CHECK_THROWS_AS(parser_helper("0/0"),json::parse_error&); CHECK_THROWS_AS(parser_helper("1.45/5"),json::parse_error&); 
CHECK_THROWS_AS(parser_helper("154/23.76"),json::parse_error&); @@ -430,14 +420,13 @@ TEST_CASE("parse") } SECTION("comma") { - CHECK_THROWS_AS(parser_helper("0,0"),json::parse_error&); CHECK_THROWS_AS(parser_helper("100,000"),json::parse_error&); CHECK_THROWS_AS(parser_helper("1,000.23"),json::parse_error&); } } SECTION("trailing zeroes") - { + { // Trailing zeroes after the decimal point do not influence the parsing CHECK(json::parse("3.1415000000000000000000000")==json::parse("3.1415")); CHECK(json::parse("3.1415000000000\u004515")==json::parse("3.1415\u004515")); @@ -543,7 +532,7 @@ TEST_CASE("parse") } SECTION("Precision") { - CHECK(json::parse("1.7976931348623158e308").dump()=="1.7976931348623157e+308"); // maximum double value + CHECK(json::parse("1.7976931348623158e308").dump()=="1.7976931348623157e+308"); // maximum double value CHECK(json::parse("-1.7976931348623158e308").dump()=="-1.7976931348623157e+308"); // minimum double value } } diff --git a/TSF/tests/unit-objects.cpp b/TSF/tests/unit-objects.cpp index f5c2610767..eca7ba403b 100644 --- a/TSF/tests/unit-objects.cpp +++ b/TSF/tests/unit-objects.cpp @@ -16,7 +16,7 @@ void parser_helper(const std::string& input){ TEST_CASE("accept") { // A name (or key) is a string. No other token is a valid name - // See also n_object_missing_key.json, n_object_non_string_key.json, + // See also n_object_missing_key.json, n_object_non_string_key.json, // n_object_non_string_key_but_huge_number_instead.json, n_object_repeated_null_null // n_object_unquoted_key for some non-exhaustive tests SECTION("names") @@ -88,7 +88,7 @@ TEST_CASE("accept") CHECK(json::accept("{\"foo\\u001fbar\":123}")); } SECTION("unicode") - { + { // escaped CHECK(json::accept("{\"\\u0066\\u006f\\u006f\\u0062\\u0061\\u0072\":123}")); // unescaped @@ -109,7 +109,7 @@ TEST_CASE("accept") } } } - // Name/key and value of an array are treated as any other token. + // Name/key and value of an array are treated as any other token. 
// In particular, leading and trailing whitespace are ignored SECTION("whitespace") { @@ -129,7 +129,7 @@ TEST_CASE("accept") CHECK(json::accept("{\u000d\"foo\"\u000d:\u000d\"bar\"\u000d}")); CHECK(json::accept("{ \"foo\"\t:\n\"bar\"\n}")); CHECK(json::accept("{\t\t\t\t\t\n\n\u000d\"foo\"\t \t\t \n\n \u000d:\"bar\"}")); - } + } } // The colon U+003A is the only valid member separator. // Look-alikes are illegal. @@ -137,7 +137,7 @@ TEST_CASE("accept") SECTION("member separator") { CHECK(json::accept("{\"foo\"\u003a\"bar\"}")); //: - CHECK(!json::accept("{\"foo\"\uff1a\"bar\"}")); + CHECK(!json::accept("{\"foo\"\uff1a\"bar\"}")); CHECK(!json::accept("{\"foo\"\ua789\"bar\"}")); CHECK(!json::accept("{\"foo\"\u005b\"bar\"}")); //[ CHECK(!json::accept("{\"foo\"\u007b\"bar\"}")); //{ @@ -169,14 +169,14 @@ TEST_CASE("parse") CHECK(json::parse("{ \"foo\"\t:\n\"bar\"\n}")==json::parse("{\"foo\":\"bar\"}")); CHECK(json::parse("{\t\t\t\t\t\n\n\u000d\"foo\"\t \t\t \n\n \u000d:\"bar\"}")==json::parse("{\"foo\":\"bar\"}")); } - } + } // The colon U+003A is the only valid member separator. // Look-alikes are illegal. // All other valid structural characters are illegal. 
SECTION("member separator") { CHECK_NOTHROW(parser_helper("{\"foo\"\u003a\"bar\"}")); //: - CHECK_THROWS_AS(parser_helper("{\"foo\"\uff1a\"bar\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{\"foo\"\uff1a\"bar\"}"),json::parse_error&); CHECK_THROWS_AS(parser_helper("{\"foo\"\ua789\"bar\"}"),json::parse_error&); CHECK_THROWS_AS(parser_helper("{\"foo\"\u005b\"bar\"}"),json::parse_error&); //[ CHECK_THROWS_AS(parser_helper("{\"foo\"\u007b\"bar\"}"),json::parse_error&); //{ @@ -249,6 +249,6 @@ TEST_CASE("parse") ss2 >> _2; ss3 >> _3; CHECK(_2==_3); - } + } } } diff --git a/TSF/tests/unit-strings.cpp b/TSF/tests/unit-strings.cpp index 65f90854b4..e0ad653691 100644 --- a/TSF/tests/unit-strings.cpp +++ b/TSF/tests/unit-strings.cpp @@ -10,7 +10,7 @@ using nlohmann::json; namespace { void parser_helper(const std::string& input); -std::string uint_to_utf8(const uint32_t& input); +std::string uint_to_utf8(const uint32_t& input); void parser_helper(const std::string& input){ const json temp = json::parse(input); @@ -258,8 +258,8 @@ TEST_CASE("Unicode" * doctest::skip()) if (i>=0xD800 && i<=0xDFFF) { // Unpaired utf-16 surrogates are illegal. - // Observe that this verbatim not what RFC8259 §7 prescribes; - // it appears, however, to be in the spirit of RFC8259, cf. §8.2 + // Observe that this verbatim not what RFC8259 §7 prescribes; + // it appears, however, to be in the spirit of RFC8259, cf. §8.2 // Illegal characters are not parsed anyway. CHECK(!json::accept(temp.str())); CHECK(!json::accept(temp2.str())); @@ -278,10 +278,10 @@ TEST_CASE("Unicode" * doctest::skip()) for (uint32_t i = 0x0000; i<=0x10FFFF; i++) { std::string temp = uint_to_utf8(i); - if ((i>=0xD800 && i<=0xDFFF)) { + if ((i>=0xD800 && i<=0xDFFF)) { // Unpaired utf-16 surrogates are illegal. - // Observe that this verbatim not what RFC8259 §7 prescribes; - // it appears, however, to be in the spirit of RFC8259, cf. 
§8.2 + // Observe that this verbatim not what RFC8259 §7 prescribes; + // it appears, however, to be in the spirit of RFC8259, cf. §8.2 // The other characters are illegal if unescaped. CHECK(!json::accept(temp)); CHECK_THROWS_AS(parser_helper(temp),json::parse_error&); @@ -291,7 +291,7 @@ TEST_CASE("Unicode" * doctest::skip()) CHECK(!json::accept(temp)); CHECK_THROWS_AS(parser_helper(temp),json::parse_error&); } - } + } } else if (i<0x0020||i==0x0022||i==0x005c){ CHECK(!json::accept(temp)); CHECK_THROWS_AS(parser_helper(temp),json::parse_error&); @@ -334,7 +334,7 @@ TEST_CASE("Unicode" * doctest::skip()) CHECK_THROWS_AS(parser_helper(temp.str()),json::parse_error&); } } - } + } } } diff --git a/TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md b/TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md new file mode 100644 index 0000000000..b1b19b5e98 --- /dev/null +++ b/TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md @@ -0,0 +1,64 @@ +#### Checklist for TA-ANALYSIS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* What fraction of Expectations are covered by the test data? + + **Answer**: The two expectations are JLEX-01 and JLEX-02. Every statement supporting both of the expectations is ultimately supported by a test, except for WFJ-06. For WFJ-06 it is impossible to provide a direct tests, since this is a statement on infinitely many cases. Indirect tests are provided by the rejection of ill-formed json data. + +* What fraction of Misbehaviours are covered by the monitored indicator data? + + **Answer**: For the intended use-case, no misbehaviours have been identified. Furthermore, no indicator data are collected. + +* How confident are we that the indicator data are accurate and timely? + + **Answer**: No indicator data are collected. + +* How reliable is the monitoring process? + + **Answer**: Due to no indicator data being collected, there is no monitoring process. 
+ +* How well does the production data correlate with our test data? + + **Answer**: Due to the general nature of the library, there are no production data. + +* Are we publishing our data analysis? + + **Answer**: Since we have no production data with which to compare our not collected indicator data or our test data, no data analysis is performed, and hence none is published. + +* Are we comparing and analysing production data vs test? + + **Answer**: There are no production data. + +* Are our results getting better, or worse? + + **Answer**: Neither. + +* Are we addressing spikes/regressions? + + **Answer**: There are no spikes in the non-existent indicator data. If a test ever fails, then the spike is investigated. The results of fuzz testing are investigated in the original nlohmann/json. + +* Do we have sensible/appropriate target failure rates? + + **Answer**: For the unit and integration tests, zero. The target failure rate of fuzz testing is not under our control. + +* Do we need to check the targets? + + **Answer**: For the unit and integration tests, no. Since the fuzz testing runs and is investigated in the original nlohmann/json, there is no need to check the target. + +* Are we achieving the targets? + + **Answer**: For the unit and integration tests, yes. The achievement of the targets for the fuzz-testing is evaluated within the original nlohmann/json. + +* Are all underlying assumptions and target conditions for the analysis specified? + + **Answer**: Since none of the unit and integration tests are expected to fail, there is no further analysis of the results besides verifying the expectation. If any test ever fails, the failure of the CI-pipeline encourages the maintainer to investigate. + +* Have the underlying assumptions been verified using known good data? + + **Answer**: The assumption that all unit and integration tests succeed under the expected conditions is demonstrated by the non-failure of the CI-Pipeline. 
+ +* Has the Misbehaviour identification process been verified using known bad data? + + **Answer**: Misbehaviours published on nlohmann/json usually provide minimal working examples for reproducing the faulty behaviour, enabling everyone to verify the identified misbehaviours. There is, however, no automatic process for the identification of misbehaviours. + +* Are results shown to be reproducible? + + **Answer**: It is expected that the tests can be reproduced on every modern sufficiently powerful machine. diff --git a/TSF/trustable/assertions/TA-ANALYSIS.md b/TSF/trustable/assertions/TA-ANALYSIS.md index ce74c5c359..d9f20b5e3d 100644 --- a/TSF/trustable/assertions/TA-ANALYSIS.md +++ b/TSF/trustable/assertions/TA-ANALYSIS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md --- Collected data from tests and monitoring of deployed software is analysed according to specified objectives. diff --git a/TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md b/TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md new file mode 100644 index 0000000000..bfd9dfbb66 --- /dev/null +++ b/TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md @@ -0,0 +1,50 @@ +#### Checklist for TA-BEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How has the list of Expectations varied over time? + + **Answer**: The list of expectations is taken from [here](https://eclipse-score.github.io/score/main/modules/baselibs/json/docs/requirements/index.html), whose development can be retraced using git. + +* How confident can we be that this list is comprehensive? + + **Answer**: The list of expectations has been collected amongst the stakeholders in S-CORE, so that we are very confident that the list is comprehensive. + The expectation to serialize user data into JSON format + +* Could some participants have incentives to manipulate information? 
+ + **Answer**: We can not imagine any reason. + +* Could there be whole categories of Expectations still undiscovered? + + **Answer**: It is unlikely, but the parsing of CBOR could become relevant at some time. + +* Can we identify Expectations that have been understood but not specified? + + **Answer**: No. + +* Can we identify some new Expectations, right now? + + **Answer**: No. + +* How confident can we be that this list covers all critical requirements? + + **Answer**: We can not think of any more critical requirement of a JSON parser in the sense of RFC8259 than to parse JSON data in the sense of RFC8259. + +* How comprehensive is the list of tests? + + **Answer**: Currently, the branch coverage is 93.865% and the line coverage is 99.186%, cf. JLS-27. + +* Is every Expectation covered by at least one implemented test? + + **Answer**: Yes, both of the expectations are covered by at least one implemented test. Moreover, each statement supporting the expectations is covered by a test with the exception of WFJ-06. + +* Are there any Expectations where we believe more coverage would help? + + **Answer**: No. + +* How do dependencies affect Expectations, and are their properties verifiable? + + **Answer**: The library nlohmann/json does not have external dependencies, so that there are in particular none that affect Expectations. + +* Are input analysis findings from components, tools, and data considered in relation to Expectations? + + **Answer**: No findings have been found. 
diff --git a/TSF/trustable/assertions/TA-BEHAVIOURS.md b/TSF/trustable/assertions/TA-BEHAVIOURS.md index 135f16b2f3..fb527009d0 100644 --- a/TSF/trustable/assertions/TA-BEHAVIOURS.md +++ b/TSF/trustable/assertions/TA-BEHAVIOURS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md --- Expected or required behaviours for nlohmann/json library are identified, specified, verified and validated based on analysis. diff --git a/TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md b/TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md new file mode 100644 index 0000000000..0160733b07 --- /dev/null +++ b/TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md @@ -0,0 +1,17 @@ +#### Checklist for TA-CONFIDENCE from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* What is the algorithm for combining/comparing the scores? + + **Answer**: It is the standard algorithm of trudag. + +* How confident are we that this algorithm is fit for purpose? + + **Answer**: We have no reason to assume that the standard algorithm is not fit for our purpose. + +* What are the trends for each score? + + **Answer**: CAN NOT BE ANSWERED NOW + +* How well do our scores correlate with external feedback signals? + + **Answer**: CAN NOT BE ANSWERED NOW \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-CONFIDENCE.md b/TSF/trustable/assertions/TA-CONFIDENCE.md index a6fc8e3276..206733dfc2 100644 --- a/TSF/trustable/assertions/TA-CONFIDENCE.md +++ b/TSF/trustable/assertions/TA-CONFIDENCE.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md --- Confidence in nlohmann/json library is measured based on results of analysis. 
diff --git a/TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md b/TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md new file mode 100644 index 0000000000..2c123d8ee7 --- /dev/null +++ b/TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md @@ -0,0 +1,29 @@ +#### Checklist for TA-CONSTRAINTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Are the constraints grounded in realistic expectations, backed by real-world examples? + + **Answer**: The constraints originate from S-CORE (e.g. AOU-04, AOU-05, AOU-07, AOU-21), the standard RFC-8259 (e.g. AOU-05, AOU-20, AOU-21) and the library nlohmann/json itself (AOU-06, AOU-20) in order to ensure that the expectations are met. + +* Do they effectively guide downstream consumers in expanding upon existing Statements? + + **Answer**: ????? + +* Do they provide clear guidance for upstreams on reusing components with well-defined claims? + + **Answer**: ????? + +* Are any Statements explicitly designated as not reusable or adaptable? + + **Answer**: No statement has been intentionally designated as not reusable or adaptable. + +* Are there worked examples from downstream or upstream users demonstrating these constraints in practice? + + **Answer**: ???? + +* Have there been any documented misunderstandings from users, and are these visibly resolved? + + **Answer**: Yes, it is documented that the [brace initialisation](https://json.nlohmann.me/home/faq/) (cf. AOU-06) regularly leads to confusion, cf. [here](https://github.com/nlohmann/json/issues/4898). + +* Do external users actively keep up with updates, and are they properly notified of any changes? + + **Answer**: External users of the library are not necessarily automatically notified of an update, and are neither assumed nor required to keep up to date. If the external user forks the github repository, however, then github shows automatically whenever the upstream changes. 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-CONSTRAINTS.md b/TSF/trustable/assertions/TA-CONSTRAINTS.md index 8cf85ca15e..76f8d2bb54 100644 --- a/TSF/trustable/assertions/TA-CONSTRAINTS.md +++ b/TSF/trustable/assertions/TA-CONSTRAINTS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md --- Constraints on adaptation and deployment of nlohmann/json library are specified. diff --git a/TSF/trustable/assertions/TA-DATA-CHECKLIST.md b/TSF/trustable/assertions/TA-DATA-CHECKLIST.md new file mode 100644 index 0000000000..a0830a0fd2 --- /dev/null +++ b/TSF/trustable/assertions/TA-DATA-CHECKLIST.md @@ -0,0 +1,41 @@ +#### Checklist for TA-DATA from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Is all test data stored with long-term accessibility? + + **Answer**: If we assume that github is long-term accessible, then yes. + +* Is all monitoring data stored with long-term accessibility? + + **Answer**: There are no monitoring data. + +* Are extensible data models implemented? + + **Answer**: The data are stored in an sqlite database. + +* Is sensitive data handled correctly (broadcasted, stored, discarded, or anonymised) with appropriate encryption and redundancy? + + **Answer**: There are no sensitive data produced, collected or stored. + +* Are proper backup mechanisms in place? + + **Answer**: Not more than the default mechanisms of github. + +* Are storage and backup limits tested? + + **Answer**: No. + +* Are all data changes traceable? + + **Answer**: Yes, due to the usage of github. + +* Are concurrent changes correctly managed and resolved? + + **Answer**: Yes, due to the usage of github. + +* Is data accessible only to intended parties? + + **Answer**: Since the library is open source, there are no unintended parties. + +* Are any subsets of our data being published? 
+ + **Answer**: Yes, the collected data are publicly available. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-DATA.md b/TSF/trustable/assertions/TA-DATA.md index 19dda373d7..2e69be7b80 100644 --- a/TSF/trustable/assertions/TA-DATA.md +++ b/TSF/trustable/assertions/TA-DATA.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-DATA-CHECKLIST.md --- Data is collected from tests, and from monitoring of deployed software, according to specified objectives. diff --git a/TSF/trustable/assertions/TA-FIXES-CHECKLIST.md b/TSF/trustable/assertions/TA-FIXES-CHECKLIST.md new file mode 100644 index 0000000000..aa0d619c07 --- /dev/null +++ b/TSF/trustable/assertions/TA-FIXES-CHECKLIST.md @@ -0,0 +1,62 @@ +#### Checklist for TA-FIXES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + + +* How many faults have we identified in XYZ? + + **Answer**: There are no identifiable faults concerning the expectations. + +* How many unknown faults remain to be found, based on the number that have been processed so far? + + **Answer**: It is unlikely that there are unknown faults concerning the expectations. + +* Is there any possibility that people could be motivated to manipulate the lists (e.g. bug bonus or pressure to close). + + **Answer**: Since the project is entirely open source, it is quite unlikely. + +* How many faults may be unrecorded (or incorrectly closed, or downplayed)? + + **Answer**: There may be none, at least when it concerns the expectations. + +* How do we collect lists of bugs and known vulnerabilities from components? + + **Answer**: We pull the list from the issues reported to nlohmann/json labelled as bug and open or opened since the last release. This list is then stored using github, thereby enabling a traceability of the list. + +* How (and how often) do we check these lists for relevant bugs and known vulnerabilities? 
+ + **Answer**: Whenever we generate the documentation, the list is pulled. If there is an issue previously unrecorded, then the maintainer is encouraged by the change of the trustable score to check this issue on applicability. + +* How confident can we be that the lists are honestly maintained? + + **Answer**: We can not imagine a reason why the list could be dishonestly maintained. + +* Could some participants have incentives to manipulate information? + + **Answer**: We can not think of a reason why. + +* How confident are we that the lists are comprehensive? + + **Answer**: We have no reason to assume that discovered bugs are not reported to nlohmann/json. + +* Could there be whole categories of bugs/vulnerabilities still undiscovered? + + **Answer**: There could be a mislabelling of issues, but it is unlikely that there are bugs or vulnerabilities not labelled as bug, instead it is likely that perceived issues due to a misunderstanding of how the library works are labelled as bug. + +* How effective is our triage/prioritisation? + + **Answer**: ????? Since it is not intended to fix the library within S-CORE, but instead leave the development to the original nlohmann/json, there is no need to have a triage or prioritisation. + +* How many components have never been updated? + + **Answer**: None, the single component is up to date. + +* How confident are we that we could update them? + + **Answer**: If nlohmann/json were to release a new version, we are very confident that we can update to that version. + +* How confident are we that outstanding fixes do not impact our Expectations? + + **Answer**: We have not found any outstanding fixes impacting our expectations. + +* How confident are we that outstanding fixes do not address Misbehaviours? + + **Answer**: Since no misbehaviours have been identified, we are very confident that no outstanding fixes address any misbehaviours. 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-FIXES.md b/TSF/trustable/assertions/TA-FIXES.md index 353fa45d6f..435cf89f77 100644 --- a/TSF/trustable/assertions/TA-FIXES.md +++ b/TSF/trustable/assertions/TA-FIXES.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-FIXES-CHECKLIST.md --- Known bugs or misbehaviours are analysed and triaged, and critical fixes or mitigations are implemented or applied. diff --git a/TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md b/TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md new file mode 100644 index 0000000000..0ca7405667 --- /dev/null +++ b/TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md @@ -0,0 +1,49 @@ +#### Checklist for TA-INDICATORS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How appropriate/thorough are the analyses that led to the indicators? + + **Answer**: Since no misbehaviours for the use of the library for parsing and verification of JSON data according to RFC8259 have been identified, no warning indicators are implemented. + +* How confident can we be that the list of indicators is comprehensive? + + **Answer**: There are no warning indicators implemented, of which we are very confident. + +* Could there be whole categories of warning indicators still missing? + + **Answer**: Yes, there could. Within S-CORE, however, any warning indicator that is not natively implemented within the original nlohmann/json should be implemented in the wrapper defining the interface between the library and the project using it. + +* How has the list of advance warning indicators varied over time? + + **Answer**: It has stayed constant. + +* How confident are we that the indicators are leading/predictive? + + **Answer**: There are none. + +* Are there misbehaviours that have no advance warning indicators? + + **Answer**: There are no misbehaviours identified. 
+ +* Can we collect data for all indicators? + + **Answer**: There are currently no implemented indicators, so that no data are collected. + +* Are the monitoring mechanisms used included in our Trustable scope? + + **Answer**: No, but there are also none. + +* Are there gaps or trends in the data? + + **Answer**: There are no data where gaps or trends could be identified. + +* If there are gaps or trends, are they analysed and addressed? + + **Answer**: There are no data. + +* Is the data actually predictive/useful? + + **Answer**: There are no data. + +* Are indicators from code, component, tool, or data inspections taken into consideration? + + **Answer**: There are no indicators. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-INDICATORS.md b/TSF/trustable/assertions/TA-INDICATORS.md index fc8e87391b..3aeced44cc 100644 --- a/TSF/trustable/assertions/TA-INDICATORS.md +++ b/TSF/trustable/assertions/TA-INDICATORS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md --- Advanced warning indicators for misbehaviours are identified, and monitoring mechanisms are specified, verified and validated based on analysis. diff --git a/TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md b/TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md new file mode 100644 index 0000000000..d624636ceb --- /dev/null +++ b/TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md @@ -0,0 +1,63 @@ +#### Checklist for TA-INPUTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +The single_include/nlohmann/json.hpp is the single and only component of the library. + +* Are there components that are not on the list? + + **Answer**: No. + +* Are there assessments for all components? + + **Answer**: ????? + +* Has an assessment been done for the current version of the component? + + **Answer**: ????? + +* Have sources of bug and/or vulnerability data been identified? 
+ + **Answer**: There are no bug and/or vulnerability data. + +* Have additional tests and/or Expectations been documented and linked to component assessment? + + **Answer**: ?????? + +* Are component tests run when integrating new versions of components? + + **Answer**: There are no further components. + +* Are there tools that are not on the list? + + **Answer**: The library does not use external tools, except for the tools provided by the C++ standard library. + +* Are there impact assessments for all tools? + + **Answer**: ?????? The library does not use external tools for which an impact assessment has to be done. + +* Have tools with high impact been qualified? + + **Answer**: There are no tools with high impact. + +* Were assessments or reviews done for the current tool versions? + + **Answer**: ????? The library does not use external tools for which an impact assessment has to be done. + +* Have additional tests and/or Expectations been documented and linked to tool assessments? + + **Answer**: No. + +* Are tool tests run when integrating new versions of tools? + + **Answer**: The library does not use external tools for which a new version needs to be integrated. + +* Are tool and component tests included in release preparation? + + **Answer**: Yes, the tests of the library are included in the release. + +* Can patches be applied, and then upstreamed for long-term maintenance? + + **Answer**: Yes, if ever a misbehaviour is found and patched, then a pull-request to the original nlohmann/json repository can be opened to upstream the changes. + +* Do all dependencies comply with acceptable licensing terms? + + **Answer**: Yes, the library is licensed under MIT License . 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-INPUTS.md b/TSF/trustable/assertions/TA-INPUTS.md index 325480ff97..c1a2339f53 100644 --- a/TSF/trustable/assertions/TA-INPUTS.md +++ b/TSF/trustable/assertions/TA-INPUTS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md --- All inputs to nlohmann/json library are assessed, to identify potential risks and issues. diff --git a/TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md b/TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md new file mode 100644 index 0000000000..5a26f8b75e --- /dev/null +++ b/TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md @@ -0,0 +1,21 @@ +#### Checklist for TA-ITERATIONS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How much of the software is provided as binary only, expressed as a fraction of the BoM list? + + **Answer**: None. + +* How much is binary, expressed as a fraction of the total storage footprint? + + **Answer**: None. + +* For binaries, what claims are being made and how confident are we in the people/organisations making the claims? + + **Answer**: There are no binaries. + +* For third-party source code, what claims are we making, and how confident are we about these claims? + + **Answer**: There is no third-party source code in the library. + +* For software developed by us, what claims are we making, and how confident are we about these claims? + + **Answer**: This is the remainder of the documentation. 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-ITERATIONS.md b/TSF/trustable/assertions/TA-ITERATIONS.md index 7137b3f324..7e9e6ac5d8 100644 --- a/TSF/trustable/assertions/TA-ITERATIONS.md +++ b/TSF/trustable/assertions/TA-ITERATIONS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md --- All constructed iterations of nlohmann/json library include source code, build instructions, tests, results and attestations. diff --git a/TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md b/TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md new file mode 100644 index 0000000000..101fedf5b4 --- /dev/null +++ b/TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md @@ -0,0 +1,43 @@ +#### Checklist for TA-METHODOLOGIES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +This project follows purely the Methodologies of Eclipse S-CORE. + +* Are the identified gaps documented clearly to justify using a manual process? + + **Answer**: + +* Are the goals for each process clearly defined? + + **Answer**: + +* Is the sequence of procedures documented in an unambiguous manner? + + **Answer**: + +* Can improvements to the processes be suggested and implemented? + + **Answer**: + +* How frequently are processes changed? + + **Answer**: + +* How are changes to manual processes communicated? + + **Answer**: + +* Are there any exceptions to the processes? + + **Answer**: + +* How is evidence of process adherence recorded? + + **Answer**: + +* How is the effectiveness of the process evaluated? + + **Answer**: + +* Is ongoing training required to follow these processes? 
+ + **Answer**: \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-METHODOLOGIES.md b/TSF/trustable/assertions/TA-METHODOLOGIES.md index 3330900d98..7e5a3cac56 100644 --- a/TSF/trustable/assertions/TA-METHODOLOGIES.md +++ b/TSF/trustable/assertions/TA-METHODOLOGIES.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md --- Manual methodologies applied for nlohmann/json library by contributors, and their results, are managed according to specified objectives. diff --git a/TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md b/TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md new file mode 100644 index 0000000000..773a481558 --- /dev/null +++ b/TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md @@ -0,0 +1,49 @@ +#### Checklist for TA-MISBEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How has the list of misbehaviours varied over time? + + **Answer**: The list of misbehaviours is collected using github and its development is thereby understandable. + +* How confident can we be that this list is comprehensive? + + **Answer**: Due to the collaborative nature of the open source community, we deem it quite unlikely that there are any known misbehaviours which are not reported to the repository nlohmann/json. + +* How well do the misbehaviours map to the expectations? + + **Answer**: There are no identified misbehaviours that affect the expectations. + +* Could some participants have incentives to manipulate information? + + **Answer**: We could not think of an incentive that any contributor could have to manipulate the information. + +* Could there be whole categories of misbehaviours still undiscovered? 
+ + **Answer**: Due to the wide use and long-standing development of the library it is quite unlikely that any major misbehaviours, in particular regarding the parsing and validating of JSON data in the sense of RFC-8259, are undiscovered. + +* Can we identify misbehaviours that have been understood but not specified? + + **Answer**: No. + +* Can we identify some new misbehaviours, right now? + + **Answer**: No. + +* Is every misbehaviour represented by at least one fault induction test? + + **Answer**: Since there are no misbehaviours that concern the use within S-CORE, no. + +* Are fault inductions used to demonstrate that tests which usually pass can and do fail appropriately? + + **Answer**: ?????? No. + +* Are all the fault induction results actually collected? + + **Answer**: ?????? No. + +* Are the results evaluated? + + **Answer**: ?????? No. + +* Do input analysis findings on verifiable tool or component claims and features identify additional misbehaviours or support existing mitigations? + + **Answer**: Currently, there is no analysis which identifies additional misbehaviours. The only such analysis is indirectly via the analysis of the fuzz testing, which currently does not identify additional misbehaviours. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-MISBEHAVIOURS.md b/TSF/trustable/assertions/TA-MISBEHAVIOURS.md index fbb67f2e65..dc178ff789 100644 --- a/TSF/trustable/assertions/TA-MISBEHAVIOURS.md +++ b/TSF/trustable/assertions/TA-MISBEHAVIOURS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md --- Prohibited misbehaviours for nlohmann/json library are identified, and mitigations are specified, verified and validated based on analysis. 
diff --git a/TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md b/TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md new file mode 100644 index 0000000000..ece1bc453f --- /dev/null +++ b/TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md @@ -0,0 +1,25 @@ +#### Checklist for TA-RELEASES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How confident are we that all components are taken from within our controlled environment? + + **Answer**: This library does not take anything from outside of this repository. + +* How confident are we that all of the tools we are using are also under our control? + + **Answer**: The version of nlohmann/json that is documented with this documentation is under the full control of the Eclipse S-CORE organisation. + +* Are our builds repeatable on a different server, or in a different context? + + **Answer**: Since there is no "build" of the header-only library, yes. + +* How sure are we that our builds don't access the internet? + + **Answer**: There is no implemented access to the internet in the library itself. The testsuite is downloaded from within Eclipse S-CORE. + +* How many of our components are non-reproducible? + + **Answer**: The single component is reproducible. + +* How confident are we that our reproducibility check is correct? + + **Answer**: Quite. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-RELEASES.md b/TSF/trustable/assertions/TA-RELEASES.md index 033b77df8e..8e1aa2c11e 100644 --- a/TSF/trustable/assertions/TA-RELEASES.md +++ b/TSF/trustable/assertions/TA-RELEASES.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md --- Construction of nlohmann/json library releases is fully repeatable and the results are fully reproducible, with any exceptions documented and justified. 
diff --git a/TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md b/TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md new file mode 100644 index 0000000000..dbb2e3f0a3 --- /dev/null +++ b/TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md @@ -0,0 +1,25 @@ +#### Checklist for TA-SUPPLY_CHAIN from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Could there be other components, missed from the list? + + **Answer**: Since the library does not contain any external components, no. + +* Does the list include all toolchain components? + + **Answer**: Since the library does not contain any external components, yes. + +* Does the toolchain include a bootstrap? + + **Answer**: ???? No. + +* Could the content of a mirrored project be compromised by an upstream change? + + **Answer**: Since the library does not contain any external components, no. + +* Are mirrored projects up to date with the upstream project? + + **Answer**: Yes, the library is up to date with the most recent release of the original nlohmann/json + +* Are mirrored projects based on the correct upstream? + + **Answer**: Yes. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md b/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md index 43939dc774..90f6b08f52 100644 --- a/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md +++ b/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md --- All sources for nlohmann/json library and tools are mirrored in our controlled environment. 
diff --git a/TSF/trustable/assertions/TA-TESTS-CHECKLIST.md b/TSF/trustable/assertions/TA-TESTS-CHECKLIST.md new file mode 100644 index 0000000000..8f05b60bdd --- /dev/null +++ b/TSF/trustable/assertions/TA-TESTS-CHECKLIST.md @@ -0,0 +1,25 @@ +#### Checklist for TA-TESTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How confident are we that our test tooling and environment setups used for tests, fault inductions, and analyses are reproducible? + + **Answer**: The tests can be reproduced any time on any machine running the versions of the operating systems and compilers as provided (TODO, cf. AOU-14) + +* Are any exceptions identified, documented and justified? + + **Answer**: To the best of our understanding, there are no exceptions identified. + +* How confident are we that all test components are taken from within our controlled environment? + + **Answer**: All tests are either self-contained or download test data from [within Eclipse S-CORE](https://github.com/eclipse-score/inc_nlohmann_json/tree/json_test_data_version_3_1_0_mirror). + +* How confident are we that all of the test environments we are using are also under our control? + + **Answer**: ???? The environments are standard docker images of ubuntu and standard versions of compilers. + +* Do we record all test environment components, including hardware and infrastructure used for exercising tests and processing input/output data? + + **Answer**: No, since the tests are independent of hardware, these data are not collected. + +* How confident are we that all test scenarios are repeatable? + + **Answer**: All test scenarios are repeated daily in the CI pipeline. 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-TESTS.md b/TSF/trustable/assertions/TA-TESTS.md index 474886070a..e78fea3221 100644 --- a/TSF/trustable/assertions/TA-TESTS.md +++ b/TSF/trustable/assertions/TA-TESTS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-TESTS-CHECKLIST.md --- All tests for nlohmann/json library, and its build and test environments, are constructed from controlled/mirrored sources and are reproducible, with any exceptions documented. diff --git a/TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md b/TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md new file mode 100644 index 0000000000..482d412201 --- /dev/null +++ b/TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md @@ -0,0 +1,25 @@ +#### Checklist for TA-UPDATES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Where are the change and configuration management controls specified? + + **Answer**: WIP + +* Are these controls enforced for all of components, tools, data, documentation and configurations? + + **Answer**: The S-CORE Methodology is followed, compliance with which enforces the change process to be followed. + +* Are there any ways in which these controls can be subverted, and have we mitigated them? + + **Answer**: Yes, the change process can just not be followed. We have no real method to enforce it other than to trust that the committers follow the S-CORE processes. + +* Does change control capture all potential regressions? + + **Answer**: Due to the test coverage of 99.186%, it is unlikely that a potential regression is not captured. + +* Is change control timely enough? + + **Answer**: Not applicable, as far as can be understood right now, there is no imminent need to keep the library up to date. + +* Are all guidance and checks understandable and consistently followed? 
+ + **Answer**: WIP \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-UPDATES.md b/TSF/trustable/assertions/TA-UPDATES.md index c54015ba90..0113c23a4f 100644 --- a/TSF/trustable/assertions/TA-UPDATES.md +++ b/TSF/trustable/assertions/TA-UPDATES.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md --- nlohmann/json library components, configurations and tools are updated under specified change and configuration management controls. diff --git a/TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md b/TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md new file mode 100644 index 0000000000..8e23bae526 --- /dev/null +++ b/TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md @@ -0,0 +1,55 @@ +#### Checklist for TA-VALIDATION from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +I DO NOT FEEL CONFIDENT TO **Answer** THIS! + +* Is the selection of tests correct? + + **Answer**: ???? Who could tell this? + +* Are the tests executed enough times? + + **Answer**: ???? Define "enough times" + +* How confident are we that all test results are being captured? + + **Answer**: ???? How fine-grained is a test-result supposed to be? + +* Can we look at any individual test result, and establish what it relates to? + + **Answer**: ???? + +* Can we trace from any test result to the expectation it relates to? + + **Answer**: No, there are more tests than expectations, and in particular tests that relate to the inner workings of the library which are not used by S-CORE. + +* Can we identify precisely which environment (software and hardware) were used? + + **Answer**: ???? How precisely shall that be? Moreover, the tests are supposed to run independent of underlying hardware, since this is a software. + +* How many pass/fail results would be expected, based on the scheduled tests? + + **Answer**: Zero fails. 
+ +* Do we have all of the expected results? + + **Answer**: Yes. + +* Do we have time-series data for all of those results? + + **Answer**: Yes, there are time-series data. + +* If there are any gaps, do we understand why? + + **Answer**: ???? Define gaps + +* Are the test validation strategies credible and appropriate? + + **Answer**: ???? Define test validation strategies + +* What proportion of the implemented tests are validated? + + **Answer**: ???? None. + +* Have the tests been verified using known good and bad data? + + **Answer**: ???? \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-VALIDATION.md b/TSF/trustable/assertions/TA-VALIDATION.md index 08cba3497e..6aba0b230c 100644 --- a/TSF/trustable/assertions/TA-VALIDATION.md +++ b/TSF/trustable/assertions/TA-VALIDATION.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md --- All specified tests are executed repeatedly, under defined conditions in controlled environments, according to specified objectives. diff --git a/TSF/trustable/assumptions-of-use/AOU-29.md b/TSF/trustable/assumptions-of-use/AOU-29.md new file mode 100644 index 0000000000..aba111340a --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-29.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall evaluate the necessity of monitoring mechanisms and implement these in score-json together with a persistent storage of monitoring data as needed. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-09.md b/TSF/trustable/no-json-faults/NJF-09.md index cfb8476319..cb5edb9a4a 100644 --- a/TSF/trustable/no-json-faults/NJF-09.md +++ b/TSF/trustable/no-json-faults/NJF-09.md @@ -21,6 +21,12 @@ references: name: "lexer::scan" path: "include/nlohmann/detail/input/lexer.hpp" description: "function, which is called by parser::sax_parse_internal to read input data" + - type: JSON_testsuite + name: "json.org examples;1.json" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json.org/1.json" + description: "Checks that a valid json object containing all six structural characters is accepted." score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 diff --git a/TSF/trustable/no-json-faults/NJF-11.md b/TSF/trustable/no-json-faults/NJF-11.md index 348110fcad..2953bdc895 100644 --- a/TSF/trustable/no-json-faults/NJF-11.md +++ b/TSF/trustable/no-json-faults/NJF-11.md @@ -9,6 +9,21 @@ references: name: "lexer::skip_whitespace" path: "include/nlohmann/detail/input/lexer.hpp" description: "function, which skips admissible whitespace during reading" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-literals.cpp" + - type: cpp_test + name: "accept;whitespace;Leading and tailing" + path: "TSF/tests/unit-numbers.cpp" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-strings.cpp" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-arrays.cpp" score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 diff --git a/TSF/trustable/statements/JLS-01.md b/TSF/trustable/statements/JLS-01.md index 0c7859937f..1dd87a7a0e 100644 --- a/TSF/trustable/statements/JLS-01.md +++ b/TSF/trustable/statements/JLS-01.md @@ -4,9 +4,12 @@ normative: true references: - type: file path: .github/workflows/parent-workflow.yml - - type: web_content - url: 
https://github.com/score-json/json/settings/branch_protection_rules/65227858 - description: "branch protection rule for main branch specifying that failures of tests prevent merge." +evidence: + type: did_workflows_fail + configuration: + owner: "score-json" + repo: "json" + branch: "main" score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 diff --git a/TSF/trustable/statements/JLS-05.md b/TSF/trustable/statements/JLS-05.md index 5560530589..70714a9622 100644 --- a/TSF/trustable/statements/JLS-05.md +++ b/TSF/trustable/statements/JLS-05.md @@ -17,6 +17,12 @@ references: - type: project_website url: "https://github.com/nlohmann/json/pulse" description: "presents activity over the past week" + - type: project_website + url: "https://github.com/orgs/score-json/discussions/27#discussion-8594385" + description: "comparison between JSON libraries demonstrating the popularity of nlohmann/json" + - type: project_website + url: "https://json.nlohmann.me/home/customers/" + description: "list of large projects using nlohmann/json" evidence: type: https_response_time configuration: diff --git a/TSF/trustable/statements/JLS-06.md b/TSF/trustable/statements/JLS-06.md index 298547bfa9..33f7ea037d 100644 --- a/TSF/trustable/statements/JLS-06.md +++ b/TSF/trustable/statements/JLS-06.md @@ -2,19 +2,10 @@ level: 1.1 normative: true references: - - type: website - url: https://github.com/score-json/json/settings/branches - description: "Branch protection policies" -evidence: - type: "check_artifact_exists" - configuration: - check_amalgamation: include - codeql: include - dependency_review: include - labeler: include - publish_documentation: include - test_trudag_extensions: include - ubuntu: include + - type: workflow_failures + owner: "nlohmann" + repo: "json" + branch: "master" --- Changes to the code (main branch) are applied only after code review and passing of all pipelines. 
diff --git a/TSF/trustable/statements/JLS-07.md b/TSF/trustable/statements/JLS-07.md index a4a52df3b7..0c8d55f74c 100644 --- a/TSF/trustable/statements/JLS-07.md +++ b/TSF/trustable/statements/JLS-07.md @@ -1,10 +1,6 @@ --- level: 1.1 normative: true -references: - - type: web_content - url: https://github.com/score-json/json/settings/branch_protection_rules/65227858 - description: "branch protection rule for main branch specifying that failures of tests prevent merge." score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 diff --git a/TSF/trustable/statements/JLS-11.md b/TSF/trustable/statements/JLS-11.md index 79f0a3489c..c5fd26f1c6 100644 --- a/TSF/trustable/statements/JLS-11.md +++ b/TSF/trustable/statements/JLS-11.md @@ -4,7 +4,7 @@ normative: true evidence: type: check_issues configuration: - release_date: "2025-04-11T00:00:00Z" + release_date: "2025-04-11T08:43:39Z" list_of_known_misbehaviours: "./TSF/docs/nlohmann_misbehaviours_comments.md" --- diff --git a/TSF/trustable/statements/JLS-14.md b/TSF/trustable/statements/JLS-14.md index 374c7df727..21415a7369 100644 --- a/TSF/trustable/statements/JLS-14.md +++ b/TSF/trustable/statements/JLS-14.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: website - url: "/nlohmann/json/tree/v3.12.0" + url: "https://github.com/nlohmann/json/tree/v3.12.0" description: "release site of nlohmann/json containing the sha values" evidence: type: sha_checker diff --git a/TSF/trustable/statements/JLS-26.md b/TSF/trustable/statements/JLS-26.md index dc3828025b..7aa421b105 100644 --- a/TSF/trustable/statements/JLS-26.md +++ b/TSF/trustable/statements/JLS-26.md @@ -2,12 +2,13 @@ level: 1.1 normative: true references: - - type: web_content - url: https://github.com/score-json/json/settings/branch_protection_rules/65227858 - description: "branch protection rule for main branch specifying that failures of any workflow prevent merge." 
+ - type: workflow_failures + owner: "nlohmann" + repo: "json" + branch: "master" score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 --- -Pipeline execution results are analyzed in the fork and the original nlohmann/json repository. \ No newline at end of file +Pipeline execution results are analyzed in nlohmann/json. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-27.md b/TSF/trustable/statements/JLS-27.md new file mode 100644 index 0000000000..62cc61cd9b --- /dev/null +++ b/TSF/trustable/statements/JLS-27.md @@ -0,0 +1,15 @@ +--- +level: 1.1 +normative: true +evidence: + type: coveralls_reporter + configuration: + owner: "score-json" + repo: "json" + branch: "main" + line_coverage: 99.186 + branch_coverage: 93.865 + digits: 3 +--- + +The test coverage for this version of nlohmann/json is monitored using Coveralls and is not decreasing over time, unless reasonably justified. \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-09.md b/TSF/trustable/well-formed-json/WFJ-09.md deleted file mode 100644 index 87f8fe40df..0000000000 --- a/TSF/trustable/well-formed-json/WFJ-09.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -level: 1.1 -normative: true ---- - -The service checks that numbers are represented in base 10 using decimal digits with an optional prefixed minus sign according to RFC8259. \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-10.md b/TSF/trustable/well-formed-json/WFJ-10.md deleted file mode 100644 index 434cbfc9c9..0000000000 --- a/TSF/trustable/well-formed-json/WFJ-10.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -level: 1.1 -normative: true ---- - -The service checks that strings are represented by the similar convention used in C programming languages. 
\ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-11.md b/TSF/trustable/well-formed-json/WFJ-11.md deleted file mode 100644 index ece534fee6..0000000000 --- a/TSF/trustable/well-formed-json/WFJ-11.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -level: 1.1 -normative: true ---- - -The service checks that JSON is only serialized and deserialized using UTF-8. \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-12.md b/TSF/trustable/well-formed-json/WFJ-12.md deleted file mode 100644 index 2ec6cab6e8..0000000000 --- a/TSF/trustable/well-formed-json/WFJ-12.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -level: 1.1 -normative: true ---- - -The service provides implementations that parse JSON texts, which ignores the presence of a byte order mark rather than treating it as an error. \ No newline at end of file diff --git a/tests/src/unit-regression2.cpp b/tests/src/unit-regression2.cpp index 2c3977fef9..bce447e363 100644 --- a/tests/src/unit-regression2.cpp +++ b/tests/src/unit-regression2.cpp @@ -388,7 +388,7 @@ struct Example_3810 Example_3810() = default; }; -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Example_3810, bla); // NOLINT(misc-use-internal-linkage) +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Example_3810, bla) // NOLINT(misc-use-internal-linkage) TEST_CASE("regression tests 2") { diff --git a/tests/src/unit-user_defined_input.cpp b/tests/src/unit-user_defined_input.cpp index befc4b17af..5115e8fd30 100644 --- a/tests/src/unit-user_defined_input.cpp +++ b/tests/src/unit-user_defined_input.cpp @@ -60,12 +60,12 @@ TEST_CASE("Custom container member begin/end") { const char* data; - const char* begin() const + const char* begin() const noexcept { return data; } - const char* end() const + const char* end() const noexcept { return data + strlen(data); // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) }