From 61e5c87a9b0ca58ad85d9ce5d8c99cb8e79e5055 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Wed, 15 Oct 2025 06:11:24 +0000 Subject: [PATCH 01/67] move listing of potential misbehaviours * printing misbehaviours with generate_list_of_misbehaviours.py * replacing bash script in publish_documentation.yml --- .github/workflows/publish_documentation.yml | 48 +------------------ TSF/scripts/generate_list_of_misbehaviours.py | 45 +++++++++++++++++ 2 files changed, 47 insertions(+), 46 deletions(-) create mode 100644 TSF/scripts/generate_list_of_misbehaviours.py diff --git a/.github/workflows/publish_documentation.yml b/.github/workflows/publish_documentation.yml index 63106da7d2..b18158308e 100644 --- a/.github/workflows/publish_documentation.yml +++ b/.github/workflows/publish_documentation.yml @@ -111,52 +111,8 @@ jobs: # Fetch closed issues from the nlohmann/json repository gh issue list --repo "$REPO" --state closed --limit 10000 --json number,title,state,createdAt,url,labels > raw_closed_issues.json - # Add title to the output file - echo "# Misbehaviours Report" > $OUTPUT_FILE - echo "" >> $OUTPUT_FILE - echo "This report lists known misbehaviours or bugs of v3.12.0 of the nlohmann/json repository. The misbehaviours are compiled from github issues of the nlohmann/json repository, and link to each corresponding issue." 
>> $OUTPUT_FILE - echo "" >> $OUTPUT_FILE - - # Add subtitle for open issues - echo "## Open Issues" >> $OUTPUT_FILE - echo "" >> $OUTPUT_FILE - - # Filter raw open issues for labels containing "bug" and convert to output .md file - jq -r ' - map(select(.labels[]?.name | test("bug"; "i"))) | - map("### [#\(.number)](\(.url))\n- **Title:** \(.title)\n- **State:** \(.state)\n- **Created At:** \(.createdAt)\n") | - .[] - ' raw_open_issues.json >> $OUTPUT_FILE - - jq -c ' - .[] - | select(.labels[]?; .name | test("bug"; "i")) - | { n: .number - , md: "### [#\(.number)](\(.url))\n- **Title:** \(.title)\n- **State:** \(.state)\n- **Created At:** \(.createdAt)\n" - } - ' raw_open_issues.json | - while IFS= read -r rec; do - num=$(jq -r '.n' <<< "$rec") - md=$(jq -r '.md' <<< "$rec") - py_out=$(python3 ./TSF/scripts/identify_nlohmann_issue.py "$num") - { - printf "%s\n" "$md" - printf "%s\n" "$py_out" - } >> "$OUTPUT_FILE" - done - - # Add subtitle for closed issues - echo "" >> $OUTPUT_FILE - echo "## Closed Issues (since v3.12.0)" >> $OUTPUT_FILE - echo "" >> $OUTPUT_FILE - - # Filter raw closed issues for labels containing "bug", created after release date of nlohmann/json version in use, and convert to output .md file - jq -r ' - map(select(.labels[]?.name | test("bug"; "i"))) | - map(select(.createdAt > "2025-04-11T00:00:00Z")) | # Adjust date as needed, 2025-04-11 corresponds to release v3.12.0 of nlohmann/json - map("### [#\(.number)](\(.url))\n- **Title:** \(.title)\n- **State:** \(.state)\n- **Created At:** \(.createdAt)\n") | - .[] - ' raw_closed_issues.json >> $OUTPUT_FILE + # print the list of misbehaviours + python3 ./TSF/scripts/generate_list_of_misbehaviours.py > $OUTPUT_FILE # Push misbehaviours file to save_historical_data branch git add TSF/misbehaviours.md diff --git a/TSF/scripts/generate_list_of_misbehaviours.py b/TSF/scripts/generate_list_of_misbehaviours.py new file mode 100644 index 0000000000..485f0005c8 --- /dev/null +++ 
b/TSF/scripts/generate_list_of_misbehaviours.py @@ -0,0 +1,45 @@ +import json +from datetime import datetime, timezone +from identify_nlohmann_issue import comment_nlohmann_misbehaviours + +version = "3.12.0" +release_date = "2025-04-11T08:43:39Z" + +if __name__ == "__main__": + release_time = datetime.strptime(release_date,"%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc).timestamp() + + # fetch relevant issues + with open("raw_open_issues.json") as list_1: + all_open_issues = json.load(list_1) + relevant_open_issues = [all_open_issues[i] for i in range(0,len(all_open_issues)) + if len(all_open_issues[i].get("labels",[]))!=0 + and (all_open_issues[i].get("labels"))[0].get("name") == "kind: bug" + ] + with open("raw_closed_issues.json") as list_2: + all_closed_issues = json.load(list_2) + relevant_closed_issues = [all_closed_issues[i] for i in range(0,len(all_closed_issues)) + if len(all_closed_issues[i].get("labels",[]))!=0 + and (all_closed_issues[i].get("labels"))[0].get("name") == "kind: bug" + and datetime.strptime(all_closed_issues[i].get("createdAt","2000-01-01T00:00:00Z"),"%Y-%m-%dT%H:%M:%SZ") + .replace(tzinfo=timezone.utc) + .timestamp() + >=release_time + ] + + print("# Misbehaviours Report\n") + print(f"This report lists known misbehaviours or bugs of version {version} of the nlohmann/json repository.") + print("The misbehaviours are compiled from github issues of the nlohmann/json repository, and link to each corresponding issue.\n") + + + print("## Open issues\n") + for issue in relevant_open_issues: + print(f"### [#{issue.get("number")}]({issue.get("url")})\n- **Title:** {issue.get("title")}\n- **State:** {issue.get("state")}\n- **Created At:** {issue.get("createdAt")}\n") + comment_nlohmann_misbehaviours(int(issue.get("number"))) + print("\n") + + print(f"\n## Closed Issues (since version {version}\n") + for issue in relevant_closed_issues: + print(f"### [#{issue.get("number")}]({issue.get("url")})\n- **Title:** {issue.get("title")}\n- **State:** 
{issue.get("state")}\n- **Created At:** {issue.get("createdAt")}\n") + comment_nlohmann_misbehaviours(int(issue.get("number"))) + print("\n") + From b437a89dcc428c3a1731c65fd2e66e6556c92265 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Wed, 15 Oct 2025 07:51:55 +0000 Subject: [PATCH 02/67] fix url --- TSF/trustable/statements/JLS-14.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TSF/trustable/statements/JLS-14.md b/TSF/trustable/statements/JLS-14.md index 374c7df727..21415a7369 100644 --- a/TSF/trustable/statements/JLS-14.md +++ b/TSF/trustable/statements/JLS-14.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: website - url: "/nlohmann/json/tree/v3.12.0" + url: "https://github.com/nlohmann/json/tree/v3.12.0" description: "release site of nlohmann/json containing the sha values" evidence: type: sha_checker From 3a1cbdc3288d731897da13c9f1df3359aaa93af2 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Wed, 15 Oct 2025 08:40:54 +0000 Subject: [PATCH 03/67] auto-update JLS-11 --- TSF/trustable/statements/JLS-11.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TSF/trustable/statements/JLS-11.md b/TSF/trustable/statements/JLS-11.md index ea3d2a7fb1..e95b6eba81 100644 --- a/TSF/trustable/statements/JLS-11.md +++ b/TSF/trustable/statements/JLS-11.md @@ -4,7 +4,7 @@ normative: true evidence: type: issue_checker configuration: - release_date: "2025-04-11T00:00:00Z" + release_date: "2025-04-11T08:43:39Z" list_of_known_misbehaviours: "./TSF/docs/nlohmann_misbehaviours_comments.md" --- From d9fd2c7dad84cda3445a43dd5508dff71ddabcf0 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 16 Oct 2025 06:24:13 +0000 Subject: [PATCH 04/67] add reference and validators for numbers of failed workflows --- .dotstop_extensions/references.py | 47 ++++++++++++++++++++++++++++++- .dotstop_extensions/validators.py | 38 +++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) diff --git 
a/.dotstop_extensions/references.py b/.dotstop_extensions/references.py index a6406eb3ab..eebdb952f9 100644 --- a/.dotstop_extensions/references.py +++ b/.dotstop_extensions/references.py @@ -4,6 +4,7 @@ from trudag.dotstop.core.reference.references import LocalFileReference import requests import sqlite3 +import re # Constants MAX_JSON_LINES_FOR_DISPLAY = 25 @@ -788,4 +789,48 @@ def as_markdown(self, filepath: None | str = None) -> str: return result def __str__(self) -> str: - return str(self._path) \ No newline at end of file + return str(self._path) + +class NumberOfFailures(BaseReference): + def __init__(self, owner: str, repo: str, branch: str | None = None) -> None: + self._owner = owner + self._repo = repo + self._branch = branch + + @classmethod + def type(cls) -> str: + return "workflow_failures" + + @property + def content(self) -> bytes: + # build the url + url = f"https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure" + if self._branch is not None: + url += "+branch%3A{self._branch}" + # ask the website + res = requests.get(url) + # if call is not successful, raise an error + if res.status_code != 200: + candidate = f"The url {url} is not reachable, so that the number of failed workflows can not be fetched!" + raise RuntimeError(candidate) + # otherwise fetch the number printed in the head of the table + m = re.search(r'(\d+)\s+workflow run results', res.text, flags=re.I) + if m is None: + candidate = f"The number of failed workflows can not be found, please check that the table head contains \"XX workflow run results\"!" 
+ raise RuntimeError(candidate) + return m.group(1).encode('utf-8') + + def as_markdown(self, filepath: None | str = None) -> str: + # If we did not add a description, nothing is printed + if (self._description == ""): + return f"`{self._url}`" + # else, we print the description below the url + return f"`{self._url}`\n"+make_md_bullet_point(self._description,1) + + def __str__(self) -> str: + # this is used as a title in the trudag report + if self._branch is not None: + result = f"failures on branch {self._branch} of {self._owner}/{self._repo}" + else: + result = f"failures on {self._owner}/{self._repo}" + return result \ No newline at end of file diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py index afdbd2519b..b270c6a741 100644 --- a/.dotstop_extensions/validators.py +++ b/.dotstop_extensions/validators.py @@ -5,6 +5,8 @@ import hashlib import json from datetime import datetime, timezone +import re +import subprocess yaml: TypeAlias = str | int | float | list["yaml"] | dict[str, "yaml"] @@ -322,3 +324,39 @@ def check_issues(configuration: dict[str, yaml]) -> tuple[float, list[Exception return(0.0,[]) # If you are here, then there are no applicable misbehaviours. 
return (1.0, []) + +def did_workflows_fail(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + owner = configuration.get("owner",None) + if owner is None: + return (0.0, RuntimeError("The owner is not specified.")) + repo = configuration.get("repo",None) + if repo is None: + return (0.0, RuntimeError("The repository is not specified.")) + event = configuration.get("event","push") + url = f"https://github.com/{owner}/{repo}/actions?query=event%3A{event}+is%3Afailure" + branch = configuration.get("branch",None) + if branch is not None: + url += "+branch%3A{branch}" + res = requests.get(url) + if res.status_code != 200: + return (0.0, RuntimeError(f"The website {url} can not be successfully reached!")) + m = re.search(r'(\d+)\s+workflow run results', res.text, flags=re.I) + if m is None: + return (0.0, RuntimeError("The number of failed workflows can not be found.")) + if m.group(1).strip() != "0": + return (0.0, Warning("There are failed workflows!")) + return (1.0, []) + +def is_branch_protected(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + branch = configuration.get("branch",None) + if branch is None: + return (0.0, RuntimeError("The branch is not specified.")) + res = subprocess.run(["git", "diff", "--cached", "quiet"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) + if res.returncode != 0: + raise RuntimeError("There are currently staged changes. 
Please unstage to proceed.") + try: + subprocess.run(["git","push","origin",f"HEAD:{branch}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) + return (0.0, RuntimeError(f"The branch {branch} is not protected!")) + except: + return (1.0, []) + From eba15e6b313778c5babed5d3c2571a439223724f Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 16 Oct 2025 06:24:35 +0000 Subject: [PATCH 05/67] implement validators and reference --- TSF/trustable/statements/JLS-01.md | 9 ++++++--- TSF/trustable/statements/JLS-06.md | 7 ++++--- TSF/trustable/statements/JLS-07.md | 8 ++++---- TSF/trustable/statements/JLS-26.md | 6 +++--- 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/TSF/trustable/statements/JLS-01.md b/TSF/trustable/statements/JLS-01.md index 0c7859937f..1dd87a7a0e 100644 --- a/TSF/trustable/statements/JLS-01.md +++ b/TSF/trustable/statements/JLS-01.md @@ -4,9 +4,12 @@ normative: true references: - type: file path: .github/workflows/parent-workflow.yml - - type: web_content - url: https://github.com/score-json/json/settings/branch_protection_rules/65227858 - description: "branch protection rule for main branch specifying that failures of tests prevent merge." 
+evidence: + type: did_workflows_fail + configuration: + owner: "score-json" + repo: "json" + branch: "main" score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 diff --git a/TSF/trustable/statements/JLS-06.md b/TSF/trustable/statements/JLS-06.md index 298547bfa9..9d1ab173ba 100644 --- a/TSF/trustable/statements/JLS-06.md +++ b/TSF/trustable/statements/JLS-06.md @@ -2,9 +2,10 @@ level: 1.1 normative: true references: - - type: website - url: https://github.com/score-json/json/settings/branches - description: "Branch protection policies" + - type: workflow_failures + owner: "score-json" + repo: "json" + branch: "main" evidence: type: "check_artifact_exists" configuration: diff --git a/TSF/trustable/statements/JLS-07.md b/TSF/trustable/statements/JLS-07.md index a4a52df3b7..a55a9965c8 100644 --- a/TSF/trustable/statements/JLS-07.md +++ b/TSF/trustable/statements/JLS-07.md @@ -1,10 +1,10 @@ --- level: 1.1 normative: true -references: - - type: web_content - url: https://github.com/score-json/json/settings/branch_protection_rules/65227858 - description: "branch protection rule for main branch specifying that failures of tests prevent merge." +evidence: + type: is_branch_protected + configuration: + branch: main score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 diff --git a/TSF/trustable/statements/JLS-26.md b/TSF/trustable/statements/JLS-26.md index dc3828025b..59a5717646 100644 --- a/TSF/trustable/statements/JLS-26.md +++ b/TSF/trustable/statements/JLS-26.md @@ -2,9 +2,9 @@ level: 1.1 normative: true references: - - type: web_content - url: https://github.com/score-json/json/settings/branch_protection_rules/65227858 - description: "branch protection rule for main branch specifying that failures of any workflow prevent merge." 
+ - type: workflow_failures + owner: "score-json" + repo: "json" score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 From 3358c270fae58c82b78c2ac53e710afb0bc08a97 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 16 Oct 2025 06:48:17 +0000 Subject: [PATCH 06/67] document validators --- .dotstop_extensions/README.md | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index 3a982501a8..f2cdd512e3 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -254,7 +254,7 @@ The test-files are called unit-FILE_NAME.cpp. In the configuration, FILE_NAME is For each test specified in test-files, the number of passed and failed test-cases is calculated, while the number of skipped test-cases is ignored. The score of each test is then the ratio of passed test-cases compared to all non-skipped test-cases; the total score is the mean of the individual scores. -## issue_checker +## check_issues The automatic validator `check_issues` is intended to evaluate the json-lists `raw_open_issues.json` and `raw_closed_issues.json` and compare with the list of known issues of nlohmann/json labelled as bug opened since the release of the version of nlohmann/json that is documented. The json lists are generated in the publish_documentation-Workflow, and not persistently stored. @@ -277,6 +277,38 @@ From `raw_closed_issues.json`, all issue IDs are collected, which are labelled a If for any of these IDs, it is not explicitly indicated in the list of known misbehaviours that this issue does not apply to Eclipse S-CORE, then the score 0.0 is returned. Otherwise, the score 1.0 is assigned. +## did_workflows_fail + +The automatic validator `did_workflows_fail` queries the web-site `https://github.com/{owner}/{repo}/actions?query=event%3A{event}+is%3Afailure+branch%3A{branch}` and looks on the number of workflow run results which is printed at the head of the table. 
+In case that this number is not zero, a score of 0.0 is returned, and 1.0 otherwise. + +The expected configuration is given as follows: + +``` +evidence: + type: check_issues + configuration: + owner: eclipse-score # owner of the repository + repo: inc_nlohmann_json # name of the repository + branch: json_version_3_12_0 # name of the branch + action: push # optional, default is push +``` + +## is_branch_protected + +The automatic validator `is_branch_protected` tries to push to the specified branch, i.e. to execute the command `git push origin HEAD:{branch}`. +In case any changes are staged during the execution of the validator, an error is thrown before the push occurs. +Since the validator is intended to be executed during a workflow run, where no change is staged, it is no expected that the error is thrown. + +The expected configuration is given as follows: + +``` +evidence: + type: check_issues + configuration: + branch: json_version_3_12_0 # name of the branch +``` + # Data store interface From c3c17db37f19c602ff69c388146b9fedea06fe38 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 16 Oct 2025 06:56:48 +0000 Subject: [PATCH 07/67] document reference --- .dotstop_extensions/README.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index f2cdd512e3..cf16f88cec 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -192,6 +192,26 @@ references: --- ``` +## NumberOfFailures + +This reference queries `https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure+branch%3A{self._branch}` and collects the number of failed workflow runs as its content. +Here, owner, repo and branch are the arguments given to the constructor of the reference. +If no branch is specified, then all failures are collected, i.e. `https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure` is queried. 
+In case the website is un-reachable, or the github layout changes drastically so that the number of failed workflow runs does not exists at the expected location, an error is thrown. + +The expected configuration is + +``` +--- +... +references: +- type: workflow_failures + owner: "eclipse-score" + repo: "inc_nlohmann_json" + branch: "json_version_3_12_0" +--- +``` + # Validators Validators are extensions of trudag, used to validate any data that can be reduced to a floating point metric. The resulting scores are used as evidence for the trustability of items in the trustable graph. From 8f946703ee4a8857bf8970cc3843cfb4255161bc Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 16 Oct 2025 06:57:02 +0000 Subject: [PATCH 08/67] add as_markdown --- .dotstop_extensions/references.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.dotstop_extensions/references.py b/.dotstop_extensions/references.py index eebdb952f9..1c67219b2e 100644 --- a/.dotstop_extensions/references.py +++ b/.dotstop_extensions/references.py @@ -821,11 +821,10 @@ def content(self) -> bytes: return m.group(1).encode('utf-8') def as_markdown(self, filepath: None | str = None) -> str: - # If we did not add a description, nothing is printed - if (self._description == ""): - return f"`{self._url}`" - # else, we print the description below the url - return f"`{self._url}`\n"+make_md_bullet_point(self._description,1) + if self._branch is not None: + return f"{self.content().decode('utf-8')} workflows failed on {self._owner}/{self._repo}" + else: + return f"{self.content().decode('utf-8')} workflows failed on branch {self._branch} of {self._owner}/{self._repo}" def __str__(self) -> str: # this is used as a title in the trudag report From 53c9ab1ddf897783e89bdc93496e703f721b5827 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 16 Oct 2025 08:18:42 +0000 Subject: [PATCH 09/67] add utterly important quotation marks --- .dotstop_extensions/README.md | 18 
+++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index cf16f88cec..2333ed9a82 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -306,14 +306,16 @@ The expected configuration is given as follows: ``` evidence: - type: check_issues + type: did_workflows_fail configuration: - owner: eclipse-score # owner of the repository - repo: inc_nlohmann_json # name of the repository - branch: json_version_3_12_0 # name of the branch - action: push # optional, default is push + owner: "eclipse-score" # owner of the repository + repo: "inc_nlohmann_json" # name of the repository + branch: "json_version_3_12_0" # name of the branch + action: "push" # optional, default is push ``` +It is of utmost importance that the arguments come with quotation marks, otherwise, the update helper does not work as intended. + ## is_branch_protected The automatic validator `is_branch_protected` tries to push to the specified branch, i.e. to execute the command `git push origin HEAD:{branch}`. @@ -324,11 +326,13 @@ The expected configuration is given as follows: ``` evidence: - type: check_issues + type: is_branch_protected configuration: - branch: json_version_3_12_0 # name of the branch + branch: "json_version_3_12_0" # name of the branch ``` +It is of utmost importance that the arguments come with quotation marks, otherwise, the update helper does not work as intended. 
+ # Data store interface From edc959839a9330ace8832b0efbe15195655926a9 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 16 Oct 2025 08:53:22 +0000 Subject: [PATCH 10/67] add quotation marks --- TSF/trustable/statements/JLS-07.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TSF/trustable/statements/JLS-07.md b/TSF/trustable/statements/JLS-07.md index a55a9965c8..4c327f2f77 100644 --- a/TSF/trustable/statements/JLS-07.md +++ b/TSF/trustable/statements/JLS-07.md @@ -4,7 +4,7 @@ normative: true evidence: type: is_branch_protected configuration: - branch: main + branch: "main" score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 From 9f70c6c829a21a163bb8570d975369006f0f3ba9 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 16 Oct 2025 13:01:44 +0000 Subject: [PATCH 11/67] fix as_markdown --- .dotstop_extensions/references.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.dotstop_extensions/references.py b/.dotstop_extensions/references.py index 1c67219b2e..747093e06d 100644 --- a/.dotstop_extensions/references.py +++ b/.dotstop_extensions/references.py @@ -822,9 +822,9 @@ def content(self) -> bytes: def as_markdown(self, filepath: None | str = None) -> str: if self._branch is not None: - return f"{self.content().decode('utf-8')} workflows failed on {self._owner}/{self._repo}" + return f"{self.content.decode('utf-8')} workflows failed on {self._owner}/{self._repo}" else: - return f"{self.content().decode('utf-8')} workflows failed on branch {self._branch} of {self._owner}/{self._repo}" + return f"{self.content.decode('utf-8')} workflows failed on branch {self._branch} of {self._owner}/{self._repo}" def __str__(self) -> str: # this is used as a title in the trudag report From 6e153950c12c32b0aeea5c9841106124cd1c82a2 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 16 Oct 2025 13:22:10 +0000 Subject: [PATCH 12/67] delete unused items --- TSF/trustable/well-formed-json/WFJ-09.md | 6 ------ 
TSF/trustable/well-formed-json/WFJ-10.md | 6 ------ TSF/trustable/well-formed-json/WFJ-11.md | 6 ------ TSF/trustable/well-formed-json/WFJ-12.md | 6 ------ 4 files changed, 24 deletions(-) delete mode 100644 TSF/trustable/well-formed-json/WFJ-09.md delete mode 100644 TSF/trustable/well-formed-json/WFJ-10.md delete mode 100644 TSF/trustable/well-formed-json/WFJ-11.md delete mode 100644 TSF/trustable/well-formed-json/WFJ-12.md diff --git a/TSF/trustable/well-formed-json/WFJ-09.md b/TSF/trustable/well-formed-json/WFJ-09.md deleted file mode 100644 index 87f8fe40df..0000000000 --- a/TSF/trustable/well-formed-json/WFJ-09.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -level: 1.1 -normative: true ---- - -The service checks that numbers are represented in base 10 using decimal digits with an optional prefixed minus sign according to RFC8259. \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-10.md b/TSF/trustable/well-formed-json/WFJ-10.md deleted file mode 100644 index 434cbfc9c9..0000000000 --- a/TSF/trustable/well-formed-json/WFJ-10.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -level: 1.1 -normative: true ---- - -The service checks that strings are represented by the similar convention used in C programming languages. \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-11.md b/TSF/trustable/well-formed-json/WFJ-11.md deleted file mode 100644 index ece534fee6..0000000000 --- a/TSF/trustable/well-formed-json/WFJ-11.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -level: 1.1 -normative: true ---- - -The service checks that JSON is only serialized and deserialized using UTF-8. 
\ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-12.md b/TSF/trustable/well-formed-json/WFJ-12.md deleted file mode 100644 index 2ec6cab6e8..0000000000 --- a/TSF/trustable/well-formed-json/WFJ-12.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -level: 1.1 -normative: true ---- - -The service provides implementations that parse JSON texts, which ignores the presence of a byte order mark rather than treating it as an error. \ No newline at end of file From eedfe1bf64393b571e9edd1725ea1751dc32b172 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 16 Oct 2025 13:27:36 +0000 Subject: [PATCH 13/67] add test --- TSF/trustable/no-json-faults/NJF-09.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/TSF/trustable/no-json-faults/NJF-09.md b/TSF/trustable/no-json-faults/NJF-09.md index 9639bb52da..50ae6b730c 100644 --- a/TSF/trustable/no-json-faults/NJF-09.md +++ b/TSF/trustable/no-json-faults/NJF-09.md @@ -18,6 +18,12 @@ references: name: "lexer::scan" path: "include/nlohmann/detail/input/lexer.hpp" description: "function, which is called by parser::sax_parse_internal to read input data" + - type: JSON_testsuite + name: "json.org examples;1.json" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json.org/1.json" + description: "Checks that a valid json object containing all six structural characters is accepted." 
score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 From a8820089129757cd52513d6d5b4f843bf110b39f Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 17 Oct 2025 05:41:45 +0000 Subject: [PATCH 14/67] add whitespace tests --- TSF/tests/unit-arrays.cpp | 20 ++++++++++ TSF/tests/unit-literals.cpp | 51 ++++++++++++++++++++++++++ TSF/tests/unit-strings.cpp | 19 ++++++++++ TSF/trustable/no-json-faults/NJF-11.md | 15 ++++++++ 4 files changed, 105 insertions(+) diff --git a/TSF/tests/unit-arrays.cpp b/TSF/tests/unit-arrays.cpp index 3e94f0c3df..8ce0f087a0 100644 --- a/TSF/tests/unit-arrays.cpp +++ b/TSF/tests/unit-arrays.cpp @@ -48,6 +48,26 @@ TEST_CASE("accept") // double check if rejection is not due to overflow CHECK_THROWS_AS(parser_helper(faulty_array.str()),json::parse_error&); } + SECTION("whitespace") + { + CHECK(json::accept(" [true] ")); + CHECK(json::accept(" [true]\t")); + CHECK(json::accept(" [true]\n")); + CHECK(json::accept(" [true]\u000d")); + CHECK(json::accept("\t[true] ")); + CHECK(json::accept("\t[true]\t")); + CHECK(json::accept("\t[true]\n")); + CHECK(json::accept("\t[true]\u000d")); + CHECK(json::accept("\n[true] ")); + CHECK(json::accept("\n[true]\t")); + CHECK(json::accept("\n[true]\n")); + CHECK(json::accept("\n[true]\u000d")); + CHECK(json::accept("\u000d[true] ")); + CHECK(json::accept("\u000d[true]\t")); + CHECK(json::accept("\u000d[true]\n")); + CHECK(json::accept("\u000d[true]\u000d")); + CHECK(json::accept(" \n\t\t\n \t\u000d[true] \n\n\n \t\t\u000d \n")); + } } TEST_CASE("parse") diff --git a/TSF/tests/unit-literals.cpp b/TSF/tests/unit-literals.cpp index 16f360ed64..c38570dd44 100644 --- a/TSF/tests/unit-literals.cpp +++ b/TSF/tests/unit-literals.cpp @@ -1075,6 +1075,57 @@ TEST_CASE("accept") CHECK(!json::accept("NAN")); } } + SECTION("whitespace") + { + CHECK(json::accept(" false ")); + CHECK(json::accept(" false\t")); + CHECK(json::accept(" false\n")); + CHECK(json::accept(" false\u000d")); + CHECK(json::accept("\tfalse ")); + 
CHECK(json::accept("\tfalse\t")); + CHECK(json::accept("\tfalse\n")); + CHECK(json::accept("\tfalse\u000d")); + CHECK(json::accept("\nfalse ")); + CHECK(json::accept("\nfalse\t")); + CHECK(json::accept("\nfalse\n")); + CHECK(json::accept("\nfalse\u000d")); + CHECK(json::accept("\u000dfalse ")); + CHECK(json::accept("\u000dfalse\t")); + CHECK(json::accept("\u000dfalse\n")); + CHECK(json::accept("\u000dfalse\u000d")); + CHECK(json::accept(" null ")); + CHECK(json::accept(" null\t")); + CHECK(json::accept(" null\n")); + CHECK(json::accept(" null\u000d")); + CHECK(json::accept("\tnull ")); + CHECK(json::accept("\tnull\t")); + CHECK(json::accept("\tnull\n")); + CHECK(json::accept("\tnull\u000d")); + CHECK(json::accept("\nnull ")); + CHECK(json::accept("\nnull\t")); + CHECK(json::accept("\nnull\n")); + CHECK(json::accept("\nnull\u000d")); + CHECK(json::accept("\u000dnull ")); + CHECK(json::accept("\u000dnull\t")); + CHECK(json::accept("\u000dnull\n")); + CHECK(json::accept("\u000dnull\u000d")); + CHECK(json::accept(" true ")); + CHECK(json::accept(" true\t")); + CHECK(json::accept(" true\n")); + CHECK(json::accept(" true\u000d")); + CHECK(json::accept("\ttrue ")); + CHECK(json::accept("\ttrue\t")); + CHECK(json::accept("\ttrue\n")); + CHECK(json::accept("\ttrue\u000d")); + CHECK(json::accept("\ntrue ")); + CHECK(json::accept("\ntrue\t")); + CHECK(json::accept("\ntrue\n")); + CHECK(json::accept("\ntrue\u000d")); + CHECK(json::accept("\u000dtrue ")); + CHECK(json::accept("\u000dtrue\t")); + CHECK(json::accept("\u000dtrue\n")); + CHECK(json::accept("\u000dtrue\u000d")); + } } TEST_CASE("parse") diff --git a/TSF/tests/unit-strings.cpp b/TSF/tests/unit-strings.cpp index 65f90854b4..830ad4372f 100644 --- a/TSF/tests/unit-strings.cpp +++ b/TSF/tests/unit-strings.cpp @@ -243,6 +243,25 @@ TEST_CASE("accept") CHECK(!json::accept("\"\xfe\xfe\xff\xff\"")); } } + SECTION("whitespace") + { + CHECK(json::accept(" \"foo\" ")); + CHECK(json::accept(" \"foo\"\t")); + CHECK(json::accept(" 
\"foo\"\n")); + CHECK(json::accept(" \"foo\"\u000d")); + CHECK(json::accept("\t\"foo\" ")); + CHECK(json::accept("\t\"foo\"\t")); + CHECK(json::accept("\t\"foo\"\n")); + CHECK(json::accept("\t\"foo\"\u000d")); + CHECK(json::accept("\n\"foo\" ")); + CHECK(json::accept("\n\"foo\"\t")); + CHECK(json::accept("\n\"foo\"\n")); + CHECK(json::accept("\n\"foo\"\u000d")); + CHECK(json::accept("\u000d\"foo\" ")); + CHECK(json::accept("\u000d\"foo\"\t")); + CHECK(json::accept("\u000d\"foo\"\n")); + CHECK(json::accept("\u000d\"foo\"\u000d")); + } } TEST_CASE("Unicode" * doctest::skip()) diff --git a/TSF/trustable/no-json-faults/NJF-11.md b/TSF/trustable/no-json-faults/NJF-11.md index 083c44d913..4674d0a2da 100644 --- a/TSF/trustable/no-json-faults/NJF-11.md +++ b/TSF/trustable/no-json-faults/NJF-11.md @@ -6,6 +6,21 @@ references: name: "lexer::skip_whitespace" path: "include/nlohmann/detail/input/lexer.hpp" description: "function, which skips admissible whitespace during reading" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-literals.cpp" + - type: cpp_test + name: "accept;whitespace;Leading and tailing" + path: "TSF/tests/unit-numbers.cpp" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-strings.cpp" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-arrays.cpp" score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 From 0fd295b589a072fd4d9d24638e184fc4c452e34f Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 17 Oct 2025 14:00:19 +0000 Subject: [PATCH 15/67] add checklists --- TSF/trustable/assertions/TA-Analysis.md | 34 ++++++++++++++++++++ TSF/trustable/assertions/TA-Behaviours.md | 26 +++++++++++++++ TSF/trustable/assertions/TA-Confidence.md | 10 ++++++ TSF/trustable/assertions/TA-Constraints.md | 16 +++++++++ TSF/trustable/assertions/TA-Data.md | 22 +++++++++++++ TSF/trustable/assertions/TA-Fixes.md | 33 +++++++++++++++++++ 
TSF/trustable/assertions/TA-Indicators.md | 26 +++++++++++++++ TSF/trustable/assertions/TA-Inputs.md | 34 ++++++++++++++++++++ TSF/trustable/assertions/TA-Iterations.md | 12 +++++++ TSF/trustable/assertions/TA-Methodologies.md | 24 ++++++++++++++ TSF/trustable/assertions/TA-Misbehaviours.md | 26 +++++++++++++++ TSF/trustable/assertions/TA-Releases.md | 14 ++++++++ TSF/trustable/assertions/TA-Supply-Chain.md | 14 ++++++++ TSF/trustable/assertions/TA-Tests.md | 14 ++++++++ TSF/trustable/assertions/TA-Updates.md | 14 ++++++++ TSF/trustable/assertions/TA-Validation.md | 30 +++++++++++++++++ 16 files changed, 349 insertions(+) create mode 100644 TSF/trustable/assertions/TA-Analysis.md create mode 100644 TSF/trustable/assertions/TA-Behaviours.md create mode 100644 TSF/trustable/assertions/TA-Confidence.md create mode 100644 TSF/trustable/assertions/TA-Constraints.md create mode 100644 TSF/trustable/assertions/TA-Data.md create mode 100644 TSF/trustable/assertions/TA-Fixes.md create mode 100644 TSF/trustable/assertions/TA-Indicators.md create mode 100644 TSF/trustable/assertions/TA-Inputs.md create mode 100644 TSF/trustable/assertions/TA-Iterations.md create mode 100644 TSF/trustable/assertions/TA-Methodologies.md create mode 100644 TSF/trustable/assertions/TA-Misbehaviours.md create mode 100644 TSF/trustable/assertions/TA-Releases.md create mode 100644 TSF/trustable/assertions/TA-Supply-Chain.md create mode 100644 TSF/trustable/assertions/TA-Tests.md create mode 100644 TSF/trustable/assertions/TA-Updates.md create mode 100644 TSF/trustable/assertions/TA-Validation.md diff --git a/TSF/trustable/assertions/TA-Analysis.md b/TSF/trustable/assertions/TA-Analysis.md new file mode 100644 index 0000000000..38bec7a993 --- /dev/null +++ b/TSF/trustable/assertions/TA-Analysis.md @@ -0,0 +1,34 @@ +# Checklist for TA-ANALYSIS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* What fraction of Expectations are covered by the test data?
+ Answer: Every statement supporting both of the expectations is ultimately supported by a test, except for WFJ-06. For WFJ-06 it is impossible to provide a direct tests, since this is a statement on infinitely many cases. Indirect tests are provided by the rejection of ill-formed json data. +* What fraction of Misbehaviours are covered by the monitored indicator data?
+ Answer: For the intended use-case, no misbehaviours have been identified. Furthermore, no indicator data are collected. +* How confident are we that the indicator data are accurate and timely?
+ Answer: No indicator data are collected. +* How reliable is the monitoring process?
+ Answer: Due to no indicator data being collected, there is no monitoring process. +* How well does the production data correlate with our test data?<br>
+ Answer: Due to the general nature of the library, there are no production data. +* Are we publishing our data analysis?
+ Answer: Since no production data exist and no indicator data are collected, no data analysis is performed, and consequently there is no analysis to publish. +* Are we comparing and analysing production data vs test?<br>
+ Answer: There are no production data. +* Are our results getting better, or worse?
+ Answer: Neither. +* Are we addressing spikes/regressions?
+ Answer: There are no spikes in the non-existent indicator data. If a test ever fails, then the spike is investigated. The results of fuzz testing are investigated in the original nlohmann/json. +* Do we have sensible/appropriate target failure rates?
+ Answer: For the unit and integration tests, 0. +* Do we need to check the targets?
+ Answer: No (TODO: confirm). +* Are we achieving the targets?<br>
+ Answer: Yes (TODO: confirm). +* Are all underlying assumptions and target conditions for the analysis specified?<br>
+ Answer: There is no analysis. +* Have the underlying assumptions been verified using known good data?
+ Answer: There is no analysis so that there are no underlying assumptions. +* Has the Misbehaviour identification process been verified using known bad data?
+ Answer: Misbehaviours published on nlohmann/json usually provide minimal working examples for reproducing the faulty behaviour. +* Are results shown to be reproducible?
+ Answer: The tests can be reproduced on every machine. diff --git a/TSF/trustable/assertions/TA-Behaviours.md b/TSF/trustable/assertions/TA-Behaviours.md new file mode 100644 index 0000000000..3d22bae965 --- /dev/null +++ b/TSF/trustable/assertions/TA-Behaviours.md @@ -0,0 +1,26 @@ +# Checklist for TA-BEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How has the list of Expectations varied over time?
+ Answer: The list of expectations is taken from [here](https://eclipse-score.github.io/score/main/modules/baselibs/json/docs/requirements/index.html), whose development can be retraced using git. +* How confident can we be that this list is comprehensive?
+ Answer: The list of expectations has been collected amongst the stakeholders in S-CORE, so that we are very confident that the list is comprehensive. +* Could some participants have incentives to manipulate information?
+ Answer: We can not imagine any reason. +* Could there be whole categories of Expectations still undiscovered?
+ Answer: It is unlikely, but the parsing of cbor could become relevant at some time. +* Can we identify Expectations that have been understood but not specified?
+ Answer: No. +* Can we identify some new Expectations, right now?
+ Answer: No. +* How confident can we be that this list covers all critical requirements?
+ Answer: We can not think of any more critical requirement of a JSON parser than to parse JSON data. +* How comprehensive is the list of tests?
+ Answer: The tests cover 99.186% of the code, according to coveralls, so we assume that the list of tests is quite comprehensive. +* Is every Expectation covered by at least one implemented test?
+ Answer: Yes, both of the expectations are covered by at least one implemented test. +* Are there any Expectations where we believe more coverage would help?
+ Answer: No. +* How do dependencies affect Expectations, and are their properties verifiable?
+ Answer: There are no dependencies. +* Are input analysis findings from components, tools, and data considered in relation to Expectations?
+ Answer: No findings have been found. diff --git a/TSF/trustable/assertions/TA-Confidence.md b/TSF/trustable/assertions/TA-Confidence.md new file mode 100644 index 0000000000..ffcb14d063 --- /dev/null +++ b/TSF/trustable/assertions/TA-Confidence.md @@ -0,0 +1,10 @@ +# Checklist for TA-CONFIDENCE from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* What is the algorithm for combining/comparing the scores?
+ Answer: It is the standard algorithm of trudag. +* How confident are we that this algorithm is fit for purpose?
+ Answer: We have no reason to assume that the standard algorithm is not fit for our purpose. +* What are the trends for each score?
+ Answer: CAN NOT BE ANSWERED NOW +* How well do our scores correlate with external feedback signals?
+ Answer: CAN NOT BE ANSWERED NOW \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Constraints.md b/TSF/trustable/assertions/TA-Constraints.md new file mode 100644 index 0000000000..873ff34571 --- /dev/null +++ b/TSF/trustable/assertions/TA-Constraints.md @@ -0,0 +1,16 @@ +# Checklist for TA-CONSTRAINTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Are the constraints grounded in realistic expectations, backed by real-world examples?
+ Answer: Yes, the constraints are grounded in realistic expectations. TODO: back this answer with concrete real-world examples. +* Do they effectively guide downstream consumers in expanding upon existing Statements?<br>
+ Answer: +* Do they provide clear guidance for upstreams on reusing components with well-defined claims?
+ Answer: +* Are any Statements explicitly designated as not reusable or adaptable?
+ Answer: +* Are there worked examples from downstream or upstream users demonstrating these constraints in practice?
+ Answer: +* Have there been any documented misunderstandings from users, and are these visibly resolved?
+ Answer: +* Do external users actively keep up with updates, and are they properly notified of any changes?
+ Answer: \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Data.md b/TSF/trustable/assertions/TA-Data.md new file mode 100644 index 0000000000..8beac041b7 --- /dev/null +++ b/TSF/trustable/assertions/TA-Data.md @@ -0,0 +1,22 @@ +# Checklist for TA-DATA from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Is all test data stored with long-term accessibility?
+ Answer: If we assume that github is long-term accessible, then yes. +* Is all monitoring data stored with long-term accessibility?
+ Answer: There are no monitoring data. +* Are extensible data models implemented?
+ Answer: The data are stored in a sqlite database. +* Is sensitive data handled correctly (broadcasted, stored, discarded, or anonymised) with appropriate encryption and redundancy?
+ Answer: There are no sensitive data produced, collected or stored. +* Are proper backup mechanisms in place?
+ Answer: Not more than the default mechanisms of github. +* Are storage and backup limits tested?
+ Answer: No. +* Are all data changes traceable?
+ Answer: Yes, due to the usage of github. +* Are concurrent changes correctly managed and resolved?
+ Answer: Yes, due to the usage of github. +* Is data accessible only to intended parties?
+ Answer: There are no unintended parties. +* Are any subsets of our data being published?
+ Answer: Yes, the collected data are publicly available. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Fixes.md b/TSF/trustable/assertions/TA-Fixes.md new file mode 100644 index 0000000000..4ec52aec32 --- /dev/null +++ b/TSF/trustable/assertions/TA-Fixes.md @@ -0,0 +1,33 @@ +# Checklist for TA-FIXES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + + +* How many faults have we identified in XYZ?
+ Answer: There are no identifiable faults concerning the expectations. +* How many unknown faults remain to be found, based on the number that have been processed so far?
+ Answer: It is unlikely that there are unknown faults concerning the expectations. +* Is there any possibility that people could be motivated to manipulate the lists (e.g. bug bonus or pressure to close).
+ Answer: Since the project is entirely open source, it is quite unlikely. +* How many faults may be unrecorded (or incorrectly closed, or downplayed)?
+ Answer: There may be none concerning the expectations. +* How do we collect lists of bugs and known vulnerabilities from components?
+ Answer: We pull the list from the issues reported to nlohmann/json labelled as bug and open or opened since the last release. +* How (and how often) do we check these lists for relevant bugs and known vulnerabilities?
+ Answer: Whenever we generate the documentation, the list is pulled. If there is an issue previously unrecorded, then the maintainer is enticed to check this issue on applicability. +* How confident can we be that the lists are honestly maintained?
+ Answer: We can not imagine a reason why the list could be dishonestly maintained. +* Could some participants have incentives to manipulate information?
+ Answer: We can not think of a reason why. +* How confident are we that the lists are comprehensive?
+ Answer: We have no reason to assume that discovered bugs are not reported to nlohmann/json. +* Could there be whole categories of bugs/vulnerabilities still undiscovered?
+ Answer: There could be a mislabelling of issues, but it is unlikely that there are bugs or vulnerabilities not labelled as bug. +* How effective is our triage/prioritisation?
+ Answer: UNKNOWN +* How many components have never been updated?
+ Answer: None, the single component is up to date. +* How confident are we that we could update them?
+ Answer: If nlohmann/json released a new version, we are very confident that we could update to that version. +* How confident are we that outstanding fixes do not impact our Expectations?<br>
+ Answer: We have not found any outstanding fixes impacting our expectations. +* How confident are we that outstanding fixes do not address Misbehaviours?
+ Answer: We are very confident that the outstanding fixes do not address any Misbehaviours, since no misbehaviours have been identified. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Indicators.md b/TSF/trustable/assertions/TA-Indicators.md new file mode 100644 index 0000000000..a7f3eaa286 --- /dev/null +++ b/TSF/trustable/assertions/TA-Indicators.md @@ -0,0 +1,26 @@ +# Checklist for TA-INDICATORS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How appropriate/thorough are the analyses that led to the indicators?<br>
+ Answer: There are no warning indicators, since no misbehaviours have been identified for the use of the library for parsing and verification of JSON data according to RFC8259. +* How confident can we be that the list of indicators is comprehensive?<br>
+ Answer: Since there are none, the list is not comprehensive at all. +* Could there be whole categories of warning indicators still missing?
+ Answer: Yes, there could. These warning indicators, however, should be implemented in the wrapper defining the interface between the library and the project using it. +* How has the list of advance warning indicators varied over time?
+ Answer: It has stayed constant. +* How confident are we that the indicators are leading/predictive?
+ Answer: There are none. +* Are there misbehaviours that have no advance warning indicators?
+ Answer: There are no misbehaviours identified. +* Can we collect data for all indicators?
+ Answer: We do collect data for all indicators that are currently implemented. +* Are the monitoring mechanisms used included in our Trustable scope?
+ Answer: No, but there are also none. +* Are there gaps or trends in the data?
+ Answer: There are no data. +* If there are gaps or trends, are they analysed and addressed?
+ Answer: There are no data. +* Is the data actually predictive/useful?
+ Answer: There are no data. +* Are indicators from code, component, tool, or data inspections taken into consideration?
+ Answer: There are no indicators. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Inputs.md b/TSF/trustable/assertions/TA-Inputs.md new file mode 100644 index 0000000000..c3281c45fb --- /dev/null +++ b/TSF/trustable/assertions/TA-Inputs.md @@ -0,0 +1,34 @@ +# Checklist for TA-INPUTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +The single_include/nlohmann/json.hpp is the single and only component of the library. + +* Are there components that are not on the list?
+ Answer: No. +* Are there assessments for all components?
+ Answer: ????? +* Has an assessment been done for the current version of the component?
+ Answer: ????? +* Have sources of bug and/or vulnerability data been identified?
+ Answer: There are no bug and/or vulnerability data. +* Have additional tests and/or Expectations been documented and linked to component assessment?
+ Answer: ?????? +* Are component tests run when integrating new versions of components?
+ Answer: There are no further components. +* Are there tools that are not on the list?
+ Answer: No. +* Are there impact assessments for all tools?
+ Answer: ?????? +* Have tools with high impact been qualified?
+ Answer: There are no tools with high impact. +* Were assessments or reviews done for the current tool versions?
+ Answer: ????? +* Have additional tests and/or Expectations been documented and linked to tool assessments?
+ Answer: No. +* Are tool tests run when integrating new versions of tools?
+ Answer: There are no tools. +* Are tool and component tests included in release preparation?
+ Answer: Yes, the tests of the library are included in the release. +* Can patches be applied, and then upstreamed for long-term maintenance?
+ Answer: Yes, if ever a misbehaviour is found and patched, then a pull-request to the original nlohmann/json repository can be opened to upstream the changes. +* Do all dependencies comply with acceptable licensing terms?
+ Answer: Yes. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Iterations.md b/TSF/trustable/assertions/TA-Iterations.md new file mode 100644 index 0000000000..52b0e770d5 --- /dev/null +++ b/TSF/trustable/assertions/TA-Iterations.md @@ -0,0 +1,12 @@ +# Checklist for TA-ITERATIONS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How much of the software is provided as binary only, expressed as a fraction of the BoM list?
+ Answer: None. +* How much is binary, expressed as a fraction of the total storage footprint?
+ Answer: None. +* For binaries, what claims are being made and how confident are we in the people/organisations making the claims?
+ Answer: There are no binaries. +* For third-party source code, what claims are we making, and how confident are we about these claims?
+ Answer: There is no third-party source code in the library. +* For software developed by us, what claims are we making, and how confident are we about these claims?
+ Answer: This is the remainder of the documentation. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Methodologies.md b/TSF/trustable/assertions/TA-Methodologies.md new file mode 100644 index 0000000000..8d495ea7f9 --- /dev/null +++ b/TSF/trustable/assertions/TA-Methodologies.md @@ -0,0 +1,24 @@ +# Checklist for TA-METHODOLOGIES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +This project follows purely the Methodologies of Eclipse S-CORE. + +* Are the identified gaps documented clearly to justify using a manual process?
+ Answer: +* Are the goals for each process clearly defined?
+ Answer: +* Is the sequence of procedures documented in an unambiguous manner?
+ Answer: +* Can improvements to the processes be suggested and implemented?
+ Answer: +* How frequently are processes changed?
+ Answer: +* How are changes to manual processes communicated?
+ Answer: +* Are there any exceptions to the processes?
+ Answer: +* How is evidence of process adherence recorded?
+ Answer: +* How is the effectiveness of the process evaluated?
+ Answer: +* Is ongoing training required to follow these processes?
+ Answer: \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Misbehaviours.md b/TSF/trustable/assertions/TA-Misbehaviours.md new file mode 100644 index 0000000000..688f44e5ad --- /dev/null +++ b/TSF/trustable/assertions/TA-Misbehaviours.md @@ -0,0 +1,26 @@ +# Checklist for TA-MISBEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How has the list of misbehaviours varied over time?
+ Answer: +* How confident can we be that this list is comprehensive?
+ Answer: +* How well do the misbehaviours map to the expectations?
+ Answer: +* Could some participants have incentives to manipulate information?
+ Answer: +* Could there be whole categories of misbehaviours still undiscovered?
+ Answer: +* Can we identify misbehaviours that have been understood but not specified?
+ Answer: +* Can we identify some new misbehaviours, right now?
+ Answer: +* Is every misbehaviour represented by at least one fault induction test?
+ Answer: +* Are fault inductions used to demonstrate that tests which usually pass can and do fail appropriately?
+ Answer: +* Are all the fault induction results actually collected?
+ Answer: +* Are the results evaluated?
+ Answer: +* Do input analysis findings on verifiable tool or component claims and features identify additional misbehaviours or support existing mitigations?
+ Answer: \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Releases.md b/TSF/trustable/assertions/TA-Releases.md new file mode 100644 index 0000000000..851d194eea --- /dev/null +++ b/TSF/trustable/assertions/TA-Releases.md @@ -0,0 +1,14 @@ +# Checklist for TA-RELEASES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How confident are we that all components are taken from within our controlled environment?
+ Answer: This library does not take anything from outside of this repository. +* How confident are we that all of the tools we are using are also under our control?
+ Answer: The version of nlohmann/json that is documented with this documentation is under the full control of the Eclipse S-CORE organisation. +* Are our builds repeatable on a different server, or in a different context?
+ Answer: Since there is no "build" of the header-only library, yes. +* How sure are we that our builds don't access the internet?
+ Answer: There is no implemented access to the internet in the library itself. The testsuite is downloaded from within Eclipse S-CORE. +* How many of our components are non-reproducible?<br>
+ Answer: The single component is reproducible. +* How confident are we that our reproducibility check is correct?
+ Answer: Quite. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Supply-Chain.md b/TSF/trustable/assertions/TA-Supply-Chain.md new file mode 100644 index 0000000000..9fc95469dc --- /dev/null +++ b/TSF/trustable/assertions/TA-Supply-Chain.md @@ -0,0 +1,14 @@ +# Checklist for TA-SUPPLY_CHAIN from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Could there be other components, missed from the list?
+ Answer: No. +* Does the list include all toolchain components?
+ Answer: Yes. +* Does the toolchain include a bootstrap?
+ Answer: No. +* Could the content of a mirrored project be compromised by an upstream change?
+ Answer: No. +* Are mirrored projects up to date with the upstream project?
+ Answer: Yes, the library is up to date with the most recent release of the original nlohmann/json. +* Are mirrored projects based on the correct upstream?<br>
+ Answer: Yes. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Tests.md b/TSF/trustable/assertions/TA-Tests.md new file mode 100644 index 0000000000..1fec8ad089 --- /dev/null +++ b/TSF/trustable/assertions/TA-Tests.md @@ -0,0 +1,14 @@ +# Checklist for TA-TESTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How confident are we that our test tooling and environment setups used for tests, fault inductions, and analyses are reproducible?
+ Answer: The test can be reproduced any time on any machine running the versions of the operating systems and compilers as provided (TODO) +* Are any exceptions identified, documented and justified?
+ Answer: Not applicable. +* How confident are we that all test components are taken from within our controlled environment?
+ Answer: All tests are either self-contained or download test data from [within Eclipse S-CORE](https://github.com/eclipse-score/inc_nlohmann_json/tree/json_test_data_version_3_1_0_mirror). +* How confident are we that all of the test environments we are using are also under our control?
+ Answer: ???? The environments are standard docker images of ubuntu and standard versions of compilers. +* Do we record all test environment components, including hardware and infrastructure used for exercising tests and processing input/output data?
+ Answer: No; since the tests are independent of hardware, these data are not collected. +* How confident are we that all tests scenarios are repeatable?<br>
+ Answer: Very. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Updates.md b/TSF/trustable/assertions/TA-Updates.md new file mode 100644 index 0000000000..179df0fc88 --- /dev/null +++ b/TSF/trustable/assertions/TA-Updates.md @@ -0,0 +1,14 @@ +# Checklist for TA-UPDATES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Where are the change and configuration management controls specified?
+ Answer: WIP +* Are these controls enforced for all of components, tools, data, documentation and configurations?
+ Answer: The S-CORE Methodology is followed, compliance with which enforces the change process to be followed. +* Are there any ways in which these controls can be subverted, and have we mitigated them?
+ Answer: Yes, the change process can just not be followed. We have no real method to enforce it other than to trust that the committers follow the S-CORE processes. +* Does change control capture all potential regressions?
+ Answer: NO CLUE +* Is change control timely enough?
+ Answer: Not applicable, as far as can be understood right now, there is no immanent need to keep the library up to date. +* Are all guidance and checks understandable and consistently followed?
+ Answer: WIP \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Validation.md b/TSF/trustable/assertions/TA-Validation.md new file mode 100644 index 0000000000..3fcc8a5100 --- /dev/null +++ b/TSF/trustable/assertions/TA-Validation.md @@ -0,0 +1,30 @@ +# Checklist for TA-VALIDATION from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +I DO NOT FEEL CONFIDENT TO ANSWER THIS! + +* Is the selection of tests correct?
+ Answer: +* Are the tests executed enough times?
+ Answer: +* How confident are we that all test results are being captured?
+ Answer: +* Can we look at any individual test result, and establish what it relates to?
+ Answer: +* Can we trace from any test result to the expectation it relates to?
+ Answer: +* Can we identify precisely which environment (software and hardware) were used?
+ Answer: +* How many pass/fail results would be expected, based on the scheduled tests?
+ Answer: +* Do we have all of the expected results?
+ Answer: +* Do we have time-series data for all of those results?
+ Answer: +* If there are any gaps, do we understand why?
+ Answer: +* Are the test validation strategies credible and appropriate?
+ Answer: +* What proportion of the implemented tests are validated?
+ Answer: +* Have the tests been verified using known good and bad data?
+ Answer: \ No newline at end of file From b052f23a09b98d56689be13cb27f4dee0e7ba531 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 17 Oct 2025 14:00:34 +0000 Subject: [PATCH 16/67] add update-helper --- TSF/scripts/update_helper.py | 274 +++++++++++++++++++++++++++++++++++ 1 file changed, 274 insertions(+) create mode 100644 TSF/scripts/update_helper.py diff --git a/TSF/scripts/update_helper.py b/TSF/scripts/update_helper.py new file mode 100644 index 0000000000..a8472b99a2 --- /dev/null +++ b/TSF/scripts/update_helper.py @@ -0,0 +1,274 @@ +import argparse +import re +import requests +import hashlib +from pathlib import Path + +def main() -> None: + ap = argparse.ArgumentParser(description="little helper script automatically updating version numbers and release dates") + ap.add_argument("-v", + "--version", + help="version number to be updated to; if unspecified, most recent version is chosen", + default=None + ) + ap.add_argument("-c", + "--check", + help="checks whether current and specified (or most recent) version of single_include/nlohmann/json.hpp coincide; no other action is performed", + action=argparse.BooleanOptionalAction + ) + ap.add_argument("-a", + "--auto", + help="automatically updates all options", + action=argparse.BooleanOptionalAction + ) + ap.add_argument("-u", + "--update", + action='append', + choices=["JLS-01","JLS-06","JLS-07","JLS-11","JLS-14","introduction","misbehaviours"], + help="updates the specified file(s):" \ + " JLS-01 - TSF/trustable/JLS-01.md," \ + " JLS-06 - TSF/trustable/JLS-06.md," \ + " JLS-07 - TSF/trustable/JLS-07.md," \ + " JLS-11 - TSF/trustable/JLS-11.md," \ + " JLS-14 - TSF/trustable/JLS-14.md," \ + " introduction - TSF/docs/introduction/index.rst," \ + " misbehaviours - TSF/scripts/generate_list_of_misbehaviours.py", + default=None + ) + ap.add_argument("-b", + "--branch", + help="name of the branch to which the references for branch protection and workflow-failures point to", + default=None + ) + 
ap.add_argument("-bo", + "--branch_only", + help="adapts branch-names only", + action=argparse.BooleanOptionalAction + ) + args = ap.parse_args() + + root = Path(__file__).resolve().parent.parent.parent + + print(args) + + if (not args.check + and ( + (not args.auto + and args.update is None) + or (args.branch_only + and args.branch is None) + ) + ): + # do nothing + return None + + # Fetch the metadata + version, url, release_date, expected_sha = fetch_metadata(args.version) + + # if flag check is set, then the sha of single_include/nlohmann/json.hpp is cross-checked with the sha of the specified version + if args.check: + if not check(expected_sha,root): + if args.version is None: + print(f"The current version of single_include/nlohmann/json.hpp is not the most recent one, which is {version}.") + else: + print(f"The current version of single_include/nlohmann/json.hpp does not coincide with {version}.") + else: + if args.version is None: + print(f"The current version of single_include/nlohmann/json.hpp is the most recent one, which is {version}.") + else: + print(f"The current version of single_include/nlohmann/json.hpp coincides with {version}.") + # No other action is performed. + return None + if not check(expected_sha,root): + print("\nWARNING! The current version of single_include/nlohmann/json.hpp does not coincide with {version}.\n\nIf you proceed, then the documentation is expected to contain wrong data!") + user = input("Proceed anyway? 
[y/n] ").strip().lower() + if user != "y": + print("Aborting update ...") + return None + # if flag auto is set, then all is updated automatically + if args.auto: + if args.branch is not None: + update_JLS_01(args.branch,root) + update_JLS_06(args.branch,root) + update_JLS_07(args.branch,root) + if not args.branch_only: + update_JLS_11(release_date,root) + update_JLS_14(url,expected_sha,root) + update_intro(version,root) + update_misbehaviours(version,release_date,root) + # no other action is necessary + return None + if "JLS-01" in args.update: + update_JLS_01(args.branch,root) + if "JLS-06" in args.update: + update_JLS_06(args.branch,root) + if "JLS-07" in args.update: + update_JLS_07(args.branch,root) + if args.branch_only: + return None + if "JLS-11" in args.update: + update_JLS_11(release_date,root) + if "JLS-14" in args.update: + update_JLS_14(url,expected_sha,root) + if "introduction" in args.update: + update_intro(version,root) + if "misbehaviours" in args.update: + update_misbehaviours(version,release_date,root) + +def update_JLS_01(branch: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-01.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-01.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*branch:\s*")([^"]*)(")', r'\g<1>' + branch + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_06(branch: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-06.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-06.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*branch:\s*")([^"]*)(")', r'\g<1>' + branch + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_07(branch: str, root: Path | None = None) -> None: + if root is 
None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-07.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-07.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*branch:\s*")([^"]*)(")', r'\g<1>' + branch + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_11(release_date: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-11.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-11.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*release_date:\s*")([^"]*)(")', r'\g<1>' + release_date + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_14(url: str, sha: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_14 = root / "trustable/statements/JLS-14.md" + else: + path_to_jls_14 = root / "TSF/trustable/statements/JLS-14.md" + data = path_to_jls_14.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*sha:\s*")([^"]*)(")', r'\g<1>' + sha + r'\g<3>', data) + data = re.sub(r'(?m)^(\s*url:\s*")([^"]*)(")', r'\g<1>' + url + r'\g<3>', data) + path_to_jls_14.write_text(data) + +def update_intro(version: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_intro = root / "docs/introduction/index.rst" + else: + path_to_intro = root / "TSF/docs/introduction/index.rst" + data = path_to_intro.read_text(encoding='utf-8') + data = re.sub(r'(\(version\s+)([^)]*)(\))', + lambda m: f"{m.group(1)}{version}{m.group(3)}", + data) + path_to_intro.write_text(data) + +def update_misbehaviours(version: str, release_date: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent + path_to_script = root / 
"generate_list_of_misbehaviours.py" + else: + path_to_script = root / "TSF/scripts/generate_list_of_misbehaviours.py" + data = path_to_script.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*version\s*=\s*")([^"]*)(")', r'\g<1>' + version + r'\g<3>', data) + data = re.sub(r'(?m)^(\s*release_date\s*=\s*")([^"]*)(")', r'\g<1>' + release_date + r'\g<3>', data) + path_to_script.write_text(data) + + +def fetch_metadata(version = None) -> tuple[str,str,str]: + # This function fetches the metadata of the release of the version of nlohmann/json specified in the input. + # If the input is None, then the most recent version is fetched. + # The function returns the version number, the release date in the format %Y-%m-%dT%H:%M:%SZ + # and the sha256-value of the json.hpp of the released version + + if version is None: + version = "" + + # fetch the sha-value of most recent release + releases = requests.get("https://github.com/nlohmann/json/releases") + if releases.status_code != 200: + raise Warning("The release page of nlohmann/json appears to be currently not reachable.") + releases_by_the_line = releases.text.splitlines() + # releases is expected to be huge, delete to free up room + del releases + found_version = False + found_sha = False + found_release_date = False + found_tag = False + for line in releases_by_the_line: + # look for + if not found_version and f"JSON for Modern C++ version {version}" not in line: + continue + elif not found_version: + if version == "": + m = re.search(r'JSON for Modern C\+\+ version\s*([^<"]*)<',line) + if m is None: + raise RuntimeError("Critical Error: Can not find version number of most recent release!") + version = m.group(1) + found_version = True + continue + if not found_release_date and "datetime=" in line: + m = re.search(r'datetime\s*=\s*"([^"]*)"', line) + if m is None: + raise RuntimeError(f"Critical Error: Can not find release-date of version {version}!") + release_date = m.group(1) if m else None + found_release_date = 
True + if not found_sha and "SHA-256:" in line and "(json.hpp)" in line: + expected_sha = line.split("SHA-256:", 1)[1].split("(json.hpp)", 1)[0].strip() + found_sha = True + if not found_tag and "/nlohmann/json/tree" in line: + m = re.search(r'href\s*=\s*"([^"]*)"', line) + if m is None: + raise RuntimeError(f"Critical Error: Can not find link to release version {version}!") + url = "https://github.com" + m.group(1) + found_tag = True + if found_version and found_sha and found_release_date and found_tag: + return (version, url, release_date, expected_sha) + if "JSON for Modern C++ version" in line and f"JSON for Modern C++ version {version}" not in line: + if not found_version and not found_release_date and not found_tag: + error_message = "Could not find any metadata" + elif not found_sha: + error_message = "Could not find SHA-value for json.hpp" + if not found_release_date: + error_message += " and relase-date" + elif not found_tag: + error_message += " and link to code" + elif not found_release_date: + error_message = "Could not find release-date" + if not found_tag: + error_message += " and link to code" + else: + error_message = "Could not find link to code" + error_message += f" of version {version}!" if version!="" else " of most recent version!" + raise RuntimeError(error_message) + # If ever the for-loop comes to its end, the specified version can not be found! 
+ raise RuntimeError(f"Can not locate the release of version {version}!") + + +def check(expected_sha: str, root: Path | None = None) -> bool: + # get the actual sha-value of the single_include.json + if root is None: + root = Path(__file__).resolve().parent.parent.parent + single_include_json_path = root / "single_include/nlohmann/json.hpp" + with single_include_json_path.open('rb') as f: + actual_sha = hashlib.file_digest(f, 'sha256').hexdigest() + return actual_sha == expected_sha + + +if __name__ == "__main__": + main() \ No newline at end of file From 379ff2163f4cf9f146b2ff525fb149b781c6c4bf Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 17 Oct 2025 14:12:16 +0000 Subject: [PATCH 17/67] add references --- TSF/trustable/assertions/TA-ANALYSIS.md | 3 +++ TSF/trustable/assertions/TA-BEHAVIOURS.md | 3 +++ TSF/trustable/assertions/TA-CONFIDENCE.md | 3 +++ TSF/trustable/assertions/TA-CONSTRAINTS.md | 3 +++ TSF/trustable/assertions/TA-DATA.md | 3 +++ TSF/trustable/assertions/TA-FIXES.md | 3 +++ TSF/trustable/assertions/TA-INDICATORS.md | 3 +++ TSF/trustable/assertions/TA-INPUTS.md | 3 +++ TSF/trustable/assertions/TA-ITERATIONS.md | 3 +++ TSF/trustable/assertions/TA-METHODOLOGIES.md | 3 +++ TSF/trustable/assertions/TA-MISBEHAVIOURS.md | 3 +++ TSF/trustable/assertions/TA-RELEASES.md | 3 +++ TSF/trustable/assertions/TA-SUPPLY_CHAIN.md | 3 +++ TSF/trustable/assertions/TA-TESTS.md | 3 +++ TSF/trustable/assertions/TA-UPDATES.md | 3 +++ TSF/trustable/assertions/TA-VALIDATION.md | 3 +++ 16 files changed, 48 insertions(+) diff --git a/TSF/trustable/assertions/TA-ANALYSIS.md b/TSF/trustable/assertions/TA-ANALYSIS.md index ce74c5c359..ef43afa9b5 100644 --- a/TSF/trustable/assertions/TA-ANALYSIS.md +++ b/TSF/trustable/assertions/TA-ANALYSIS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Analysis.md --- Collected data from tests and monitoring of deployed software is analysed according to specified 
objectives. diff --git a/TSF/trustable/assertions/TA-BEHAVIOURS.md b/TSF/trustable/assertions/TA-BEHAVIOURS.md index 135f16b2f3..d9a791777a 100644 --- a/TSF/trustable/assertions/TA-BEHAVIOURS.md +++ b/TSF/trustable/assertions/TA-BEHAVIOURS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Behaviours.md --- Expected or required behaviours for nlohmann/json library are identified, specified, verified and validated based on analysis. diff --git a/TSF/trustable/assertions/TA-CONFIDENCE.md b/TSF/trustable/assertions/TA-CONFIDENCE.md index a6fc8e3276..574b6e58be 100644 --- a/TSF/trustable/assertions/TA-CONFIDENCE.md +++ b/TSF/trustable/assertions/TA-CONFIDENCE.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Confidence.md --- Confidence in nlohmann/json library is measured based on results of analysis. diff --git a/TSF/trustable/assertions/TA-CONSTRAINTS.md b/TSF/trustable/assertions/TA-CONSTRAINTS.md index 8cf85ca15e..b9d4b5e739 100644 --- a/TSF/trustable/assertions/TA-CONSTRAINTS.md +++ b/TSF/trustable/assertions/TA-CONSTRAINTS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Constraints.md --- Constraints on adaptation and deployment of nlohmann/json library are specified. diff --git a/TSF/trustable/assertions/TA-DATA.md b/TSF/trustable/assertions/TA-DATA.md index 19dda373d7..a3807cab45 100644 --- a/TSF/trustable/assertions/TA-DATA.md +++ b/TSF/trustable/assertions/TA-DATA.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Data.md --- Data is collected from tests, and from monitoring of deployed software, according to specified objectives. 
diff --git a/TSF/trustable/assertions/TA-FIXES.md b/TSF/trustable/assertions/TA-FIXES.md index 353fa45d6f..c3b736bd3a 100644 --- a/TSF/trustable/assertions/TA-FIXES.md +++ b/TSF/trustable/assertions/TA-FIXES.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Fixes.md --- Known bugs or misbehaviours are analysed and triaged, and critical fixes or mitigations are implemented or applied. diff --git a/TSF/trustable/assertions/TA-INDICATORS.md b/TSF/trustable/assertions/TA-INDICATORS.md index fc8e87391b..ade313e144 100644 --- a/TSF/trustable/assertions/TA-INDICATORS.md +++ b/TSF/trustable/assertions/TA-INDICATORS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Indicators.md --- Advanced warning indicators for misbehaviours are identified, and monitoring mechanisms are specified, verified and validated based on analysis. diff --git a/TSF/trustable/assertions/TA-INPUTS.md b/TSF/trustable/assertions/TA-INPUTS.md index 325480ff97..72a5be11a0 100644 --- a/TSF/trustable/assertions/TA-INPUTS.md +++ b/TSF/trustable/assertions/TA-INPUTS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Inputs.md --- All inputs to nlohmann/json library are assessed, to identify potential risks and issues. diff --git a/TSF/trustable/assertions/TA-ITERATIONS.md b/TSF/trustable/assertions/TA-ITERATIONS.md index 7137b3f324..9b09191ca2 100644 --- a/TSF/trustable/assertions/TA-ITERATIONS.md +++ b/TSF/trustable/assertions/TA-ITERATIONS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Iterations.md --- All constructed iterations of nlohmann/json library include source code, build instructions, tests, results and attestations. 
diff --git a/TSF/trustable/assertions/TA-METHODOLOGIES.md b/TSF/trustable/assertions/TA-METHODOLOGIES.md index 3330900d98..87b61a6903 100644 --- a/TSF/trustable/assertions/TA-METHODOLOGIES.md +++ b/TSF/trustable/assertions/TA-METHODOLOGIES.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Methodologies.md --- Manual methodologies applied for nlohmann/json library by contributors, and their results, are managed according to specified objectives. diff --git a/TSF/trustable/assertions/TA-MISBEHAVIOURS.md b/TSF/trustable/assertions/TA-MISBEHAVIOURS.md index fbb67f2e65..2fac459cd0 100644 --- a/TSF/trustable/assertions/TA-MISBEHAVIOURS.md +++ b/TSF/trustable/assertions/TA-MISBEHAVIOURS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Misbehaviours.md --- Prohibited misbehaviours for nlohmann/json library are identified, and mitigations are specified, verified and validated based on analysis. diff --git a/TSF/trustable/assertions/TA-RELEASES.md b/TSF/trustable/assertions/TA-RELEASES.md index 033b77df8e..8dbec10896 100644 --- a/TSF/trustable/assertions/TA-RELEASES.md +++ b/TSF/trustable/assertions/TA-RELEASES.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Releases.md --- Construction of nlohmann/json library releases is fully repeatable and the results are fully reproducible, with any exceptions documented and justified. 
diff --git a/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md b/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md index 43939dc774..951a2edb18 100644 --- a/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md +++ b/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Supply-Chain.md --- All sources for nlohmann/json library and tools are mirrored in our controlled environment. diff --git a/TSF/trustable/assertions/TA-TESTS.md b/TSF/trustable/assertions/TA-TESTS.md index 474886070a..81ab177f1c 100644 --- a/TSF/trustable/assertions/TA-TESTS.md +++ b/TSF/trustable/assertions/TA-TESTS.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Tests.md --- All tests for nlohmann/json library, and its build and test environments, are constructed from controlled/mirrored sources and are reproducible, with any exceptions documented. diff --git a/TSF/trustable/assertions/TA-UPDATES.md b/TSF/trustable/assertions/TA-UPDATES.md index c54015ba90..bdb71e182f 100644 --- a/TSF/trustable/assertions/TA-UPDATES.md +++ b/TSF/trustable/assertions/TA-UPDATES.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Updates.md --- nlohmann/json library components, configurations and tools are updated under specified change and configuration management controls. diff --git a/TSF/trustable/assertions/TA-VALIDATION.md b/TSF/trustable/assertions/TA-VALIDATION.md index 08cba3497e..49aa12feab 100644 --- a/TSF/trustable/assertions/TA-VALIDATION.md +++ b/TSF/trustable/assertions/TA-VALIDATION.md @@ -1,6 +1,9 @@ --- level: 1.1 normative: true +references: + - type: file + path: ./TSF/trustable/assertions/TA-Validation.md --- All specified tests are executed repeatedly, under defined conditions in controlled environments, according to specified objectives. 
From 596e2b8f6d1542ea2d48f7f16ded13c7aff93ef0 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 17 Oct 2025 14:45:41 +0000 Subject: [PATCH 18/67] pretty printing --- TSF/trustable/assertions/TA-ANALYSIS.md | 2 +- TSF/trustable/assertions/TA-BEHAVIOURS.md | 2 +- TSF/trustable/assertions/TA-CONFIDENCE.md | 2 +- TSF/trustable/assertions/TA-CONSTRAINTS.md | 2 +- TSF/trustable/assertions/TA-DATA.md | 2 +- TSF/trustable/assertions/TA-FIXES.md | 2 +- TSF/trustable/assertions/TA-INDICATORS.md | 2 +- TSF/trustable/assertions/TA-INPUTS.md | 2 +- TSF/trustable/assertions/TA-ITERATIONS.md | 2 +- TSF/trustable/assertions/TA-METHODOLOGIES.md | 2 +- TSF/trustable/assertions/TA-MISBEHAVIOURS.md | 2 +- TSF/trustable/assertions/TA-RELEASES.md | 2 +- TSF/trustable/assertions/TA-SUPPLY_CHAIN.md | 2 +- TSF/trustable/assertions/TA-TESTS.md | 2 +- TSF/trustable/assertions/TA-UPDATES.md | 2 +- TSF/trustable/assertions/TA-VALIDATION.md | 2 +- 16 files changed, 16 insertions(+), 16 deletions(-) diff --git a/TSF/trustable/assertions/TA-ANALYSIS.md b/TSF/trustable/assertions/TA-ANALYSIS.md index ef43afa9b5..192061db14 100644 --- a/TSF/trustable/assertions/TA-ANALYSIS.md +++ b/TSF/trustable/assertions/TA-ANALYSIS.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Analysis.md --- diff --git a/TSF/trustable/assertions/TA-BEHAVIOURS.md b/TSF/trustable/assertions/TA-BEHAVIOURS.md index d9a791777a..8a709edfab 100644 --- a/TSF/trustable/assertions/TA-BEHAVIOURS.md +++ b/TSF/trustable/assertions/TA-BEHAVIOURS.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Behaviours.md --- diff --git a/TSF/trustable/assertions/TA-CONFIDENCE.md b/TSF/trustable/assertions/TA-CONFIDENCE.md index 574b6e58be..964e14cebe 100644 --- a/TSF/trustable/assertions/TA-CONFIDENCE.md +++ b/TSF/trustable/assertions/TA-CONFIDENCE.md @@ -2,7 +2,7 @@ level: 1.1 
normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Confidence.md --- diff --git a/TSF/trustable/assertions/TA-CONSTRAINTS.md b/TSF/trustable/assertions/TA-CONSTRAINTS.md index b9d4b5e739..f75ef9d10a 100644 --- a/TSF/trustable/assertions/TA-CONSTRAINTS.md +++ b/TSF/trustable/assertions/TA-CONSTRAINTS.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Constraints.md --- diff --git a/TSF/trustable/assertions/TA-DATA.md b/TSF/trustable/assertions/TA-DATA.md index a3807cab45..5b227f9143 100644 --- a/TSF/trustable/assertions/TA-DATA.md +++ b/TSF/trustable/assertions/TA-DATA.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Data.md --- diff --git a/TSF/trustable/assertions/TA-FIXES.md b/TSF/trustable/assertions/TA-FIXES.md index c3b736bd3a..402c111376 100644 --- a/TSF/trustable/assertions/TA-FIXES.md +++ b/TSF/trustable/assertions/TA-FIXES.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Fixes.md --- diff --git a/TSF/trustable/assertions/TA-INDICATORS.md b/TSF/trustable/assertions/TA-INDICATORS.md index ade313e144..f742f694d1 100644 --- a/TSF/trustable/assertions/TA-INDICATORS.md +++ b/TSF/trustable/assertions/TA-INDICATORS.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Indicators.md --- diff --git a/TSF/trustable/assertions/TA-INPUTS.md b/TSF/trustable/assertions/TA-INPUTS.md index 72a5be11a0..6284d05cad 100644 --- a/TSF/trustable/assertions/TA-INPUTS.md +++ b/TSF/trustable/assertions/TA-INPUTS.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Inputs.md --- diff --git a/TSF/trustable/assertions/TA-ITERATIONS.md 
b/TSF/trustable/assertions/TA-ITERATIONS.md index 9b09191ca2..65e0ac14c8 100644 --- a/TSF/trustable/assertions/TA-ITERATIONS.md +++ b/TSF/trustable/assertions/TA-ITERATIONS.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Iterations.md --- diff --git a/TSF/trustable/assertions/TA-METHODOLOGIES.md b/TSF/trustable/assertions/TA-METHODOLOGIES.md index 87b61a6903..938f780e14 100644 --- a/TSF/trustable/assertions/TA-METHODOLOGIES.md +++ b/TSF/trustable/assertions/TA-METHODOLOGIES.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Methodologies.md --- diff --git a/TSF/trustable/assertions/TA-MISBEHAVIOURS.md b/TSF/trustable/assertions/TA-MISBEHAVIOURS.md index 2fac459cd0..b3ded4e5da 100644 --- a/TSF/trustable/assertions/TA-MISBEHAVIOURS.md +++ b/TSF/trustable/assertions/TA-MISBEHAVIOURS.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Misbehaviours.md --- diff --git a/TSF/trustable/assertions/TA-RELEASES.md b/TSF/trustable/assertions/TA-RELEASES.md index 8dbec10896..f32550fa8e 100644 --- a/TSF/trustable/assertions/TA-RELEASES.md +++ b/TSF/trustable/assertions/TA-RELEASES.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Releases.md --- diff --git a/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md b/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md index 951a2edb18..134ee73d76 100644 --- a/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md +++ b/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Supply-Chain.md --- diff --git a/TSF/trustable/assertions/TA-TESTS.md b/TSF/trustable/assertions/TA-TESTS.md index 81ab177f1c..fa737ec865 100644 --- 
a/TSF/trustable/assertions/TA-TESTS.md +++ b/TSF/trustable/assertions/TA-TESTS.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Tests.md --- diff --git a/TSF/trustable/assertions/TA-UPDATES.md b/TSF/trustable/assertions/TA-UPDATES.md index bdb71e182f..69a72e3712 100644 --- a/TSF/trustable/assertions/TA-UPDATES.md +++ b/TSF/trustable/assertions/TA-UPDATES.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Updates.md --- diff --git a/TSF/trustable/assertions/TA-VALIDATION.md b/TSF/trustable/assertions/TA-VALIDATION.md index 49aa12feab..fd48b70293 100644 --- a/TSF/trustable/assertions/TA-VALIDATION.md +++ b/TSF/trustable/assertions/TA-VALIDATION.md @@ -2,7 +2,7 @@ level: 1.1 normative: true references: - - type: file + - type: checklist path: ./TSF/trustable/assertions/TA-Validation.md --- From 0dc49ff2177fed5df35dda90c01f0b12601a9821 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 17 Oct 2025 14:53:12 +0000 Subject: [PATCH 19/67] increase indent --- TSF/trustable/assertions/TA-Analysis.md | 2 +- TSF/trustable/assertions/TA-Behaviours.md | 2 +- TSF/trustable/assertions/TA-Confidence.md | 2 +- TSF/trustable/assertions/TA-Constraints.md | 4 ++-- TSF/trustable/assertions/TA-Data.md | 2 +- TSF/trustable/assertions/TA-Fixes.md | 2 +- TSF/trustable/assertions/TA-Indicators.md | 2 +- TSF/trustable/assertions/TA-Inputs.md | 2 +- TSF/trustable/assertions/TA-Iterations.md | 2 +- TSF/trustable/assertions/TA-Methodologies.md | 2 +- TSF/trustable/assertions/TA-Misbehaviours.md | 2 +- TSF/trustable/assertions/TA-Releases.md | 2 +- TSF/trustable/assertions/TA-Supply-Chain.md | 2 +- TSF/trustable/assertions/TA-Tests.md | 2 +- TSF/trustable/assertions/TA-Updates.md | 2 +- TSF/trustable/assertions/TA-Validation.md | 2 +- 16 files changed, 17 insertions(+), 17 deletions(-) diff --git 
a/TSF/trustable/assertions/TA-Analysis.md b/TSF/trustable/assertions/TA-Analysis.md index 38bec7a993..6f46a98521 100644 --- a/TSF/trustable/assertions/TA-Analysis.md +++ b/TSF/trustable/assertions/TA-Analysis.md @@ -1,4 +1,4 @@ -# Checklist for TA-ANALYSIS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-ANALYSIS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * What fraction of Expectations are covered by the test data?
Answer: Every statement supporting both of the expectations is ultimately supported by a test, except for WFJ-06. For WFJ-06 it is impossible to provide a direct tests, since this is a statement on infinitely many cases. Indirect tests are provided by the rejection of ill-formed json data. diff --git a/TSF/trustable/assertions/TA-Behaviours.md b/TSF/trustable/assertions/TA-Behaviours.md index 3d22bae965..e9bdc1128f 100644 --- a/TSF/trustable/assertions/TA-Behaviours.md +++ b/TSF/trustable/assertions/TA-Behaviours.md @@ -1,4 +1,4 @@ -# Checklist for TA-BEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-BEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * How has the list of Expectations varied over time?
Answer: The list of expectations is taken from [here](https://eclipse-score.github.io/score/main/modules/baselibs/json/docs/requirements/index.html), whose development can be retraced using git. diff --git a/TSF/trustable/assertions/TA-Confidence.md b/TSF/trustable/assertions/TA-Confidence.md index ffcb14d063..4699848741 100644 --- a/TSF/trustable/assertions/TA-Confidence.md +++ b/TSF/trustable/assertions/TA-Confidence.md @@ -1,4 +1,4 @@ -# Checklist for TA-CONFIDENCE from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-CONFIDENCE from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * What is the algorithm for combining/comparing the scores?
Answer: It is the standard algorithm of trudag. diff --git a/TSF/trustable/assertions/TA-Constraints.md b/TSF/trustable/assertions/TA-Constraints.md index 873ff34571..b6181778dc 100644 --- a/TSF/trustable/assertions/TA-Constraints.md +++ b/TSF/trustable/assertions/TA-Constraints.md @@ -1,7 +1,7 @@ -# Checklist for TA-CONSTRAINTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-CONSTRAINTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * Are the constraints grounded in realistic expectations, backed by real-world examples?
- Answer: Yes, the constraints are NOICE + Answer: Yes, the constraints are * Do they effectively guide downstream consumers in expanding upon existing Statements?
Answer: * Do they provide clear guidance for upstreams on reusing components with well-defined claims?
diff --git a/TSF/trustable/assertions/TA-Data.md b/TSF/trustable/assertions/TA-Data.md index 8beac041b7..761f8e57c7 100644 --- a/TSF/trustable/assertions/TA-Data.md +++ b/TSF/trustable/assertions/TA-Data.md @@ -1,4 +1,4 @@ -# Checklist for TA-DATA from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-DATA from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * Is all test data stored with long-term accessibility?
Answer: If we assume that github is long-term accessible, then yes. diff --git a/TSF/trustable/assertions/TA-Fixes.md b/TSF/trustable/assertions/TA-Fixes.md index 4ec52aec32..ded4ee56a2 100644 --- a/TSF/trustable/assertions/TA-Fixes.md +++ b/TSF/trustable/assertions/TA-Fixes.md @@ -1,4 +1,4 @@ -# Checklist for TA-FIXES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-FIXES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * How many faults have we identified in XYZ?
diff --git a/TSF/trustable/assertions/TA-Indicators.md b/TSF/trustable/assertions/TA-Indicators.md index a7f3eaa286..7c0e62b7c1 100644 --- a/TSF/trustable/assertions/TA-Indicators.md +++ b/TSF/trustable/assertions/TA-Indicators.md @@ -1,4 +1,4 @@ -# Checklist for TA-INDICATORS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-INDICATORS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * How appropriate/thorough are the analyses that led to the indicators?
Answer: There are no warning indicators, since no misbehaviours have been identified for the use of the library for parsing and verification of JSON data according to RFC8259. diff --git a/TSF/trustable/assertions/TA-Inputs.md b/TSF/trustable/assertions/TA-Inputs.md index c3281c45fb..07cb3d9944 100644 --- a/TSF/trustable/assertions/TA-Inputs.md +++ b/TSF/trustable/assertions/TA-Inputs.md @@ -1,4 +1,4 @@ -# Checklist for TA-INPUTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-INPUTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) The single_include/nlohmann/json.hpp is the single and only component of the library. diff --git a/TSF/trustable/assertions/TA-Iterations.md b/TSF/trustable/assertions/TA-Iterations.md index 52b0e770d5..e7e1a3a41b 100644 --- a/TSF/trustable/assertions/TA-Iterations.md +++ b/TSF/trustable/assertions/TA-Iterations.md @@ -1,4 +1,4 @@ -# Checklist for TA-ITERATIONS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-ITERATIONS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * How much of the software is provided as binary only, expressed as a fraction of the BoM list?
Answer: None. diff --git a/TSF/trustable/assertions/TA-Methodologies.md b/TSF/trustable/assertions/TA-Methodologies.md index 8d495ea7f9..9400330ec3 100644 --- a/TSF/trustable/assertions/TA-Methodologies.md +++ b/TSF/trustable/assertions/TA-Methodologies.md @@ -1,4 +1,4 @@ -# Checklist for TA-METHODOLOGIES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-METHODOLOGIES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) This project follows purely the Methodologies of Eclipse S-CORE. diff --git a/TSF/trustable/assertions/TA-Misbehaviours.md b/TSF/trustable/assertions/TA-Misbehaviours.md index 688f44e5ad..d04879591f 100644 --- a/TSF/trustable/assertions/TA-Misbehaviours.md +++ b/TSF/trustable/assertions/TA-Misbehaviours.md @@ -1,4 +1,4 @@ -# Checklist for TA-MISBEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-MISBEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * How has the list of misbehaviours varied over time?
Answer: diff --git a/TSF/trustable/assertions/TA-Releases.md b/TSF/trustable/assertions/TA-Releases.md index 851d194eea..9ec7516c0b 100644 --- a/TSF/trustable/assertions/TA-Releases.md +++ b/TSF/trustable/assertions/TA-Releases.md @@ -1,4 +1,4 @@ -# Checklist for TA-RELEASES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-RELEASES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * How confident are we that all components are taken from within our controlled environment?
Answer: This library does not take anything from outside of this repository. diff --git a/TSF/trustable/assertions/TA-Supply-Chain.md b/TSF/trustable/assertions/TA-Supply-Chain.md index 9fc95469dc..5be1c82f5b 100644 --- a/TSF/trustable/assertions/TA-Supply-Chain.md +++ b/TSF/trustable/assertions/TA-Supply-Chain.md @@ -1,4 +1,4 @@ -# Checklist for TA-SUPPLY_CHAIN from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-SUPPLY_CHAIN from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * Could there be other components, missed from the list?
Answer: No. diff --git a/TSF/trustable/assertions/TA-Tests.md b/TSF/trustable/assertions/TA-Tests.md index 1fec8ad089..8bda0ea007 100644 --- a/TSF/trustable/assertions/TA-Tests.md +++ b/TSF/trustable/assertions/TA-Tests.md @@ -1,4 +1,4 @@ -# Checklist for TA-TESTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-TESTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * How confident are we that our test tooling and environment setups used for tests, fault inductions, and analyses are reproducible?
Answer: The test can be reproduced any time on any machine running the versions of the operating systems and compilers as provided (TODO) diff --git a/TSF/trustable/assertions/TA-Updates.md b/TSF/trustable/assertions/TA-Updates.md index 179df0fc88..7083ecb8e7 100644 --- a/TSF/trustable/assertions/TA-Updates.md +++ b/TSF/trustable/assertions/TA-Updates.md @@ -1,4 +1,4 @@ -# Checklist for TA-UPDATES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-UPDATES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * Where are the change and configuration management controls specified?
Answer: WIP diff --git a/TSF/trustable/assertions/TA-Validation.md b/TSF/trustable/assertions/TA-Validation.md index 3fcc8a5100..65b56a03b2 100644 --- a/TSF/trustable/assertions/TA-Validation.md +++ b/TSF/trustable/assertions/TA-Validation.md @@ -1,4 +1,4 @@ -# Checklist for TA-VALIDATION from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) +#### Checklist for TA-VALIDATION from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) I DO NOT FEEL CONFIDENT TO ANSWER THIS! From fa376332e6dbca11f44254f6e281c6b426b110e9 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 17 Oct 2025 14:57:23 +0000 Subject: [PATCH 20/67] do constraints --- TSF/trustable/assertions/TA-Constraints.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/TSF/trustable/assertions/TA-Constraints.md b/TSF/trustable/assertions/TA-Constraints.md index b6181778dc..5ea6181929 100644 --- a/TSF/trustable/assertions/TA-Constraints.md +++ b/TSF/trustable/assertions/TA-Constraints.md @@ -1,16 +1,16 @@ #### Checklist for TA-CONSTRAINTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) * Are the constraints grounded in realistic expectations, backed by real-world examples?
- Answer: Yes, the constraints are + Answer: The constraints originate from S-CORE and the library itself. * Do they effectively guide downstream consumers in expanding upon existing Statements?
- Answer: + Answer: ????? * Do they provide clear guidance for upstreams on reusing components with well-defined claims?
- Answer: + Answer: ????? * Are any Statements explicitly designated as not reusable or adaptable?
- Answer: + Answer: NO????? * Are there worked examples from downstream or upstream users demonstrating these constraints in practice?
- Answer: + Answer: ???? * Have there been any documented misunderstandings from users, and are these visibly resolved?
- Answer: + Answer: Yes, it is documented that the brace initialisation regularly leads to confusion. * Do external users actively keep up with updates, and are they properly notified of any changes?
- Answer: \ No newline at end of file + Answer: NO CLUE ?????? \ No newline at end of file From 8fcf43862a1969c2b2e0e780b18a6dee0b94fa88 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 17 Oct 2025 15:40:08 +0000 Subject: [PATCH 21/67] pretty printing --- TSF/trustable/assertions/TA-Analysis.md | 62 +++++++++++++++----- TSF/trustable/assertions/TA-Behaviours.md | 47 +++++++++++---- TSF/trustable/assertions/TA-Confidence.md | 15 +++-- TSF/trustable/assertions/TA-Constraints.md | 27 ++++++--- TSF/trustable/assertions/TA-Data.md | 39 ++++++++---- TSF/trustable/assertions/TA-Fixes.md | 59 ++++++++++++++----- TSF/trustable/assertions/TA-Indicators.md | 47 +++++++++++---- TSF/trustable/assertions/TA-Inputs.md | 59 ++++++++++++++----- TSF/trustable/assertions/TA-Iterations.md | 19 ++++-- TSF/trustable/assertions/TA-Methodologies.md | 39 ++++++++---- TSF/trustable/assertions/TA-Misbehaviours.md | 47 +++++++++++---- TSF/trustable/assertions/TA-Releases.md | 23 ++++++-- TSF/trustable/assertions/TA-Supply-Chain.md | 23 ++++++-- TSF/trustable/assertions/TA-Tests.md | 23 ++++++-- TSF/trustable/assertions/TA-Updates.md | 23 ++++++-- TSF/trustable/assertions/TA-Validation.md | 51 ++++++++++++---- 16 files changed, 448 insertions(+), 155 deletions(-) diff --git a/TSF/trustable/assertions/TA-Analysis.md b/TSF/trustable/assertions/TA-Analysis.md index 6f46a98521..aa98a6cf7c 100644 --- a/TSF/trustable/assertions/TA-Analysis.md +++ b/TSF/trustable/assertions/TA-Analysis.md @@ -1,34 +1,64 @@ #### Checklist for TA-ANALYSIS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* What fraction of Expectations are covered by the test data?
+* What fraction of Expectations are covered by the test data? + Answer: Every statement supporting both of the expectations is ultimately supported by a test, except for WFJ-06. For WFJ-06 it is impossible to provide a direct tests, since this is a statement on infinitely many cases. Indirect tests are provided by the rejection of ill-formed json data. -* What fraction of Misbehaviours are covered by the monitored indicator data?
+ +* What fraction of Misbehaviours are covered by the monitored indicator data? + Answer: For the intended use-case, no misbehaviours have been identified. Furthermore, no indicator data are collected. -* How confident are we that the indicator data are accurate and timely?
+ +* How confident are we that the indicator data are accurate and timely? + Answer: No indicator data are collected. -* How reliable is the monitoring process?
+ +* How reliable is the monitoring process? + Answer: Due to no indicator data being collected, there is no monitoring process. -* How well does the production data correlate with our test data?
+ +* How well does the production data correlate with our test data? + Answer: Due to the general nature of the library, there are no production data. -* Are we publishing our data analysis?
+ +* Are we publishing our data analysis? + Answer: Since we have no production data with which to compare our not collected indicator data or our test data, no data analysis is performed, and consequently none is published. -* Are we comparing and analysing production data vs test?
+ +* Are we comparing and analysing production data vs test? + Answer: There are no production data. -* Are our results getting better, or worse?
+ +* Are our results getting better, or worse? + Answer: Neither. -* Are we addressing spikes/regressions?
+ +* Are we addressing spikes/regressions? Answer: There are no spikes in the non-existent indicator data. If a test ever fails, then the spike is investigated. The results of fuzz testing are investigated in the original nlohmann/json. -* Do we have sensible/appropriate target failure rates?
+ +* Do we have sensible/appropriate target failure rates? + Answer: For the unit and integration tests, 0. -* Do we need to check the targets?
+ +* Do we need to check the targets? + Answer: ???????? No. -* Are we achieving the targets?
+ +* Are we achieving the targets? + Answer: ??????? Yes. -* Are all underlying assumptions and target conditions for the analysis specified?
+ +* Are all underlying assumptions and target conditions for the analysis specified? + Answer: There is no analysis. -* Have the underlying assumptions been verified using known good data?
+ +* Have the underlying assumptions been verified using known good data? + Answer: There is no analysis so that there are no underlying assumptions. -* Has the Misbehaviour identification process been verified using known bad data?
+ +* Has the Misbehaviour identification process been verified using known bad data? + Answer: Misbehaviours published on nlohmann/json usually provide minimal working examples for reproducing the faulty behaviour. -* Are results shown to be reproducible?
+ +* Are results shown to be reproducible? + Answer: The tests can be reproduced on every machine. diff --git a/TSF/trustable/assertions/TA-Behaviours.md b/TSF/trustable/assertions/TA-Behaviours.md index e9bdc1128f..08b093bf14 100644 --- a/TSF/trustable/assertions/TA-Behaviours.md +++ b/TSF/trustable/assertions/TA-Behaviours.md @@ -1,26 +1,49 @@ #### Checklist for TA-BEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* How has the list of Expectations varied over time?
+* How has the list of Expectations varied over time? + Answer: The list of expectations is taken from [here](https://eclipse-score.github.io/score/main/modules/baselibs/json/docs/requirements/index.html), whose development can be retraced using git. -* How confident can we be that this list is comprehensive?
+ +* How confident can we be that this list is comprehensive? + Answer: The list of expectations has been collected amongst the stakeholders in S-CORE, so that we are very confident that the list is comprehensive. -* Could some participants have incentives to manipulate information?
+ +* Could some participants have incentives to manipulate information? + Answer: We can not imagine any reason. -* Could there be whole categories of Expectations still undiscovered?
+ +* Could there be whole categories of Expectations still undiscovered? + Answer: It is unlikely, but the parsing of cbor could become relevant at some time. -* Can we identify Expectations that have been understood but not specified?
+ +* Can we identify Expectations that have been understood but not specified? + Answer: No. -* Can we identify some new Expectations, right now?
+ +* Can we identify some new Expectations, right now? + Answer: No. -* How confident can we be that this list covers all critical requirements?
+ +* How confident can we be that this list covers all critical requirements? + Answer: We can not think of any more critical requirement of a JSON parser than to parse JSON data. -* How comprehensive is the list of tests?
+ +* How comprehensive is the list of tests? + Answer: The tests cover 99.186% of the code, according to coveralls, so we assume that the list of tests is quite comprehensive. -* Is every Expectation covered by at least one implemented test?
+ +* Is every Expectation covered by at least one implemented test? + Answer: Yes, both of the expectations are covered by at least one implemented test. -* Are there any Expectations where we believe more coverage would help?
+ +* Are there any Expectations where we believe more coverage would help? + Answer: No. -* How do dependencies affect Expectations, and are their properties verifiable?
+ +* How do dependencies affect Expectations, and are their properties verifiable? + Answer: There are no dependencies. -* Are input analysis findings from components, tools, and data considered in relation to Expectations?
+ +* Are input analysis findings from components, tools, and data considered in relation to Expectations? + Answer: No findings have been found. diff --git a/TSF/trustable/assertions/TA-Confidence.md b/TSF/trustable/assertions/TA-Confidence.md index 4699848741..b171f97d79 100644 --- a/TSF/trustable/assertions/TA-Confidence.md +++ b/TSF/trustable/assertions/TA-Confidence.md @@ -1,10 +1,17 @@ #### Checklist for TA-CONFIDENCE from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* What is the algorithm for combining/comparing the scores?
+* What is the algorithm for combining/comparing the scores? + Answer: It is the standard algorithm of trudag. -* How confident are we that this algorithm is fit for purpose?
+ +* How confident are we that this algorithm is fit for purpose? + Answer: We have no reason to assume that the standard algorithm is not fit for our purpose. -* What are the trends for each score?
+ +* What are the trends for each score? + Answer: CAN NOT BE ANSWERED NOW -* How well do our scores correlate with external feedback signals?
+ +* How well do our scores correlate with external feedback signals? + Answer: CAN NOT BE ANSWERED NOW \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Constraints.md b/TSF/trustable/assertions/TA-Constraints.md index 5ea6181929..2ca46e3233 100644 --- a/TSF/trustable/assertions/TA-Constraints.md +++ b/TSF/trustable/assertions/TA-Constraints.md @@ -1,16 +1,29 @@ #### Checklist for TA-CONSTRAINTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* Are the constraints grounded in realistic expectations, backed by real-world examples?
+* Are the constraints grounded in realistic expectations, backed by real-world examples? + Answer: The constraints originate from S-CORE and the library itself. -* Do they effectively guide downstream consumers in expanding upon existing Statements?
+ +* Do they effectively guide downstream consumers in expanding upon existing Statements? + Answer: ????? -* Do they provide clear guidance for upstreams on reusing components with well-defined claims?
+ +* Do they provide clear guidance for upstreams on reusing components with well-defined claims? + Answer: ????? -* Are any Statements explicitly designated as not reusable or adaptable?
+ +* Are any Statements explicitly designated as not reusable or adaptable? + Answer: NO????? -* Are there worked examples from downstream or upstream users demonstrating these constraints in practice?
+ +* Are there worked examples from downstream or upstream users demonstrating these constraints in practice? + Answer: ???? -* Have there been any documented misunderstandings from users, and are these visibly resolved?
+ +* Have there been any documented misunderstandings from users, and are these visibly resolved? + Answer: Yes, it is documented that the brace initialisation regularly leads to confusion. -* Do external users actively keep up with updates, and are they properly notified of any changes?
+ +* Do external users actively keep up with updates, and are they properly notified of any changes? + Answer: NO CLUE ?????? \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Data.md b/TSF/trustable/assertions/TA-Data.md index 761f8e57c7..2d8fcedce6 100644 --- a/TSF/trustable/assertions/TA-Data.md +++ b/TSF/trustable/assertions/TA-Data.md @@ -1,22 +1,41 @@ #### Checklist for TA-DATA from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* Is all test data stored with long-term accessibility?
+* Is all test data stored with long-term accessibility? + Answer: If we assume that github is long-term accessible, then yes. -* Is all monitoring data stored with long-term accessibility?
+ +* Is all monitoring data stored with long-term accessibility? + Answer: There are no monitoring data. -* Are extensible data models implemented?
+ +* Are extensible data models implemented? + Answer: The data are stored in a sqlite database. -* Is sensitive data handled correctly (broadcasted, stored, discarded, or anonymised) with appropriate encryption and redundancy?
+ +* Is sensitive data handled correctly (broadcasted, stored, discarded, or anonymised) with appropriate encryption and redundancy? + Answer: There are no sensitive data produced, collected or stored. -* Are proper backup mechanisms in place?
+ +* Are proper backup mechanisms in place? + Answer: Not more than the default mechanisms of github. -* Are storage and backup limits tested?
+ +* Are storage and backup limits tested? + Answer: No. -* Are all data changes traceable?
+ +* Are all data changes traceable? + Answer: Yes, due to the usage of github. -* Are concurrent changes correctly managed and resolved?
+ +* Are concurrent changes correctly managed and resolved? + Answer: Yes, due to the usage of github. -* Is data accessible only to intended parties?
+ +* Is data accessible only to intended parties? + Answer: There are no unintended parties. -* Are any subsets of our data being published?
+ +* Are any subsets of our data being published? + Answer: Yes, the collected data are publicly available. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Fixes.md b/TSF/trustable/assertions/TA-Fixes.md index ded4ee56a2..32667935f0 100644 --- a/TSF/trustable/assertions/TA-Fixes.md +++ b/TSF/trustable/assertions/TA-Fixes.md @@ -1,33 +1,62 @@ #### Checklist for TA-FIXES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* How many faults have we identified in XYZ?
+* How many faults have we identified in XYZ? + Answer: There are no identifiable faults concerning the expectations. -* How many unknown faults remain to be found, based on the number that have been processed so far?
+ +* How many unknown faults remain to be found, based on the number that have been processed so far? + Answer: It is unlikely that there are unknown faults concerning the expectations. -* Is there any possibility that people could be motivated to manipulate the lists (e.g. bug bonus or pressure to close).
+ +* Is there any possibility that people could be motivated to manipulate the lists (e.g. bug bonus or pressure to close). + Answer: Since the project is entirely open source, it is quite unlikely. -* How many faults may be unrecorded (or incorrectly closed, or downplayed)?
+ +* How many faults may be unrecorded (or incorrectly closed, or downplayed)? + Answer: There may be none concerning the expectations. -* How do we collect lists of bugs and known vulnerabilities from components?
+ +* How do we collect lists of bugs and known vulnerabilities from components? + Answer: We pull the list from the issues reported to nlohmann/json labelled as bug and open or opened since the last release. -* How (and how often) do we check these lists for relevant bugs and known vulnerabilities?
+ +* How (and how often) do we check these lists for relevant bugs and known vulnerabilities? + Answer: Whenever we generate the documentation, the list is pulled. If there is an issue previously unrecorded, then the maintainer is enticed to check this issue on applicability. -* How confident can we be that the lists are honestly maintained?
+ +* How confident can we be that the lists are honestly maintained? + Answer: We can not imagine a reason why the list could be dishonestly maintained. -* Could some participants have incentives to manipulate information?
+ +* Could some participants have incentives to manipulate information? + Answer: We can not think of a reason why. -* How confident are we that the lists are comprehensive?
+ +* How confident are we that the lists are comprehensive? + Answer: We have no reason to assume that discovered bugs are not reported to nlohmann/json. -* Could there be whole categories of bugs/vulnerabilities still undiscovered?
+ +* Could there be whole categories of bugs/vulnerabilities still undiscovered? + Answer: There could be a mislabelling of issues, but it is unlikely that there are bugs or vulnerabilities not labelled as bug. -* How effective is our triage/prioritisation?
+ +* How effective is our triage/prioritisation? + Answer: UNKNOWN -* How many components have never been updated?
+ +* How many components have never been updated? + Answer: None, the single component is up to date. -* How confident are we that we could update them?
+ +* How confident are we that we could update them? + Answer: If nlohmann/json were to release a new version, we are very confident that we could update to that version. -* How confident are we that outstanding fixes do not impact our Expectations?
+ +* How confident are we that outstanding fixes do not impact our Expectations? + Answer: We have not found any outstanding fixes impacting our expectations. -* How confident are we that outstanding fixes do not address Misbehaviours?
+ +* How confident are we that outstanding fixes do not address Misbehaviours? + Answer: We are very confident that none of the outstanding fixes address misbehaviours, since no misbehaviours have been identified. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Indicators.md b/TSF/trustable/assertions/TA-Indicators.md index 7c0e62b7c1..d328e56d3b 100644 --- a/TSF/trustable/assertions/TA-Indicators.md +++ b/TSF/trustable/assertions/TA-Indicators.md @@ -1,26 +1,49 @@ #### Checklist for TA-INDICATORS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* How appropriate/thorough are the analyses that led to the indicators?
+ +* How appropriate/thorough are the analyses that led to the indicators? + Answer: There are no warning indicators, since no misbehaviours have been identified for the use of the library for parsing and verification of JSON data according to RFC8259. -* How confident can we be that the list of indicators is comprehensive?
+ +* How confident can we be that the list of indicators is comprehensive? + Answer: Since there are none, the list is not comprehensive at all. -* Could there be whole categories of warning indicators still missing?
+ +* Could there be whole categories of warning indicators still missing? + Answer: Yes, there could. These warning indicators, however, should be implemented in the wrapper defining the interface between the library and the project using it. -* How has the list of advance warning indicators varied over time?
+ +* How has the list of advance warning indicators varied over time? + Answer: It has stayed constant. -* How confident are we that the indicators are leading/predictive?
+ +* How confident are we that the indicators are leading/predictive? + Answer: There are none. -* Are there misbehaviours that have no advance warning indicators?
+ +* Are there misbehaviours that have no advance warning indicators? + Answer: There are no misbehaviours identified. -* Can we collect data for all indicators?
+ +* Can we collect data for all indicators? + Answer: We do collect data for all indicators that are currently implemented. -* Are the monitoring mechanisms used included in our Trustable scope?
+ +* Are the monitoring mechanisms used included in our Trustable scope? + Answer: No, but there are also none. -* Are there gaps or trends in the data?
+ +* Are there gaps or trends in the data? + Answer: There are no data. -* If there are gaps or trends, are they analysed and addressed?
+ +* If there are gaps or trends, are they analysed and addressed? + Answer: There are no data. -* Is the data actually predictive/useful?
+ +* Is the data actually predictive/useful? + Answer: There are no data. -* Are indicators from code, component, tool, or data inspections taken into consideration?
+ +* Are indicators from code, component, tool, or data inspections taken into consideration? + Answer: There are no indicators. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Inputs.md b/TSF/trustable/assertions/TA-Inputs.md index 07cb3d9944..8f3f0a10d9 100644 --- a/TSF/trustable/assertions/TA-Inputs.md +++ b/TSF/trustable/assertions/TA-Inputs.md @@ -2,33 +2,62 @@ The single_include/nlohmann/json.hpp is the single and only component of the library. -* Are there components that are not on the list?
+* Are there components that are not on the list? + Answer: No. -* Are there assessments for all components?
+ +* Are there assessments for all components? + Answer: ????? -* Has an assessment been done for the current version of the component?
+ +* Has an assessment been done for the current version of the component? + Answer: ????? -* Have sources of bug and/or vulnerability data been identified?
+ +* Have sources of bug and/or vulnerability data been identified? + Answer: There are no bug and/or vulnerability data. -* Have additional tests and/or Expectations been documented and linked to component assessment?
+ +* Have additional tests and/or Expectations been documented and linked to component assessment? + Answer: ?????? -* Are component tests run when integrating new versions of components?
+ +* Are component tests run when integrating new versions of components? + Answer: There are no further components. -* Are there tools that are not on the list?
+ +* Are there tools that are not on the list? + Answer: No. -* Are there impact assessments for all tools?
+ +* Are there impact assessments for all tools? + Answer: ?????? -* Have tools with high impact been qualified?
+ +* Have tools with high impact been qualified? + Answer: There are no tools with high impact. -* Were assessments or reviews done for the current tool versions?
+ +* Were assessments or reviews done for the current tool versions? + Answer: ????? -* Have additional tests and/or Expectations been documented and linked to tool assessments?
+ +* Have additional tests and/or Expectations been documented and linked to tool assessments? + Answer: No. -* Are tool tests run when integrating new versions of tools?
+ +* Are tool tests run when integrating new versions of tools? + Answer: There are no tools. -* Are tool and component tests included in release preparation?
+ +* Are tool and component tests included in release preparation? + Answer: Yes, the tests of the library are included in the release. -* Can patches be applied, and then upstreamed for long-term maintenance?
+ +* Can patches be applied, and then upstreamed for long-term maintenance? + Answer: Yes, if ever a misbehaviour is found and patched, then a pull-request to the original nlohmann/json repository can be opened to upstream the changes. -* Do all dependencies comply with acceptable licensing terms?
+ +* Do all dependencies comply with acceptable licensing terms? + Answer: Yes. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Iterations.md b/TSF/trustable/assertions/TA-Iterations.md index e7e1a3a41b..4c2daada2d 100644 --- a/TSF/trustable/assertions/TA-Iterations.md +++ b/TSF/trustable/assertions/TA-Iterations.md @@ -1,12 +1,21 @@ #### Checklist for TA-ITERATIONS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* How much of the software is provided as binary only, expressed as a fraction of the BoM list?
+* How much of the software is provided as binary only, expressed as a fraction of the BoM list? + Answer: None. -* How much is binary, expressed as a fraction of the total storage footprint?
+ +* How much is binary, expressed as a fraction of the total storage footprint? + Answer: None. -* For binaries, what claims are being made and how confident are we in the people/organisations making the claims?
+ +* For binaries, what claims are being made and how confident are we in the people/organisations making the claims? + Answer: There are no binaries. -* For third-party source code, what claims are we making, and how confident are we about these claims?
+ +* For third-party source code, what claims are we making, and how confident are we about these claims? + Answer: There is no third-party source code in the library. -* For software developed by us, what claims are we making, and how confident are we about these claims?
+ +* For software developed by us, what claims are we making, and how confident are we about these claims? + Answer: This is the remainder of the documentation. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Methodologies.md b/TSF/trustable/assertions/TA-Methodologies.md index 9400330ec3..c6a73b7aeb 100644 --- a/TSF/trustable/assertions/TA-Methodologies.md +++ b/TSF/trustable/assertions/TA-Methodologies.md @@ -2,23 +2,42 @@ This project follows purely the Methodologies of Eclipse S-CORE. -* Are the identified gaps documented clearly to justify using a manual process?
+* Are the identified gaps documented clearly to justify using a manual process? + Answer: -* Are the goals for each process clearly defined?
+ +* Are the goals for each process clearly defined? + Answer: -* Is the sequence of procedures documented in an unambiguous manner?
+ +* Is the sequence of procedures documented in an unambiguous manner? + Answer: -* Can improvements to the processes be suggested and implemented?
+ +* Can improvements to the processes be suggested and implemented? + Answer: -* How frequently are processes changed?
+ +* How frequently are processes changed? + Answer: -* How are changes to manual processes communicated?
+ +* How are changes to manual processes communicated? + Answer: -* Are there any exceptions to the processes?
+ +* Are there any exceptions to the processes? + Answer: -* How is evidence of process adherence recorded?
+ +* How is evidence of process adherence recorded? + Answer: -* How is the effectiveness of the process evaluated?
+ +* How is the effectiveness of the process evaluated? + Answer: -* Is ongoing training required to follow these processes?
+ +* Is ongoing training required to follow these processes? + Answer: \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Misbehaviours.md b/TSF/trustable/assertions/TA-Misbehaviours.md index d04879591f..53393e477a 100644 --- a/TSF/trustable/assertions/TA-Misbehaviours.md +++ b/TSF/trustable/assertions/TA-Misbehaviours.md @@ -1,26 +1,49 @@ #### Checklist for TA-MISBEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* How has the list of misbehaviours varied over time?
+* How has the list of misbehaviours varied over time? + Answer: -* How confident can we be that this list is comprehensive?
+ +* How confident can we be that this list is comprehensive? + Answer: -* How well do the misbehaviours map to the expectations?
+ +* How well do the misbehaviours map to the expectations? + Answer: -* Could some participants have incentives to manipulate information?
+ +* Could some participants have incentives to manipulate information? + Answer: -* Could there be whole categories of misbehaviours still undiscovered?
+ +* Could there be whole categories of misbehaviours still undiscovered? + Answer: -* Can we identify misbehaviours that have been understood but not specified?
+ +* Can we identify misbehaviours that have been understood but not specified? + Answer: -* Can we identify some new misbehaviours, right now?
+ +* Can we identify some new misbehaviours, right now? + Answer: -* Is every misbehaviour represented by at least one fault induction test?
+ +* Is every misbehaviour represented by at least one fault induction test? + Answer: -* Are fault inductions used to demonstrate that tests which usually pass can and do fail appropriately?
+ +* Are fault inductions used to demonstrate that tests which usually pass can and do fail appropriately? + Answer: -* Are all the fault induction results actually collected?
+ +* Are all the fault induction results actually collected? + Answer: -* Are the results evaluated?
+ +* Are the results evaluated? + Answer: -* Do input analysis findings on verifiable tool or component claims and features identify additional misbehaviours or support existing mitigations?
+ +* Do input analysis findings on verifiable tool or component claims and features identify additional misbehaviours or support existing mitigations? + Answer: \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Releases.md b/TSF/trustable/assertions/TA-Releases.md index 9ec7516c0b..b411f76fde 100644 --- a/TSF/trustable/assertions/TA-Releases.md +++ b/TSF/trustable/assertions/TA-Releases.md @@ -1,14 +1,25 @@ #### Checklist for TA-RELEASES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* How confident are we that all components are taken from within our controlled environment?
+* How confident are we that all components are taken from within our controlled environment? + Answer: This library does not take anything from outside of this repository. -* How confident are we that all of the tools we are using are also under our control?
+ +* How confident are we that all of the tools we are using are also under our control? + Answer: The version of nlohmann/json that is documented with this documentation is under the full control of the Eclipse S-CORE organisation. -* Are our builds repeatable on a different server, or in a different context?
+ +* Are our builds repeatable on a different server, or in a different context? + Answer: Since there is no "build" of the header-only library, yes. -* How sure are we that our builds don't access the internet?
+ +* How sure are we that our builds don't access the internet? + Answer: There is no implemented access to the internet in the library itself. The testsuite is downloaded from within Eclipse S-CORE. -* How many of our components are non-reproducible?
+ +* How many of our components are non-reproducible? + Answer: The single component is reproducible. -* How confident are we that our reproducibility check is correct?
+ +* How confident are we that our reproducibility check is correct? + Answer: Quite. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Supply-Chain.md b/TSF/trustable/assertions/TA-Supply-Chain.md index 5be1c82f5b..2013fcc0bb 100644 --- a/TSF/trustable/assertions/TA-Supply-Chain.md +++ b/TSF/trustable/assertions/TA-Supply-Chain.md @@ -1,14 +1,25 @@ #### Checklist for TA-SUPPLY_CHAIN from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* Could there be other components, missed from the list?
+* Could there be other components, missed from the list? + Answer: No. -* Does the list include all toolchain components?
+ +* Does the list include all toolchain components? + Answer: Yes. -* Does the toolchain include a bootstrap?
+ +* Does the toolchain include a bootstrap? + Answer: No. -* Could the content of a mirrored project be compromised by an upstream change?
+ +* Could the content of a mirrored project be compromised by an upstream change? + Answer: No. -* Are mirrored projects up to date with the upstream project?
+ +* Are mirrored projects up to date with the upstream project? + Answer: Yes, the library is up to date with the most recent release of the original nlohmann/json. -* Are mirrored projects based on the correct upstream?
+ +* Are mirrored projects based on the correct upstream? + Answer: Yes. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Tests.md b/TSF/trustable/assertions/TA-Tests.md index 8bda0ea007..0958f7b00f 100644 --- a/TSF/trustable/assertions/TA-Tests.md +++ b/TSF/trustable/assertions/TA-Tests.md @@ -1,14 +1,25 @@ #### Checklist for TA-TESTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* How confident are we that our test tooling and environment setups used for tests, fault inductions, and analyses are reproducible?
+ +* How confident are we that our test tooling and environment setups used for tests, fault inductions, and analyses are reproducible? + Answer: The tests can be reproduced at any time on any machine running the versions of the operating systems and compilers as provided (TODO). -* Are any exceptions identified, documented and justified?
+ +* Are any exceptions identified, documented and justified? + Answer: Not applicable. -* How confident are we that all test components are taken from within our controlled environment?
+ +* How confident are we that all test components are taken from within our controlled environment? + Answer: All tests are either self-contained or download test data from [within Eclipse S-CORE](https://github.com/eclipse-score/inc_nlohmann_json/tree/json_test_data_version_3_1_0_mirror). -* How confident are we that all of the test environments we are using are also under our control?
+ +* How confident are we that all of the test environments we are using are also under our control? + Answer: ???? The environments are standard Docker images of Ubuntu and standard versions of compilers. -* Do we record all test environment components, including hardware and infrastructure used for exercising tests and processing input/output data?
+ +* Do we record all test environment components, including hardware and infrastructure used for exercising tests and processing input/output data? + Answer: No, since the tests are independent of hardware, these data are not collected. -* How confident are we that all tests scenarios are repeatable?
+ +* How confident are we that all tests scenarios are repeatable? + Answer: Very. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Updates.md b/TSF/trustable/assertions/TA-Updates.md index 7083ecb8e7..33e34b9a1c 100644 --- a/TSF/trustable/assertions/TA-Updates.md +++ b/TSF/trustable/assertions/TA-Updates.md @@ -1,14 +1,25 @@ #### Checklist for TA-UPDATES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -* Where are the change and configuration management controls specified?
+* Where are the change and configuration management controls specified? + Answer: WIP -* Are these controls enforced for all of components, tools, data, documentation and configurations?
+ +* Are these controls enforced for all of components, tools, data, documentation and configurations? + Answer: The S-CORE Methodology is followed, compliance with which ensures that the change process is followed. -* Are there any ways in which these controls can be subverted, and have we mitigated them?
+ +* Are there any ways in which these controls can be subverted, and have we mitigated them? + Answer: Yes, the change process can just not be followed. We have no real method to enforce it other than to trust that the committers follow the S-CORE processes. -* Does change control capture all potential regressions?
+ +* Does change control capture all potential regressions? + Answer: NO CLUE -* Is change control timely enough?
+ +* Is change control timely enough? + Answer: Not applicable; as far as can be understood right now, there is no imminent need to keep the library up to date. -* Are all guidance and checks understandable and consistently followed?
+ +* Are all guidance and checks understandable and consistently followed? + Answer: WIP \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Validation.md b/TSF/trustable/assertions/TA-Validation.md index 65b56a03b2..09a6d70fd6 100644 --- a/TSF/trustable/assertions/TA-Validation.md +++ b/TSF/trustable/assertions/TA-Validation.md @@ -2,29 +2,54 @@ I DO NOT FEEL CONFIDENT TO ANSWER THIS! -* Is the selection of tests correct?
+* Is the selection of tests correct? + Answer: -* Are the tests executed enough times?
+ +* Are the tests executed enough times? + Answer: -* How confident are we that all test results are being captured?
+ +* How confident are we that all test results are being captured? + Answer: -* Can we look at any individual test result, and establish what it relates to?
+ +* Can we look at any individual test result, and establish what it relates to? + Answer: -* Can we trace from any test result to the expectation it relates to?
+ +* Can we trace from any test result to the expectation it relates to? + Answer: -* Can we identify precisely which environment (software and hardware) were used?
+ +* Can we identify precisely which environment (software and hardware) were used? + Answer: -* How many pass/fail results would be expected, based on the scheduled tests?
+ +* How many pass/fail results would be expected, based on the scheduled tests? + Answer: -* Do we have all of the expected results?
+ +* Do we have all of the expected results? + Answer: -* Do we have time-series data for all of those results?
+ +* Do we have time-series data for all of those results? + Answer: -* If there are any gaps, do we understand why?
+ +* If there are any gaps, do we understand why? + Answer: -* Are the test validation strategies credible and appropriate?
+ +* Are the test validation strategies credible and appropriate? + Answer: -* What proportion of the implemented tests are validated?
+ +* What proportion of the implemented tests are validated? + Answer: -* Have the tests been verified using known good and bad data?
+ +* Have the tests been verified using known good and bad data? + Answer: \ No newline at end of file From 7de7bc3815a8c474191038fbfe1126dd00c2abc9 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Mon, 20 Oct 2025 08:10:42 +0000 Subject: [PATCH 22/67] reference for pretty printing --- .dotstop_extensions/references.py | 32 ++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/.dotstop_extensions/references.py b/.dotstop_extensions/references.py index 747093e06d..cebcc04de1 100644 --- a/.dotstop_extensions/references.py +++ b/.dotstop_extensions/references.py @@ -1,7 +1,6 @@ from pathlib import Path from trudag.dotstop.core.reference.references import BaseReference from trudag.dotstop.core.reference.references import SourceSpanReference -from trudag.dotstop.core.reference.references import LocalFileReference import requests import sqlite3 import re @@ -763,8 +762,10 @@ def as_markdown(self, filepath: None | str = None) -> str: def __str__(self) -> str: # this is used as a title in the trudag report return "List of all unit-tests" - -class VerboseFileReference(LocalFileReference): + +from trudag.dotstop.core.reference.references import LocalFileReference as LFR + +class VerboseFileReference(LFR): def __init__(self, path: str, description: str = "", **kwargs) -> None: self._path = Path(path) self._description = description @@ -791,6 +792,31 @@ def as_markdown(self, filepath: None | str = None) -> str: def __str__(self) -> str: return str(self._path) +class Checklist(LFR): + def __init__(self, path: str, **kwargs) -> None: + self._path = Path(path) + + @classmethod + def type(cls) -> str: + return "checklist" + + @property + def content(self) -> bytes: + if not self._path.is_file(): + raise ReferenceError( + f"Cannot get non-existent or non-regular file {self._path}" + ) + with self._path.open("rb") as reference_content: + return reference_content.read() + + def as_markdown(self, filepath: None | str = None) -> str: + return 
self.content.decode('utf-8') + + def __str__(self) -> str: + return str(self._path) + +del LFR + class NumberOfFailures(BaseReference): def __init__(self, owner: str, repo: str, branch: str | None = None) -> None: self._owner = owner From 543b664f76f311efec79f68606f8724e9480be6e Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Mon, 20 Oct 2025 13:28:45 +0000 Subject: [PATCH 23/67] minor tweaks --- TSF/trustable/assertions/TA-Analysis.md | 14 +++++------ TSF/trustable/assertions/TA-Behaviours.md | 4 +-- TSF/trustable/assertions/TA-Constraints.md | 4 +-- TSF/trustable/assertions/TA-Data.md | 4 +-- TSF/trustable/assertions/TA-Fixes.md | 12 ++++----- TSF/trustable/assertions/TA-Indicators.md | 10 ++++---- TSF/trustable/assertions/TA-Inputs.md | 12 ++++----- TSF/trustable/assertions/TA-Misbehaviours.md | 24 +++++++++--------- TSF/trustable/assertions/TA-Supply-Chain.md | 8 +++--- TSF/trustable/assertions/TA-Tests.md | 6 ++--- TSF/trustable/assertions/TA-Updates.md | 2 +- TSF/trustable/assertions/TA-Validation.md | 26 ++++++++++---------- 12 files changed, 63 insertions(+), 63 deletions(-) diff --git a/TSF/trustable/assertions/TA-Analysis.md b/TSF/trustable/assertions/TA-Analysis.md index aa98a6cf7c..028c4f23ef 100644 --- a/TSF/trustable/assertions/TA-Analysis.md +++ b/TSF/trustable/assertions/TA-Analysis.md @@ -14,7 +14,7 @@ * How reliable is the monitoring process? - Answer: Due to no indicator data being collected, there is no monitoring process + Answer: Due to no indicator data being collected, there is no monitoring process. * How well does the production data correlate with our test data? @@ -37,23 +37,23 @@ * Do we have sensible/appropriate target failure rates? - Answer: For the unit and integration tests, 0. + Answer: For the unit and integration tests, zero. The target failure rate of fuzz testing is not under our control. * Do we need to check the targets? - Answer: ???????? No. + Answer: For the unit and integration tests, no. 
Since the fuzz testing runs and is investigated in the original nlohmann/json, there is no need to check the target. * Are we achieving the targets? - Answer: ??????? Yes. + Answer: For the unit and integration tests, yes. The achieving of the targets for the fuzz-testing is evaluated within the original nlohmann/json. * Are all underlying assumptions and target conditions for the analysis specified? - Answer: There is no analysis. + Answer: Since none of the unit and integration tests are expected to fail, there is no further analysis of the results besides verifying the expectation. * Have the underlying assumptions been verified using known good data? - Answer: There is no analysis so that there are no underlying assumptions. + Answer: The assumption that all unit and integration tests succeed under the expected conditions is demonstrated by the non-failure of the CI-Pipeline. * Has the Misbehaviour identification process been verified using known bad data? @@ -61,4 +61,4 @@ * Are results shown to be reproducible? - Answer: The tests can be reproduced on every machine. + Answer: It is expected that the tests can be reproduced on every modern sufficiently powerful machine. diff --git a/TSF/trustable/assertions/TA-Behaviours.md b/TSF/trustable/assertions/TA-Behaviours.md index 08b093bf14..c9712bbe70 100644 --- a/TSF/trustable/assertions/TA-Behaviours.md +++ b/TSF/trustable/assertions/TA-Behaviours.md @@ -14,7 +14,7 @@ * Could there be whole categories of Expectations still undiscovered? - Answer: It is unlikely, but the parsing of cbor could become relevant at some time. + Answer: It is unlikely, but the parsing of CBOR could become relevant at some time. * Can we identify Expectations that have been understood but not specified? @@ -26,7 +26,7 @@ * How confident can we be that this list covers all critical requirements? - Answer: We can not think of any more critical requirement of a JSON parser than to parse JSON data. 
+ Answer: We can not think of any more critical requirement of a JSON parser in the sense of RFC8259 than to parse JSON data in the sense of RFC8259. * How comprehensive is the list of tests? diff --git a/TSF/trustable/assertions/TA-Constraints.md b/TSF/trustable/assertions/TA-Constraints.md index 2ca46e3233..95c06d0ae1 100644 --- a/TSF/trustable/assertions/TA-Constraints.md +++ b/TSF/trustable/assertions/TA-Constraints.md @@ -22,8 +22,8 @@ * Have there been any documented misunderstandings from users, and are these visibly resolved? - Answer: Yes, it is documented that the brace initialisation regularly leads to confusion. + Answer: Yes, it is documented that the [brace initialisation](https://json.nlohmann.me/home/faq/) regularly leads to confusion, cf. [here](https://github.com/nlohmann/json/issues/4898). * Do external users actively keep up with updates, and are they properly notified of any changes? - Answer: NO CLUE ?????? \ No newline at end of file + Answer: External users of the library are not necessarily automatically notified of an update, and are neither assumed nor required to keep up to date. If the external user forks the github repository, however, then github shows automatically whenever the upstream changes. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Data.md b/TSF/trustable/assertions/TA-Data.md index 2d8fcedce6..e735df9f10 100644 --- a/TSF/trustable/assertions/TA-Data.md +++ b/TSF/trustable/assertions/TA-Data.md @@ -10,7 +10,7 @@ * Are extensible data models implemented? - Answer: The data are stored in a sqlite database. + Answer: The data are stored in an sqlite database. * Is sensitive data handled correctly (broadcasted, stored, discarded, or anonymised) with appropriate encryption and redundancy? @@ -34,7 +34,7 @@ * Is data accessible only to intended parties? - Answer: There are no unintended parties. + Answer: Since the library is open source, there are no unintended parties. 
* Are any subsets of our data being published? diff --git a/TSF/trustable/assertions/TA-Fixes.md b/TSF/trustable/assertions/TA-Fixes.md index 32667935f0..70d57a7247 100644 --- a/TSF/trustable/assertions/TA-Fixes.md +++ b/TSF/trustable/assertions/TA-Fixes.md @@ -15,15 +15,15 @@ * How many faults may be unrecorded (or incorrectly closed, or downplayed)? - Answer: There may be none concerning the expectations. + Answer: There may be none, at least when it concerns the expectations. * How do we collect lists of bugs and known vulnerabilities from components? - Answer: We pull the list from the issues reported to nlohmann/json labelled as bug and open or opened since the last release. + Answer: We pull the list from the issues reported to nlohmann/json labelled as bug and open or opened since the last release. This list is then stored using github, thereby enabling a traceability of the list. * How (and how often) do we check these lists for relevant bugs and known vulnerabilities? - Answer: Whenever we generate the documentation, the list is pulled. If there is an issue previously unrecorded, then the maintainer is enticed to check this issue on applicability. + Answer: Whenever we generate the documentation, the list is pulled. If there is an issue previously unrecorded, then the maintainer is encouraged by the change of the trustable score to check this issue on applicability. * How confident can we be that the lists are honestly maintained? @@ -39,11 +39,11 @@ * Could there be whole categories of bugs/vulnerabilities still undiscovered? - Answer: There could be a mislabelling of issues, but it is unlikely that there are bugs or vulnerabilities not labelled as bug. + Answer: There could be a mislabelling of issues, but it is unlikely that there are bugs or vulnerabilities not labelled as bug, instead it is likely that perceived issues due to a misunderstanding of how the library works are labelled as bug. * How effective is our triage/prioritisation? 
- Answer: UNKNOWN + Answer: ????? Since it is not intended to fix the library within S-CORE, but instead leave the development to the original nlohmann/json, there is no need to have a triage or prioritisation. * How many components have never been updated? @@ -59,4 +59,4 @@ * How confident are we that outstanding fixes do not address Misbehaviours? - Answer: We are very confident that none of the outstanding fixes do not affect the no identified misbehaviours. \ No newline at end of file + Answer: For all of the none identified misbehaviours, we are very confident that none of the outstanding fixes do not address them. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Indicators.md b/TSF/trustable/assertions/TA-Indicators.md index d328e56d3b..1b5cf3f7ab 100644 --- a/TSF/trustable/assertions/TA-Indicators.md +++ b/TSF/trustable/assertions/TA-Indicators.md @@ -2,15 +2,15 @@ * How appropriate/thorough are the analyses that led to the indicators? - Answer: There are no warning indicators for the none misbehaviours for the use of the library for parsing and verification of JSON data according to RFC8259. + Answer: Since no misbehaviours for the use of the library for parsing and verification of JSON data according to RFC8259 have been identified, no warning indicators are implemented. * How confident can we be that the list of indicators is comprehensive? - Answer: Since there are none, the list is not comprehensive at all. + Answer: There are no warning indicators implemented, of which we are very confident. * Could there be whole categories of warning indicators still missing? - Answer: Yes, there could. These warning indicators, however, should be implemented in the wrapper defining the interface between the library and the project using it. + Answer: Yes, there could. 
Within S-CORE, however, any warning indicator that is not natively implemented within the original nlohmann/json should be implemented in the wrapper defining the interface between the library and the project using it. * How has the list of advance warning indicators varied over time? @@ -26,7 +26,7 @@ * Can we collect data for all indicators? - Answer: We do collect data for all indicators that are currently implemented. + Answer: There are currently no implemented indicators, so that no data are collected. * Are the monitoring mechanisms used included in our Trustable scope? @@ -34,7 +34,7 @@ * Are there gaps or trends in the data? - Answer: There are no data. + Answer: There are no data where gaps or trends could be identified. * If there are gaps or trends, are they analysed and addressed? diff --git a/TSF/trustable/assertions/TA-Inputs.md b/TSF/trustable/assertions/TA-Inputs.md index 8f3f0a10d9..b407067959 100644 --- a/TSF/trustable/assertions/TA-Inputs.md +++ b/TSF/trustable/assertions/TA-Inputs.md @@ -28,19 +28,19 @@ The single_include/nlohmann/json.hpp is the single and only component of the lib * Are there tools that are not on the list? - Answer: No. + Answer: The library does not use external tools, except for the tools provided by the C++ standard library. * Are there impact assessments for all tools? - Answer: ?????? + Answer: ?????? The library does not use external tools for which an impact assessment has to be done. * Have tools with high impact been qualified? - Answer: There are no tools with high impact. + Answer: There are no tools with high impact. * Were assessments or reviews done for the current tool versions? - Answer: ????? + Answer: ????? The library does not use external tools for which an impact assessment has to be done. * Have additional tests and/or Expectations been documented and linked to tool assessments? 
@@ -48,7 +48,7 @@ The single_include/nlohmann/json.hpp is the single and only component of the lib * Are tool tests run when integrating new versions of tools? - Answer: There are no tools. + Answer: The library does not use external tools for which a new version needs to be integrated. * Are tool and component tests included in release preparation? @@ -60,4 +60,4 @@ The single_include/nlohmann/json.hpp is the single and only component of the lib * Do all dependencies comply with acceptable licensing terms? - Answer: Yes. \ No newline at end of file + Answer: Yes, the library is licensed under MIT License . \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Misbehaviours.md b/TSF/trustable/assertions/TA-Misbehaviours.md index 53393e477a..31a4c65c2d 100644 --- a/TSF/trustable/assertions/TA-Misbehaviours.md +++ b/TSF/trustable/assertions/TA-Misbehaviours.md @@ -2,48 +2,48 @@ * How has the list of misbehaviours varied over time? - Answer: + Answer: The list of misbehaviours is collected using github and its development is thereby understandable. * How confident can we be that this list is comprehensive? - Answer: + Answer: Due to the collaborative nature of the open source community, we deem it quite unlikely that there are any known misbehaviours which are not reported to the repository nlohmann/json. * How well do the misbehaviours map to the expectations? - Answer: + Answer: There are no identified misbehaviours that tangent the expectations. * Could some participants have incentives to manipulate information? - Answer: + Answer: We could not think of an incentive that any collaborateur could have to manipulate the information. * Could there be whole categories of misbehaviours still undiscovered? - Answer: + Answer: Due to the wide use and long-standing development of the library it is quite unlikely that any major misbehaviors, in particular regarding the parsing and validating of JSON data in the sense of RFC-8259, is undiscovered. 
* Can we identify misbehaviours that have been understood but not specified? - Answer: + Answer: No. * Can we identify some new misbehaviours, right now? - Answer: + Answer: No. * Is every misbehaviour represented by at least one fault induction test? - Answer: + Answer: Since there are no misbehaviours that concern the use within S-CORE, no. * Are fault inductions used to demonstrate that tests which usually pass can and do fail appropriately? - Answer: + Answer: ?????? No. * Are all the fault induction results actually collected? - Answer: + Answer: ?????? No. * Are the results evaluated? - Answer: + Answer: ?????? No. * Do input analysis findings on verifiable tool or component claims and features identify additional misbehaviours or support existing mitigations? - Answer: \ No newline at end of file + Answer: Currently, there is no analysis which identifies additional misbehaviours. The only such analysis is indirectly via the analysis of the fuzz testing, which currently does not identifies additional misbehaviours. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Supply-Chain.md b/TSF/trustable/assertions/TA-Supply-Chain.md index 2013fcc0bb..0c3df92e7c 100644 --- a/TSF/trustable/assertions/TA-Supply-Chain.md +++ b/TSF/trustable/assertions/TA-Supply-Chain.md @@ -2,19 +2,19 @@ * Could there be other components, missed from the list? - Answer: No. + Answer: Since the library does not contain any external components, no. * Does the list include all toolchain components? - Answer: Yes. + Answer: Since the library does not contain any external components, yes. * Does the toolchain include a bootstrap? - Answer: No. + Answer: ???? No. * Could the content of a mirrored project be compromised by an upstream change? - Answer: No. + Answer: Since the library does not contain any external components, no. * Are mirrored projects up to date with the upstream project? 
diff --git a/TSF/trustable/assertions/TA-Tests.md b/TSF/trustable/assertions/TA-Tests.md index 0958f7b00f..a376635c63 100644 --- a/TSF/trustable/assertions/TA-Tests.md +++ b/TSF/trustable/assertions/TA-Tests.md @@ -2,11 +2,11 @@ * How confident are we that our test tooling and environment setups used for tests, fault inductions, and analyses are reproducible? - Answer: The test can be reproduced any time on any machine running the versions of the operating systems and compilers as provided (TODO) + Answer: The test can be reproduced any time on any machine running the versions of the operating systems and compilers as provided (TODO, cf. AOU-14) * Are any exceptions identified, documented and justified? - Answer: Not applicable. + Answer: To the best of our understanding, there are no exceptions identified. * How confident are we that all test components are taken from within our controlled environment? @@ -22,4 +22,4 @@ * How confident are we that all tests scenarios are repeatable? - Answer: Very. \ No newline at end of file + Answer: All test scenarios are repeated daily in the CI pipeline. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Updates.md b/TSF/trustable/assertions/TA-Updates.md index 33e34b9a1c..72da18b81a 100644 --- a/TSF/trustable/assertions/TA-Updates.md +++ b/TSF/trustable/assertions/TA-Updates.md @@ -14,7 +14,7 @@ * Does change control capture all potential regressions? - Answer: NO CLUE + Answer: Due to the test coverage of 99.186%, it is unlikely that a potential regression is not captured. * Is change control timely enough? diff --git a/TSF/trustable/assertions/TA-Validation.md b/TSF/trustable/assertions/TA-Validation.md index 09a6d70fd6..7867a5e0e2 100644 --- a/TSF/trustable/assertions/TA-Validation.md +++ b/TSF/trustable/assertions/TA-Validation.md @@ -4,52 +4,52 @@ I DO NOT FEEL CONFIDENT TO ANSWER THIS! * Is the selection of tests correct? - Answer: + Answer: ???? Who could tell this? 
* Are the tests executed enough times? - Answer: + Answer: ???? Define "enough times" * How confident are we that all test results are being captured? - Answer: + Answer: ???? How fine-grained is a test-result supposed to be? * Can we look at any individual test result, and establish what it relates to? - Answer: + Answer: ???? * Can we trace from any test result to the expectation it relates to? - Answer: + Answer: No, there are more tests than expectations, and in particular tests that relate to the inner workings of the library which are not used by S-CORE. * Can we identify precisely which environment (software and hardware) were used? - Answer: + Answer: ???? How precisely shall that be? Moreover, the tests are supposed to run independent of underlying hardware, since this is a software. * How many pass/fail results would be expected, based on the scheduled tests? - Answer: + Answer: Zero fails. * Do we have all of the expected results? - Answer: + Answer: Yes. * Do we have time-series data for all of those results? - Answer: + Answer: Yes, there are time-series data. * If there are any gaps, do we understand why? - Answer: + Answer: ???? Define gaps * Are the test validation strategies credible and appropriate? - Answer: + Answer: ???? Define test validation strategies * What proportion of the implemented tests are validated? - Answer: + Answer: ???? None. * Have the tests been verified using known good and bad data? - Answer: \ No newline at end of file + Answer: ???? 
\ No newline at end of file From 800c31b9b96673ae8bd9974b8d140880e5839b6a Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Mon, 20 Oct 2025 13:50:32 +0000 Subject: [PATCH 24/67] even more minor tweak --- TSF/trustable/assertions/TA-Analysis.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TSF/trustable/assertions/TA-Analysis.md b/TSF/trustable/assertions/TA-Analysis.md index 028c4f23ef..77dd636316 100644 --- a/TSF/trustable/assertions/TA-Analysis.md +++ b/TSF/trustable/assertions/TA-Analysis.md @@ -49,7 +49,7 @@ * Are all underlying assumptions and target conditions for the analysis specified? - Answer: Since none of the unit and integration tests are expected to fail, there is no further analysis of the results besides verifying the expectation. + Answer: Since none of the unit and integration tests are expected to fail, there is no further analysis of the results besides verifying the expectation. In case any test fails ever, the failure of the CI-pipeline encourages the maintainer to investigate. * Have the underlying assumptions been verified using known good data? @@ -57,7 +57,7 @@ * Has the Misbehaviour identification process been verified using known bad data? - Answer: Misbehaviours published on nlohmann/json usually provide minimal working examples for reproducing the faulty behaviour. + Answer: Misbehaviours published on nlohmann/json usually provide minimal working examples for reproducing the faulty behaviour, enabling everyone to verify the identified misbehaviours. There is, however, no automatic process for the identification of misbehaviours. * Are results shown to be reproducible? 
From 1afd24545895a67f5b54f8e167fa115673e699f5 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Tue, 21 Oct 2025 09:11:55 +0000 Subject: [PATCH 25/67] add update description --- TSF/README.md | 186 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 185 insertions(+), 1 deletion(-) diff --git a/TSF/README.md b/TSF/README.md index 1c88ff011a..c4475d0826 100644 --- a/TSF/README.md +++ b/TSF/README.md @@ -6,4 +6,188 @@ The TSF-related additions, such as the Trustable Graph and tooling extensions, a - the report generated by [trudag](https://codethinklabs.gitlab.io/trustable/trustable/trudag/usage.html) is placed in `TSF/docs/generated`. This can either be produced as part of the CI pipeline or manually using `./TSF/scripts/generate_documentation.sh` - the utility scripts in the `TSF/scripts` folder -The TSF graph (including links, nodes and their hashes) is saved in the `.dotstop.dot` file and the trudag extensions including CPP test references are stored in the `.dotstop_extensions` folder since these locations are required by the trudag tool. \ No newline at end of file +The TSF graph (including links, nodes and their hashes) is saved in the `.dotstop.dot` file and the trudag extensions including CPP test references are stored in the `.dotstop_extensions` folder since these locations are required by the trudag tool. + +# Update Concept for the version of nlohmann/json within S-CORE + +## Assumptions of use + +This description of an update process is based on the following structure of the repository WHICH IS NOT THE CASE YET. +It is assumed that the repository possesses a default branch called ``main`` containing the most recent documented version of ``nlohmann/json`` together with its documentation. +Additionally, there is a branch ``develop``, which is **not** intended to mirror the branch of ``nlohmann/json`` with the same name, but instead serves as an in-repository testing ground for changes to either the library or its documentation. 
+The releases of the documented version are identified by tags on ``main``. +Moreover, the branch protection rules for ``main`` are set as described in the description of the forking process in ``TSF/README.md`` (WIP). + +## Update process of the original nlohmann/json + +The releases of ``nlohmann/json`` are collected on the `Release site ` of the repository ``nlohmann/json``. +Each release announcement is expected to contain the release date, SHA-256 values for json.hpp, include.zip and json.tar.xz, and a brief list containing bug fixes, improvements, further changes and deprecated functions. +The new release is expected to be located within the branch **master**, from where the most recent version can be drawn. + +## Update process of the S-CORE version + +In the following, we shall describe the intricacies of updating the version of ``nlohmann/json`` within Eclipse S-CORE. +This version is not a mere fork of the original master branch of ``nlohmann/json``, but instead enriched with the documentation following the Trustable Software Framework (TSF). +The enrichment with the documentation necessitates some changes to the fork of the original repository. +For the most part, these changes are in-obtrusive, and mere additions. +In particular, the folders ``include`` and ``single-include`` remain unchanged, and should be updated without further adaptations. +In some cases, however, additional tests are run and data are generated and collected, which were not run or generated in the original ``nlohmann/json``, so that obtrusive changes of files were necessary. +For these files, and in particular the workflow files, caution must be exercised, as to not disturb the documentation. +Moreover, some parts of the documentation must be adapted to the new version. + + +### What can not be updated without further precautions? 
+ +* ``cmake/ci.cmake`` + This file defines, in particular, the various custom cmake targets; in particular, the various configurations for the execution of the unit- and integration-tests are defined. + The TSF requires, or, at the very least, strongly encourages us to collect test-results. + In order to do this efficiently, the ctest command is adapted to automatically generate the junit-logs of each test-run. + For this, the option ``--output-junit`` is set with output path ``../my_logs/TARGETNAME_junit.xml``, where TARGETNAME is replaced by the name of the respective cmake target; in case that this convention is insufficient to uniquely identify the logs, TARGETNAME is amended by a number. + When updating, it must be ensured that these adaptations are preserved. + Moreover, if the update introduces new cmake targets or new executions of ctest, it must be ensured, that the junit-log is generated and stored with a similar naming convention in the folder "../my_logs/". + Otherwise, it can not be ensured that the test data are accurately captured. + +* ``cmake/download_test_data.cmake`` + This file is modified to ensure that the test-data are not downloaded from the original test-data repository, but instead from the copy of that repository within the Eclipse S-CORE organisation. + It must be ensured that this change is preserved. + +* ``tests/CMakeLists.txt`` + This file collects, in particular, the files containing the unit- and integration-tests in a list, which is given to cmake. + Custom tests were added in TSF/tests to document the fulfillment of the expectations. + To ensure that these tests are run, the file tests/CMakeLists.txt has been modified. + During the update, it must be ensured, that the custom tests are still being executed. + +* ``.github/workflows/parent-workflow.yml`` + To ensure a specific execution order for the individual github workflows, their execution is orchestrated by the parent-workflow. 
+ To guarantee that this order is respected, it must be ensured that every other workflow except for ``docs-cleanup.yml``, ``scorecards.yml`` and ``stale.yml`` runs ``on workflow_call``, only. + For the three exceptions, it is recommended to keep the execution scheduled as currently the case. + +* ``.github/workflows/ubuntu.yml`` + The ubuntu workflow orchestrates the parallel execution of various cmake targets with varying configurations running on the latest version of ubuntu. + The first adaptation is that every step, in which a junit-report is generated, generates an artifact. + It must be ensured, that these artifacts are still generated after the update. + The second adaptation is that the test-results are captured, processed and persistently stored or stored in the ubuntu-artifact. + Therefore, it must be ensured that the jobs ``publish_test_data_success``, ``publish_test_data_failure``, ``publish_test_data_cancellation`` and ``ubuntu_artifact`` are executed. + Moreover, in case that any further job is added by nlohmann, it must be ensured that this job is added to the list of jobs required before the latter workflows are executed. + If any further job added by nlohmann generates a junit-log, it must be ensured that this job generates an artifact containing its junit-logs. + +* ``.github/workflows/cifuzz.yml`` + This workflow uses Google's oss-fuzz, which is not available to the copy within Eclipse S-CORE. + Therefore, this workflow needs to be disabled in the copy. + Currently, this is done by removing it altogether, which we recommend to do so that no confusion as to why this workflow is not executed arises. + +* ``.github/workflows/publish_documentation.yml`` + This workflow is replaced with a completely customised version, which reflects the use of trudag and the integration into the Eclipse S-CORE organisation. + Therefore, it is recommended to not change this workflow. 
+ In particular, the version of publish_documentation.yml in the original repository nlohmann/json must not replace the publish_documentation.yml of the present repository.
+
+* ``.github/workflows/test_trudag_extensions.yml``
+ This workflow is not present in the original nlohmann/json and must not be removed, or modified (besides updating the versions of tools, if necessary) by the update.
+
+* Other entries of ``.github/workflows``
+ For every workflow, it must be ensured that the conditions of their execution are unchanged.
+ The workflows ``check_amalgamation``, ``codeql``, ``dependency_review``, ``labeler`` and ``test_trudag_extensions`` generate an artifact, which must not be changed.
+ New workflows should be carefully reviewed.
+ If it is determined that their execution within the project is beneficial, and that they do not interfere with the documentation, then they should be integrated within the parent workflow at an appropriate place and their execution condition should be set to on ``workflow_call``, or their execution should be scheduled appropriately.
+ It is strongly recommended that the new workflow produces an artifact on success, and that the validator ``check_artifact_exists`` is adapted accordingly.
+ If nlohmann deletes any of the currently executed workflows, in particular ``check_amalgamation.yml``, ``codeql.yml``, ``dependency_review.yml``, ``labeler.yml``, ``test_trudag_extensions.yml`` and ``ubuntu.yml``, then it is strongly recommended to keep the currently executed version, since the automatic validator ``check_artifact_exists`` depends on the existence of these workflows.
+ In case that it is determined that these workflows should be deleted also in the documented copy of ``nlohmann/json``, then the validator ``check_artifact_exists`` and all its occurrences must be adapted accordingly.
+
+* ``ChangeLog.md``
+ It must be ensured that the changes of the update are properly described in the file ``ChangeLog.md``.
+
+
+### Necessary adaptations
+
+The following adaptation is recommended, and has, unfortunately, not been automated.
+
+* ``TSF/trustable/statements/JLS-02.md``
+ It must be carefully ensured that this statement and its references are still valid. In particular, it is strongly recommended to refer to a fuzz testing result running on the version that is updated to.
+
+
+The following adaptations to the documentation have been automated; the python-script TSF/scripts/update_helper.py may be used to assist with these changes.
+For error-free execution it is necessary, however, to adhere to the naming scheme json_version_X_XX_X, and to not change the structure of the directories.
+
+* ``TSF/trustable/statements/JLS-11.md``
+ It must be ensured that the correct release date is used.
+
+* ``TSF/trustable/statements/JLS-14.md``
+ It must be ensured that the release of the correct version is referenced.
+ Furthermore, the sha-value of the evidence must be adapted to the one provided in that announcement post.
+
+* ``TSF/trustable/docs/introduction/index.rst``
+ In this file, the version of ``nlohmann/json`` that is documented is explicitly mentioned at two places.
+ This version must be updated.
+
+* ``TSF/scripts/generate_list_of_misbehaviours.py``
+ This script contains version and release date hard-coded. Both must be updated.
+
+
+### Recommended procedure VERY MUCH WIP
+
+Based on the above observations, the following recommendations are derived.
+
+1. Create a new branch ``json_version_X_XX_X`` from the default branch containing the current version of ``nlohmann/json`` within Eclipse S-CORE
+2. Merge branch master from the original nlohmann/json into this branch, e.g. ``git checkout -b json_version_X_XX_X && git merge --no-commit nlohmann/master``
+3. Confirm the deletion of cifuzz.yml, macos.yml and windows.yml.
+4. Resolve the potential merge conflict in publish-documentation.yml by rejecting the incoming changes. Update the versions of the github actions, if necessary.
+5. Resolve the potential merge conflicts in check_amalgamation.yml, codeql.yml, dependency_review.yml, labeler.yml, ``test_trudag_extensions.yml`` to ensure that the artifacts are generated, i.e. the jobs Generate XXX artifact and Upload XXX artifact are retained.
+6. Resolve the potential merge conflict in ubuntu.yml following the above instructions.
+7. Resolve the potential merge conflicts in cmake/download_test_data.cmake and cmake/ci.cmake following the above instructions.
+8. Carefully examine the automatically merged changes. If no interference is to be expected, complete the merge.
+9. In case any additional workflow has been added, carefully examine and integrate into the parent-workflow or schedule appropriately.
+10. Adapt the documentation as described above.
+11. Generate the documentation locally and carefully investigate any change in the trustable score(s).
+12. Merge into the default branch.
+13. Create a new release.
+
+# Update concept for the documentation
+
+## Assumptions of use
+
+The documentation follows the Trustable Software Framework (TSF), which is documented [here](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html).
+Furthermore, the automatic compilation of the documentation and the tracking of changes to the core functionalities of the library uses _trudag_, which is developed by Codethink and located [here](https://gitlab.com/CodethinkLabs/trustable/trustable).
+
+
+## Version of trudag
+
+The documentation is currently built using trudag version 2025.8.5.
+In case a major change of trudag happens in the future, this might break some features of the documentation, or change some intended behaviours.
+Thus, it is recommended to not change the version of trudag.
+In case that it appears wise or necessary to change the version of trudag (e.g. when trudag is eventually certified), the following should be considered:
+
+* How has the algorithm for the accumulation of the trustable score changed? Ideally, it does not change, otherwise the necessity for a new review arises.
+* How has the data store interface changed? Ideally, it has not changed, but historical data and the documentation indicate that a change of the data store interface happened at some time.
+* How has the expected configuration for the items changed? It is known that this configuration changed (at least) once before. What does the potential change mean?
+* Do all custom references and validators as well as the data store interface work as before?
+* Has the algorithm for the hashing changed, or are there any changes to the trustable scores? If so, investigate carefully!
+
+
+## Subject-Matter-Expert-scores
+
+The intention with the SME scores is to find the _true_ trustable score by means of a heuristic law-of-large-numbers argument.
+Therefore, it is very much welcome if contributors add their SME scores to statements for which they feel confident to do so.
+While the committer may check SME scores for plausibility, it is recommended to not question SME scores as this interferes with the assumed independence of the SME!
+It is recommended that changes to SME scores are accumulated in the branch ``develop`` before the release of a new version of the documentation as to not clutter the release history.
+It is highly recommended to not delete SME scores under usual circumstances; most certainly, the SME scores should never be changed by anybody except the original SME.
+The following unusual circumstances can, after careful consideration, justify the removal or (much preferably!) the request for re-evaluation by the original SMEs:
+
+* change of references:
+ If, e.g. due to an update of ``nlohmann/json``, the references of any items (be it tests or code) change, then this should trigger a re-evaluation of the statement.
+ In particular if the behaviour changed significantly, it can be justifiable to assume that the old SME scores do not reflect the statement anymore. +* addition of automatic validators: + Recall that the SME judges in the absence of an automatic validator the validity of the statement using their own knowledge as well as the provided references, while in the presence of an automatic validator the validity of the validator score to represent the true score of the item is judged. + If a new automatic validator is added, then the meaning of the old SME scores is no longer represented, thereby urging for a re-review or (if a re-review is impossible) the removal of the score. + +## Validators + +The automatic validators are intended to calculate a trustable score based on quantifiable data. +In particular the introduction of a validator changes the meaning of the (potential) SME scores associated to a statement. +Therefore, the change or introduction of an automatic validator is most critical. +It is highly recommended to urge the original SME to re-review the statement and adapt their scores, or (at the least) to enlist additional SME to judge the changed statement. +After careful consideration the highly critical decision to remove some SME scores no longer reflecting the statement could be made. + +## References + +References should be treated as validators, i.e. any update of a reference should trigger a re-review by the SME. +For references, however, the decision to remove a stale SME score is even more critical unless the reference reveals critical new information, which is highly unlikely, or the change of the reference is triggered by a significant change in the behaviour of the library, which heavily affected the statement. 
\ No newline at end of file From b49bf1a2a69d47f747ef3bced09a8171008d8b77 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Tue, 21 Oct 2025 11:45:09 +0000 Subject: [PATCH 26/67] minor tweaks for nlohmann update --- TSF/README.md | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/TSF/README.md b/TSF/README.md index c4475d0826..e485a40df7 100644 --- a/TSF/README.md +++ b/TSF/README.md @@ -18,6 +18,9 @@ Additionally, there is a branch ``develop``, which is **not** intended to mirror The releases of the documented version are identified by tags on ``main``. Moreover, the branch protection rules for ``main`` are set as described in the description of the forking process in ``TSF/README.md`` (WIP). +Note that there is **no automatic information** on the existence of a new release in the original ``nlohmann/json``; instead the possibility to update is detected **manually**. +Note further that, due to the currently relatively limited use of nlohmann/json within S-CORE, there appears currently no inherent need to keep the version up to date. + ## Update process of the original nlohmann/json The releases of ``nlohmann/json`` are collected on the `Release site ` of the repository ``nlohmann/json``. @@ -28,6 +31,7 @@ The new release is expected to be located within the branch **master**, from whe In the following, we shall describe the intricacies of updating the version of ``nlohmann/json`` within Eclipse S-CORE. This version is not a mere fork of the original master branch of ``nlohmann/json``, but instead enriched with the documentation following the Trustable Software Framework (TSF). + The enrichment with the documentation necessitates some changes to the fork of the original repository. For the most part, these changes are in-obtrusive, and mere additions. In particular, the folders ``include`` and ``single-include`` remain unchanged, and should be updated without further adaptations. 
@@ -127,21 +131,25 @@ For the error-free execution is it necessary, however, to adhere to the naming s Based on the above observations, the following recommendations are derived. -1. Create a new branch ``json_version_X_XX_X`` from the default branch containing the current version of ``nlohmann/json`` within Eclipse S-CORE +1. Ensure that the content of the branch ``develop`` is identical to the branch ``main``. + Since it is intended to not change the library itself, in particular the folders ``include`` and ``single_include``, this should be possible by updating the documentation. 2. Merge branch master from the original nlohmann/json into this branch, e.g. ``git checkout -b json_version_X_XX_X && git merge --no-commit nlohmann/master`` 3. Confirm the deletion of cifuzz.yml, macos.yml and windows.yml. -4. Resolve the potential merge conflict in publish-documentation.yml by rejecting the incoming changes. Update the versions of the github actions, if necessary. -5. Resolve the potential merge conflicts in check_amalgamation.yml, codeql.yml, dependency_review.yml, labeler.yml, ``test_trudag_extensions.yml`` to ensure that the artifacts are generated, i.e. the jobs Generate XXX artifact and Upload XXX artifact are retained. +4. Resolve the potential merge conflict in publish-documentation.yml by rejecting the incoming changes. + Update the versions of the github actions, if necessary. +5. Resolve the potential merge conflicts in check_amalgamation.yml, codeql.yml, dependency_review.yml, labeler.yml, ``test_trudag_extensions.yml`` to ensure that the artifacts are generated, i.e. the jobs ``Generate XXX artifact`` and ``Upload XXX artifact`` are retained. 6. Resolve the potential merge conflict in ubuntu.yml following the above instructions. 7. Resolve the potential merge conflicts in cmake/download_test_data.cmake and cmake/ci.cmake following the above instructions. 8. Carefully examine the automatically merged changes. 
If no interference is to be expected, complete the merge. 9. In case any additional workflow has been added, carefully examine and integrate into the parent-workflow or schedule appropriately. 10. Adapt the documentation as described above. -11. Generate the documentation locally and carefully investigate any change in the trustable score(s). +11. Generate the documentation locally and carefully investigate any change in the trustable score(s). + If any relevant behaviour of the library changes, adapt the documentation. + Additionally, if any additional tests were added, or existing tests were changed, carefully investigate whether these warrant an amendment of the documentation. 12. Merge into the default branch. -13. Create a new release. +13. Create a new release under the tag vX.XX.X-trustable.1. -# Update concept for the documentation +# Update concept for the TSF documentation ## Assumptions of use @@ -176,7 +184,7 @@ The following unusual circumstances can, after careful consideration, justify th If, e.g. due to an update of ``nlohmann/json``, the references of any items (be it tests or code) change, then this should trigger a re-evaluation of the statement. In particular if the behaviour changed significantly, it can be justifiable to assume that the old SME scores do not reflect the statement anymore. * addition of automatic validators: - Recall that the SME judges in the absence of an automatic validator the validity of the statement using their own knowledge as well as the provided references, while in the presence of an automatic validator the validity of the validator score to represent the true score of the item is judged. 
+ Recall that the SME judges in the absence of an automatic validator the validity of the statement using their own knowledge as well as the provided references, while in the presence of an automatic validator the validity of the validator score to represent the true score of the item, which is estimated as in the case of no validator, is judged. If a new automatic validator is added, then the meaning of the old SME scores is no longer represented, thereby urging for a re-review or (if a re-review is impossible) the removal of the score. ## Validators From 29dd86e4c407a9b6a5d4132045beacd541fbefcf Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Tue, 21 Oct 2025 15:05:49 +0000 Subject: [PATCH 27/67] fix git command --- .dotstop_extensions/validators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py index b270c6a741..a6efffc853 100644 --- a/.dotstop_extensions/validators.py +++ b/.dotstop_extensions/validators.py @@ -351,7 +351,7 @@ def is_branch_protected(configuration: dict[str, yaml]) -> tuple[float, list[Exc branch = configuration.get("branch",None) if branch is None: return (0.0, RuntimeError("The branch is not specified.")) - res = subprocess.run(["git", "diff", "--cached", "quiet"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) + res = subprocess.run(["git", "diff", "--cached", "--quiet", "--exit-code"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) if res.returncode != 0: raise RuntimeError("There are currently staged changes. 
Please unstage to proceed.") try: From 8f3f0cefc65b5686f20aace7c6b395a1058f70c6 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Wed, 22 Oct 2025 08:14:05 +0000 Subject: [PATCH 28/67] remove trailing whitespace --- TSF/tests/unit-byte_order_mark.cpp | 4 +- TSF/tests/unit-class_parser_core.cpp | 10 ++--- TSF/tests/unit-literals.cpp | 57 ++-------------------------- TSF/tests/unit-numbers.cpp | 27 ++++--------- TSF/tests/unit-objects.cpp | 16 ++++---- TSF/tests/unit-strings.cpp | 35 ++++------------- 6 files changed, 34 insertions(+), 115 deletions(-) diff --git a/TSF/tests/unit-byte_order_mark.cpp b/TSF/tests/unit-byte_order_mark.cpp index e3a8e14ba9..b37aaa36f8 100644 --- a/TSF/tests/unit-byte_order_mark.cpp +++ b/TSF/tests/unit-byte_order_mark.cpp @@ -20,7 +20,7 @@ TEST_CASE("accept") { SECTION("single BOM") { - // a single byte order mark is treated as an empty token, which is not a valid json token. + // a single byte order mark is treated as an empty token, which is not a valid json token. CHECK(!json::accept("\xEF\xBB\xBF")); CHECK(json::accept("\xEF\xBB\xBF\n\"foo\"")); CHECK(json::accept("\xEF\xBB\xBF\"foo\"")); @@ -72,7 +72,7 @@ TEST_CASE("parse") { SECTION("multiple BOM") { - // Whenever a fourth character of a BOM-candidate is read, an error is thrown. + // Whenever a fourth character of a BOM-candidate is read, an error is thrown. // This error does not depend on any trailing garbage. 
CHECK_THROWS_WITH_AS(parser_helper("\xEF\xBB\xBF\xEF\xBB\xBF"),"[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '\xEF\xBB\xBF\xEF'", json::parse_error&); CHECK_THROWS_WITH_AS(parser_helper("\xEF\xBB\xBF\xEF\xBB\xBF\xEF\xBB\xBF"),"[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '\xEF\xBB\xBF\xEF'", json::parse_error&); diff --git a/TSF/tests/unit-class_parser_core.cpp b/TSF/tests/unit-class_parser_core.cpp index 9bf171e76c..bb908f0a44 100644 --- a/TSF/tests/unit-class_parser_core.cpp +++ b/TSF/tests/unit-class_parser_core.cpp @@ -8,10 +8,10 @@ /* This file has been adapted from the original nlohmann/json library (tests/src/unit-class_parser.cpp) -to use the plain json::accept() and json::parse() functions instead of advanced helper functions, -which make many additional function calls (see the definitions of parse_helper and accept_helper in -tests/src/unit-class_parser.cpp). This allows to directly attribute a test result to the accept() or -parse() function, which is needed to use the test results as evidence for the well-formedness and parsing +to use the plain json::accept() and json::parse() functions instead of advanced helper functions, +which make many additional function calls (see the definitions of parse_helper and accept_helper in +tests/src/unit-class_parser.cpp). This allows to directly attribute a test result to the accept() or +parse() function, which is needed to use the test results as evidence for the well-formedness and parsing of JSON requirements. Unnecessary code and test sections have been removed. 
*/ @@ -34,7 +34,7 @@ bool accept_helper(const std::string& s); json parser_helper(const std::string& s) { - return json::parse(s); + return json::parse(s); } bool accept_helper(const std::string& s) diff --git a/TSF/tests/unit-literals.cpp b/TSF/tests/unit-literals.cpp index c38570dd44..defa7525df 100644 --- a/TSF/tests/unit-literals.cpp +++ b/TSF/tests/unit-literals.cpp @@ -62,7 +62,7 @@ TEST_CASE("accept") CHECK(!json::accept("NULL")); } SECTION("false") - { + { CHECK(!json::accept("False")); CHECK(!json::accept("fAlse")); CHECK(!json::accept("FAlse")); @@ -1075,57 +1075,6 @@ TEST_CASE("accept") CHECK(!json::accept("NAN")); } } - SECTION("whitespace") - { - CHECK(json::accept(" false ")); - CHECK(json::accept(" false\t")); - CHECK(json::accept(" false\n")); - CHECK(json::accept(" false\u000d")); - CHECK(json::accept("\tfalse ")); - CHECK(json::accept("\tfalse\t")); - CHECK(json::accept("\tfalse\n")); - CHECK(json::accept("\tfalse\u000d")); - CHECK(json::accept("\nfalse ")); - CHECK(json::accept("\nfalse\t")); - CHECK(json::accept("\nfalse\n")); - CHECK(json::accept("\nfalse\u000d")); - CHECK(json::accept("\u000dfalse ")); - CHECK(json::accept("\u000dfalse\t")); - CHECK(json::accept("\u000dfalse\n")); - CHECK(json::accept("\u000dfalse\u000d")); - CHECK(json::accept(" null ")); - CHECK(json::accept(" null\t")); - CHECK(json::accept(" null\n")); - CHECK(json::accept(" null\u000d")); - CHECK(json::accept("\tnull ")); - CHECK(json::accept("\tnull\t")); - CHECK(json::accept("\tnull\n")); - CHECK(json::accept("\tnull\u000d")); - CHECK(json::accept("\nnull ")); - CHECK(json::accept("\nnull\t")); - CHECK(json::accept("\nnull\n")); - CHECK(json::accept("\nnull\u000d")); - CHECK(json::accept("\u000dnull ")); - CHECK(json::accept("\u000dnull\t")); - CHECK(json::accept("\u000dnull\n")); - CHECK(json::accept("\u000dnull\u000d")); - CHECK(json::accept(" true ")); - CHECK(json::accept(" true\t")); - CHECK(json::accept(" true\n")); - CHECK(json::accept(" true\u000d")); - 
CHECK(json::accept("\ttrue ")); - CHECK(json::accept("\ttrue\t")); - CHECK(json::accept("\ttrue\n")); - CHECK(json::accept("\ttrue\u000d")); - CHECK(json::accept("\ntrue ")); - CHECK(json::accept("\ntrue\t")); - CHECK(json::accept("\ntrue\n")); - CHECK(json::accept("\ntrue\u000d")); - CHECK(json::accept("\u000dtrue ")); - CHECK(json::accept("\u000dtrue\t")); - CHECK(json::accept("\u000dtrue\n")); - CHECK(json::accept("\u000dtrue\u000d")); - } } TEST_CASE("parse") @@ -1185,7 +1134,7 @@ TEST_CASE("parse") CHECK(json::parse("\u000dtrue ")==json::parse("true")); CHECK(json::parse("\u000dtrue\t")==json::parse("true")); CHECK(json::parse("\u000dtrue\n")==json::parse("true")); - CHECK(json::parse("\u000dtrue\u000d")==json::parse("true")); + CHECK(json::parse("\u000dtrue\u000d")==json::parse("true")); } SECTION("capitalisation") { @@ -1226,7 +1175,7 @@ TEST_CASE("parse") CHECK_THROWS_AS(parser_helper("NULL"),json::parse_error&); } SECTION("false") - { + { CHECK_THROWS_AS(parser_helper("False"),json::parse_error&); CHECK_THROWS_AS(parser_helper("fAlse"),json::parse_error&); CHECK_THROWS_AS(parser_helper("FAlse"),json::parse_error&); diff --git a/TSF/tests/unit-numbers.cpp b/TSF/tests/unit-numbers.cpp index 12724fa44d..f32b7fb4f1 100644 --- a/TSF/tests/unit-numbers.cpp +++ b/TSF/tests/unit-numbers.cpp @@ -20,7 +20,7 @@ TEST_CASE("accept") // The only valid exponents are U+0065 and U+0045. // Their look-alikes, in particular U+0425 and U+0436 are forbidden. 
SECTION("U+0425") - { + { CHECK(!json::accept("0\u0425123")); CHECK(!json::accept("123\u04250")); CHECK(!json::accept("0.123\u0425123")); @@ -28,7 +28,7 @@ TEST_CASE("accept") CHECK(!json::accept("1.23\u04250")); } SECTION("U+0436") - { + { CHECK(!json::accept("0\u0436123")); CHECK(!json::accept("123\u04360")); CHECK(!json::accept("0.123\u0436123")); @@ -63,7 +63,6 @@ TEST_CASE("accept") } SECTION("minus") { - CHECK(!json::accept("1-1")); CHECK(!json::accept("0.1-1")); CHECK(!json::accept("0.1-1.0")); @@ -75,7 +74,6 @@ TEST_CASE("accept") } SECTION("brackets") { - CHECK(!json::accept("(145)")); CHECK(!json::accept("(34.32874)")); CHECK(!json::accept("42\u0045(134)")); @@ -83,12 +81,10 @@ TEST_CASE("accept") } SECTION("factorial") { - CHECK(!json::accept("13!")); } SECTION("multiplication") { - CHECK(!json::accept("1*1")); CHECK(!json::accept("1.45*5")); CHECK(!json::accept("154*23.76")); @@ -99,7 +95,6 @@ TEST_CASE("accept") } SECTION("division") { - CHECK(!json::accept("0/0")); CHECK(!json::accept("1.45/5")); CHECK(!json::accept("154/23.76")); @@ -110,7 +105,6 @@ TEST_CASE("accept") } SECTION("comma") { - CHECK(!json::accept("0,0")); CHECK(!json::accept("100,000")); CHECK(!json::accept("1,000.23")); @@ -283,7 +277,7 @@ TEST_CASE("accept") CHECK(!json::accept("-0000000000000000000000000000000000042")); } // According to RFC8259, only numbers in base ten are allowed. For bases lower than ten, this can - // not be checked using the numerical representation and checking the grammar, assuming that the + // not be checked using the numerical representation and checking the grammar, assuming that the // standard digits are used; instead, this is the job of the parser. // For bases exceeding ten, this can be checked. In particular hexadecimal can be tested for. 
// For base eight, this can also be tested assuming that one of the conventions for the @@ -322,14 +316,14 @@ TEST_CASE("accept") TEST_CASE("parse") { - // While leading zeroes are forbidden according to RFC8259, + // While leading zeroes are forbidden according to RFC8259, // leading zeroes in the exponent are allowed and ignored in the parsing. SECTION("exponents") { // The only valid exponents are U+0065 and U+0045. // Their look-alikes, in particular U+0425 and U+0436 are forbidden. SECTION("U+0425") - { + { CHECK_THROWS_AS(parser_helper("0\u0425123"),json::parse_error&); CHECK_THROWS_AS(parser_helper("123\u04250"),json::parse_error&); CHECK_THROWS_AS(parser_helper("0.123\u0425123"),json::parse_error&); @@ -337,7 +331,7 @@ TEST_CASE("parse") CHECK_THROWS_AS(parser_helper("1.23\u04250"),json::parse_error&); } SECTION("U+0436") - { + { CHECK_THROWS_AS(parser_helper("0\u0436123"),json::parse_error&); CHECK_THROWS_AS(parser_helper("123\u04360"),json::parse_error&); CHECK_THROWS_AS(parser_helper("0.123\u0436123"),json::parse_error&); @@ -384,7 +378,6 @@ TEST_CASE("parse") } SECTION("minus") { - CHECK_THROWS_AS(parser_helper("1-1"),json::parse_error&); CHECK_THROWS_AS(parser_helper("0.1-1"),json::parse_error&); CHECK_THROWS_AS(parser_helper("0.1-1.0"),json::parse_error&); @@ -396,7 +389,6 @@ TEST_CASE("parse") } SECTION("brackets") { - CHECK_THROWS_AS(parser_helper("(145)"),json::parse_error&); CHECK_THROWS_AS(parser_helper("(34.32874)"),json::parse_error&); CHECK_THROWS_AS(parser_helper("42\u0045(134)"),json::parse_error&); @@ -408,7 +400,6 @@ TEST_CASE("parse") } SECTION("multiplication") { - CHECK_THROWS_AS(parser_helper("1*1"),json::parse_error&); CHECK_THROWS_AS(parser_helper("1.45*5"),json::parse_error&); CHECK_THROWS_AS(parser_helper("154*23.76"),json::parse_error&); @@ -419,7 +410,6 @@ TEST_CASE("parse") } SECTION("division") { - CHECK_THROWS_AS(parser_helper("0/0"),json::parse_error&); CHECK_THROWS_AS(parser_helper("1.45/5"),json::parse_error&); 
CHECK_THROWS_AS(parser_helper("154/23.76"),json::parse_error&); @@ -430,14 +420,13 @@ TEST_CASE("parse") } SECTION("comma") { - CHECK_THROWS_AS(parser_helper("0,0"),json::parse_error&); CHECK_THROWS_AS(parser_helper("100,000"),json::parse_error&); CHECK_THROWS_AS(parser_helper("1,000.23"),json::parse_error&); } } SECTION("trailing zeroes") - { + { // Trailing zeroes after the decimal point do not influence the parsing CHECK(json::parse("3.1415000000000000000000000")==json::parse("3.1415")); CHECK(json::parse("3.1415000000000\u004515")==json::parse("3.1415\u004515")); @@ -543,7 +532,7 @@ TEST_CASE("parse") } SECTION("Precision") { - CHECK(json::parse("1.7976931348623158e308").dump()=="1.7976931348623157e+308"); // maximum double value + CHECK(json::parse("1.7976931348623158e308").dump()=="1.7976931348623157e+308"); // maximum double value CHECK(json::parse("-1.7976931348623158e308").dump()=="-1.7976931348623157e+308"); // minimum double value } } diff --git a/TSF/tests/unit-objects.cpp b/TSF/tests/unit-objects.cpp index f5c2610767..eca7ba403b 100644 --- a/TSF/tests/unit-objects.cpp +++ b/TSF/tests/unit-objects.cpp @@ -16,7 +16,7 @@ void parser_helper(const std::string& input){ TEST_CASE("accept") { // A name (or key) is a string. No other token is a valid name - // See also n_object_missing_key.json, n_object_non_string_key.json, + // See also n_object_missing_key.json, n_object_non_string_key.json, // n_object_non_string_key_but_huge_number_instead.json, n_object_repeated_null_null // n_object_unquoted_key for some non-exhaustive tests SECTION("names") @@ -88,7 +88,7 @@ TEST_CASE("accept") CHECK(json::accept("{\"foo\\u001fbar\":123}")); } SECTION("unicode") - { + { // escaped CHECK(json::accept("{\"\\u0066\\u006f\\u006f\\u0062\\u0061\\u0072\":123}")); // unescaped @@ -109,7 +109,7 @@ TEST_CASE("accept") } } } - // Name/key and value of an array are treated as any other token. + // Name/key and value of an array are treated as any other token. 
// In particular, leading and trailing whitespace are ignored SECTION("whitespace") { @@ -129,7 +129,7 @@ TEST_CASE("accept") CHECK(json::accept("{\u000d\"foo\"\u000d:\u000d\"bar\"\u000d}")); CHECK(json::accept("{ \"foo\"\t:\n\"bar\"\n}")); CHECK(json::accept("{\t\t\t\t\t\n\n\u000d\"foo\"\t \t\t \n\n \u000d:\"bar\"}")); - } + } } // The colon U+003A is the only valid member separator. // Look-alikes are illegal. @@ -137,7 +137,7 @@ TEST_CASE("accept") SECTION("member separator") { CHECK(json::accept("{\"foo\"\u003a\"bar\"}")); //: - CHECK(!json::accept("{\"foo\"\uff1a\"bar\"}")); + CHECK(!json::accept("{\"foo\"\uff1a\"bar\"}")); CHECK(!json::accept("{\"foo\"\ua789\"bar\"}")); CHECK(!json::accept("{\"foo\"\u005b\"bar\"}")); //[ CHECK(!json::accept("{\"foo\"\u007b\"bar\"}")); //{ @@ -169,14 +169,14 @@ TEST_CASE("parse") CHECK(json::parse("{ \"foo\"\t:\n\"bar\"\n}")==json::parse("{\"foo\":\"bar\"}")); CHECK(json::parse("{\t\t\t\t\t\n\n\u000d\"foo\"\t \t\t \n\n \u000d:\"bar\"}")==json::parse("{\"foo\":\"bar\"}")); } - } + } // The colon U+003A is the only valid member separator. // Look-alikes are illegal. // All other valid structural characters are illegal. 
SECTION("member separator") { CHECK_NOTHROW(parser_helper("{\"foo\"\u003a\"bar\"}")); //: - CHECK_THROWS_AS(parser_helper("{\"foo\"\uff1a\"bar\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{\"foo\"\uff1a\"bar\"}"),json::parse_error&); CHECK_THROWS_AS(parser_helper("{\"foo\"\ua789\"bar\"}"),json::parse_error&); CHECK_THROWS_AS(parser_helper("{\"foo\"\u005b\"bar\"}"),json::parse_error&); //[ CHECK_THROWS_AS(parser_helper("{\"foo\"\u007b\"bar\"}"),json::parse_error&); //{ @@ -249,6 +249,6 @@ TEST_CASE("parse") ss2 >> _2; ss3 >> _3; CHECK(_2==_3); - } + } } } diff --git a/TSF/tests/unit-strings.cpp b/TSF/tests/unit-strings.cpp index 830ad4372f..e0ad653691 100644 --- a/TSF/tests/unit-strings.cpp +++ b/TSF/tests/unit-strings.cpp @@ -10,7 +10,7 @@ using nlohmann::json; namespace { void parser_helper(const std::string& input); -std::string uint_to_utf8(const uint32_t& input); +std::string uint_to_utf8(const uint32_t& input); void parser_helper(const std::string& input){ const json temp = json::parse(input); @@ -243,25 +243,6 @@ TEST_CASE("accept") CHECK(!json::accept("\"\xfe\xfe\xff\xff\"")); } } - SECTION("whitespace") - { - CHECK(json::accept(" \"foo\" ")); - CHECK(json::accept(" \"foo\"\t")); - CHECK(json::accept(" \"foo\"\n")); - CHECK(json::accept(" \"foo\"\u000d")); - CHECK(json::accept("\t\"foo\" ")); - CHECK(json::accept("\t\"foo\"\t")); - CHECK(json::accept("\t\"foo\"\n")); - CHECK(json::accept("\t\"foo\"\u000d")); - CHECK(json::accept("\n\"foo\" ")); - CHECK(json::accept("\n\"foo\"\t")); - CHECK(json::accept("\n\"foo\"\n")); - CHECK(json::accept("\n\"foo\"\u000d")); - CHECK(json::accept("\u000d\"foo\" ")); - CHECK(json::accept("\u000d\"foo\"\t")); - CHECK(json::accept("\u000d\"foo\"\n")); - CHECK(json::accept("\u000d\"foo\"\u000d")); - } } TEST_CASE("Unicode" * doctest::skip()) @@ -277,8 +258,8 @@ TEST_CASE("Unicode" * doctest::skip()) if (i>=0xD800 && i<=0xDFFF) { // Unpaired utf-16 surrogates are illegal. 
- // Observe that this verbatim not what RFC8259 §7 prescribes; - // it appears, however, to be in the spirit of RFC8259, cf. §8.2 + // Observe that this verbatim not what RFC8259 §7 prescribes; + // it appears, however, to be in the spirit of RFC8259, cf. §8.2 // Illegal characters are not parsed anyway. CHECK(!json::accept(temp.str())); CHECK(!json::accept(temp2.str())); @@ -297,10 +278,10 @@ TEST_CASE("Unicode" * doctest::skip()) for (uint32_t i = 0x0000; i<=0x10FFFF; i++) { std::string temp = uint_to_utf8(i); - if ((i>=0xD800 && i<=0xDFFF)) { + if ((i>=0xD800 && i<=0xDFFF)) { // Unpaired utf-16 surrogates are illegal. - // Observe that this verbatim not what RFC8259 §7 prescribes; - // it appears, however, to be in the spirit of RFC8259, cf. §8.2 + // Observe that this verbatim not what RFC8259 §7 prescribes; + // it appears, however, to be in the spirit of RFC8259, cf. §8.2 // The other characters are illegal if unescaped. CHECK(!json::accept(temp)); CHECK_THROWS_AS(parser_helper(temp),json::parse_error&); @@ -310,7 +291,7 @@ TEST_CASE("Unicode" * doctest::skip()) CHECK(!json::accept(temp)); CHECK_THROWS_AS(parser_helper(temp),json::parse_error&); } - } + } } else if (i<0x0020||i==0x0022||i==0x005c){ CHECK(!json::accept(temp)); CHECK_THROWS_AS(parser_helper(temp),json::parse_error&); @@ -353,7 +334,7 @@ TEST_CASE("Unicode" * doctest::skip()) CHECK_THROWS_AS(parser_helper(temp.str()),json::parse_error&); } } - } + } } } From da5cf301b7172ab0918799f65ca877691c2eac12 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Wed, 22 Oct 2025 09:27:13 +0000 Subject: [PATCH 29/67] Fix url and branch. 
--- TSF/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/TSF/README.md b/TSF/README.md index e485a40df7..3902a14f3a 100644 --- a/TSF/README.md +++ b/TSF/README.md @@ -23,7 +23,7 @@ Note further that, due to the currently relatively limited use of nlohmann/json ## Update process of the original nlohmann/json -The releases of ``nlohmann/json`` are collected on the `Release site ` of the repository ``nlohmann/json``. +The releases of ``nlohmann/json`` are collected on the [Release site](https://github.com/nlohmann/json/releases) of the repository ``nlohmann/json``. Each release announcement is expected to contain the release date, SHA-256 values for json.hpp, include.zip and json.tar.xz, and a brief list containing bug fixes, improvements, further changes and deprecated functions. The new release is expected to be located within the branch **master**, from where the most recent version can be drawn. @@ -133,7 +133,7 @@ Based on the above observations, the following recommendations are derived. 1. Ensure that the content of the branch ``develop`` is identical to the branch ``main``. Since it is intended to not change the library itself, in particular the folders ``include`` and ``single_include``, this should be possible by updating the documentation. -2. Merge branch master from the original nlohmann/json into this branch, e.g. ``git checkout -b json_version_X_XX_X && git merge --no-commit nlohmann/master`` +2. Merge branch master from the original nlohmann/json into ``develop``, e.g. ``git checkout -b json_version_X_XX_X && git merge --no-commit nlohmann/master`` 3. Confirm the deletion of cifuzz.yml, macos.yml and windows.yml. 4. Resolve the potential merge conflict in publish-documentation.yml by rejecting the incoming changes. Update the versions of the github actions, if necessary. @@ -146,8 +146,8 @@ Based on the above observations, the following recommendations are derived. 11. 
Generate the documentation locally and carefully investigate any change in the trustable score(s). If any relevant behaviour of the library changes, adapt the documentation. Additionally, if any additional tests were added, or existing tests were changed, carefully investigate whether these warrant an amendment of the documentation.
 
-12. Merge into the default branch.
-13. Create a new release under the tag vX.XX.X-trustable.1.
+12. Merge into the ``main``.
+13. Create a new release under the tag FIXME
 
 # Update concept for the TSF documentation
 

From 8c61c4cb1a5944ebae3a2249a7f406b57132cb4c Mon Sep 17 00:00:00 2001
From: Jonas-Kirchhoff
Date: Wed, 22 Oct 2025 10:40:26 +0000
Subject: [PATCH 30/67] add combinator

---
 .dotstop_extensions/validators.py | 60 +++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py
index a6efffc853..9bf9518f76 100644
--- a/.dotstop_extensions/validators.py
+++ b/.dotstop_extensions/validators.py
@@ -360,3 +360,63 @@ def is_branch_protected(configuration: dict[str, yaml]) -> tuple[float, list[Exc
     except:
         return (1.0, [])
 
+def combinator(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]:
+    validators = configuration.get("validators",None)
+    if validators is None:
+        return (1.0, [Warning("No validators were given, returning the void-validator.")])
+    elif not isinstance(validators,list):
+        return (0.0, [TypeError("The list of validators must be given as list.")])
+    scores = []
+    exceptions = []
+    weights = []
+    for validator in validators:
+        # fetch configuration
+        validator_configuration = validator.get("configuration", None)
+        if not isinstance(validator_configuration,dict):
+            return (0.0, [TypeError("Validator configuration must be an object.")])
+        # fetch weight
+        weight = float(validator.get("weight",1.0))
+        if weight<0:
+            return (0.0, [TypeError("Validator weights must be non-negative.")])
+        weights.append(weight)
+        # 
fetch type
+        validator_type = validator.get("type", None)
+        if validator_type is None:
+            return (0.0, [TypeError("Missing validator type declaration.")])
+        # execute validator
+        if validator_type == "check_artifact_exists":
+            validator_score, validator_errors = check_artifact_exists(validator_configuration)
+            scores.append(validator_score)
+            exceptions.extend(validator_errors)
+        elif validator_type == "https_response_time":
+            validator_score, validator_errors = https_response_time(validator_configuration)
+            scores.append(validator_score)
+            exceptions.extend(validator_errors)
+        elif validator_type == "check_test_results":
+            validator_score, validator_errors = check_test_results(validator_configuration)
+            scores.append(validator_score)
+            exceptions.extend(validator_errors)
+        elif validator_type == "file_exists":
+            validator_score, validator_errors = file_exists(validator_configuration)
+            scores.append(validator_score)
+            exceptions.extend(validator_errors)
+        elif validator_type == "sha_checker":
+            validator_score, validator_errors = sha_checker(validator_configuration)
+            scores.append(validator_score)
+            exceptions.extend(validator_errors)
+        elif validator_type == "check_issues":
+            validator_score, validator_errors = check_issues(validator_configuration)
+            scores.append(validator_score)
+            exceptions.extend(validator_errors)
+        elif validator_type == "did_workflows_fail":
+            validator_score, validator_errors = did_workflows_fail(validator_configuration)
+            scores.append(validator_score)
+            exceptions.extend(validator_errors)
+        elif validator_type == "is_branch_protected":
+            validator_score, validator_errors = is_branch_protected(validator_configuration)
+            scores.append(validator_score)
+            exceptions.extend(validator_errors)
+    if sum(weights) == 0.0:
+        return (0.0, exceptions)
+    else:
+        return (sum(list(map(lambda x,y: x*y, scores, weights)))/sum(weights),exceptions)

From a67b9f3d035d572adcd2cd26e7fe592818056823 Mon Sep 17 00:00:00 2001
From: Jonas-Kirchhoff
Date: Fri, 24 Oct
2025 10:03:24 +0000 Subject: [PATCH 31/67] add coveralls_statement --- .dotstop_extensions/validators.py | 50 ++++++++++++++++++++++++++++-- TSF/trustable/statements/JLS-27.md | 15 +++++++++ 2 files changed, 63 insertions(+), 2 deletions(-) create mode 100644 TSF/trustable/statements/JLS-27.md diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py index 9bf9518f76..58d1476d72 100644 --- a/.dotstop_extensions/validators.py +++ b/.dotstop_extensions/validators.py @@ -328,10 +328,10 @@ def check_issues(configuration: dict[str, yaml]) -> tuple[float, list[Exception def did_workflows_fail(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: owner = configuration.get("owner",None) if owner is None: - return (0.0, RuntimeError("The owner is not specified.")) + return (0.0, RuntimeError("The owner is not specified in the configuration of did_workflows_fail.")) repo = configuration.get("repo",None) if repo is None: - return (0.0, RuntimeError("The repository is not specified.")) + return (0.0, RuntimeError("The repository is not specified in the configuration of did_workflows_fail.")) event = configuration.get("event","push") url = f"https://github.com/{owner}/{repo}/actions?query=event%3A{event}+is%3Afailure" branch = configuration.get("branch",None) @@ -359,6 +359,52 @@ def is_branch_protected(configuration: dict[str, yaml]) -> tuple[float, list[Exc return (0.0, RuntimeError(f"The branch {branch} is not protected!")) except: return (1.0, []) + +def coveralls_reporter(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + owner = configuration.get("owner",None) + if owner is None: + return (0.0, [ValueError("The owner needs to be specified in the configuration for coveralls_reporter.")]) + repo = configuration.get("repo",None) + if repo is None: + return (0.0, [ValueError("The repository needs to be specified in the configuration for coveralls_reporter.")]) + branch = configuration.get("branch",None) 
+    if branch is not None:
+        url = f"https://coveralls.io/github/{owner}/{repo}.json?branch={branch}"
+    else:
+        url = f"https://coveralls.io/github/{owner}/{repo}.json"
+    res = requests.get(url)
+    if res.status_code != 200:
+        return (0.0, [RuntimeError(f"Can not reach {url} to fetch the code coverage!")])
+    res = json.loads(res.text)
+    try:
+        covered_lines = int(res.get("covered_lines","0"))
+        relevant_lines = int(res.get("relevant_lines","1"))
+    except ValueError:
+        return (0.0, [RuntimeError("Critical error in the coveralls api: Expecting integer values for lines!")])
+    try:
+        expected_line_coverage = float(configuration.get("line_coverage","0.0"))
+    except ValueError:
+        return (0.0, [ValueError("line_coverage needs to be a floating point value!")])
+    try:
+        digits = int(configuration.get("significant_decimal_digits","3"))
+    except ValueError:
+        return (0.0, [ValueError("significant_decimal_digits needs to be an integer value!")])
+    if round(expected_line_coverage, digits) != round(covered_lines/relevant_lines * 100, digits):
+        return (0.0, [Warning("The line coverage has changed!")])
+    try:
+        covered_branches = int(res.get("covered_branches","0"))
+        relevant_branches = int(res.get("relevant_branches","1"))
+    except ValueError:
+        return (0.0, [RuntimeError("Critical error in the coveralls api: Expecting integer values for branches!")])
+    try:
+        expected_branch_coverage = float(configuration.get("branch_coverage","0.0"))
+    except ValueError:
+        return (0.0, [ValueError("branch_coverage needs to be a floating point value!")])
+    if round(expected_branch_coverage, digits) != round(covered_branches/relevant_branches * 100, digits):
+        return (0.0, [Warning("The branch coverage has changed!")])
+    return (1.0, [])
+
+
 
 def combinator(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]:
     validators = configuration.get("validators",None)
diff --git a/TSF/trustable/statements/JLS-27.md b/TSF/trustable/statements/JLS-27.md
new file mode 100644
index 0000000000..f629aa98bf
--- /dev/null +++ b/TSF/trustable/statements/JLS-27.md @@ -0,0 +1,15 @@ +--- +level: 1.1 +normative: true +evidence: + type: coveralls_reporter + configuration: + owner: "score-json" + repo: "json" + branch: "main" + line_coverage: 99.186 + branch_coverage: 93.865 + digits: 3 +--- + +The test coverage for this release is monitored by coveralls and stays constant. \ No newline at end of file From caa8fad3fb6b295252e5e4079e6872285766c22f Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 24 Oct 2025 10:36:38 +0000 Subject: [PATCH 32/67] add JLS-27 --- .dotstop.dot | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.dotstop.dot b/.dotstop.dot index cee9c1280e..06c39542a1 100644 --- a/.dotstop.dot +++ b/.dotstop.dot @@ -56,6 +56,7 @@ digraph G { "JLS-24" [sha=b16224d3ab676c00b313ae91760072d92aed9f20da99b363621effa3e033e012]; "JLS-25" [sha="8bb517191450f370679dbafd85342e1bbcf797cc84f2a6f1fc119568b534d5e0"]; "JLS-26" [sha=cf1b73b375697ee56d9788aab79ed01b2730b126a2cc4d7041c9525113e7ed7c]; +"JLS-27" [sha="efd4b438331c155eebaec96cd1eda337567794f8696b327562aaaed5fa8ded69"]; "NJF-01" [sha="548dc86014e093974f68660942daa231271496a471885bbed092a375b3079bd8"]; "NJF-02" [sha="6ea015646d696e3f014390ff41612eab66ac940f20cf27ce933cbadf8482d526"]; "NJF-03" [sha="4bd1f8210b7bba9a248055a437f377d9da0b7576c5e3ed053606cf8b5b2febe3"]; @@ -357,6 +358,7 @@ digraph G { "TA-BEHAVIOURS" -> "AOU-23" [sha=""]; "TA-BEHAVIOURS" -> "AOU-24" [sha=""]; "TA-BEHAVIOURS" -> "AOU-25" [sha=""]; +"TA-BEHAVIOURS" -> "JLS-27" [sha="9c3e4b4a4677ca22f28e2bb0cbc39c31676efa4c17ddc1fe66599589b83ef643"]; "TA-CONFIDENCE" -> "JLS-08" [sha="506164051180023c8533ea1f6dedf1bad894c3ee6020ff16b002e33b109c2791"]; "TA-CONFIDENCE" -> "JLS-09" [sha="80bbde95fc14f89acf3dad10b3831bc751943fe4a1d79d5cbf4702416c27530f"]; "TA-CONFIDENCE" -> "AOU-10_COMBINED" [sha="5e5d7dc606d53423fbb1f2d5755780c98839bdc2d108704af5ee1aed50403f5e"]; From 71cdc20eda2fe61a15dbddf8664c019d34dbbc8f Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff 
Date: Fri, 24 Oct 2025 10:39:41 +0000 Subject: [PATCH 33/67] tweak formulation of JLS-27 --- TSF/trustable/statements/JLS-27.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TSF/trustable/statements/JLS-27.md b/TSF/trustable/statements/JLS-27.md index f629aa98bf..2fb344db9d 100644 --- a/TSF/trustable/statements/JLS-27.md +++ b/TSF/trustable/statements/JLS-27.md @@ -12,4 +12,4 @@ evidence: digits: 3 --- -The test coverage for this release is monitored by coveralls and stays constant. \ No newline at end of file +The test coverage for this version of nlohmann/json is monitored by coveralls and stays constant. \ No newline at end of file From c9a32c757416655c71d5800f01eab0b47034e832 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 24 Oct 2025 10:52:59 +0000 Subject: [PATCH 34/67] pretty printing --- TSF/trustable/assertions/TA-Analysis.md | 32 ++++++++++---------- TSF/trustable/assertions/TA-Behaviours.md | 25 +++++++-------- TSF/trustable/assertions/TA-Confidence.md | 8 ++--- TSF/trustable/assertions/TA-Constraints.md | 14 ++++----- TSF/trustable/assertions/TA-Data.md | 20 ++++++------ TSF/trustable/assertions/TA-Fixes.md | 30 +++++++++--------- TSF/trustable/assertions/TA-Indicators.md | 24 +++++++-------- TSF/trustable/assertions/TA-Inputs.md | 30 +++++++++--------- TSF/trustable/assertions/TA-Iterations.md | 10 +++--- TSF/trustable/assertions/TA-Methodologies.md | 20 ++++++------ TSF/trustable/assertions/TA-Misbehaviours.md | 24 +++++++-------- TSF/trustable/assertions/TA-Releases.md | 12 ++++---- TSF/trustable/assertions/TA-Supply-Chain.md | 12 ++++---- TSF/trustable/assertions/TA-Tests.md | 12 ++++---- TSF/trustable/assertions/TA-Updates.md | 12 ++++---- TSF/trustable/assertions/TA-Validation.md | 28 ++++++++--------- 16 files changed, 157 insertions(+), 156 deletions(-) diff --git a/TSF/trustable/assertions/TA-Analysis.md b/TSF/trustable/assertions/TA-Analysis.md index 77dd636316..b1b19b5e98 100644 --- 
a/TSF/trustable/assertions/TA-Analysis.md +++ b/TSF/trustable/assertions/TA-Analysis.md @@ -2,63 +2,63 @@ * What fraction of Expectations are covered by the test data? - Answer: Every statement supporting both of the expectations is ultimately supported by a test, except for WFJ-06. For WFJ-06 it is impossible to provide a direct tests, since this is a statement on infinitely many cases. Indirect tests are provided by the rejection of ill-formed json data. + **Answer**: The two expectations are JLEX-01 and JLEX-02. Every statement supporting both of the expectations is ultimately supported by a test, except for WFJ-06. For WFJ-06 it is impossible to provide a direct tests, since this is a statement on infinitely many cases. Indirect tests are provided by the rejection of ill-formed json data. * What fraction of Misbehaviours are covered by the monitored indicator data? - Answer: For the intended use-case, no misbehaviours have been identified. Furthermore, no indicator data are collected. + **Answer**: For the intended use-case, no misbehaviours have been identified. Furthermore, no indicator data are collected. * How confident are we that the indicator data are accurate and timely? - Answer: No indicator data are collected. + **Answer**: No indicator data are collected. * How reliable is the monitoring process? - Answer: Due to no indicator data being collected, there is no monitoring process. + **Answer**: Due to no indicator data being collected, there is no monitoring process. * How well does the production data correlate with our test data? - Answer: Due to the general nature of the library, there are no production data. + **Answer**: Due to the general nature of the library, there are no production data. * Are we publishing our data analysis? - Answer: Since we have no production data with which to compare our not collected indicator data or our test data, no data analysis is done, which is not published. 
+ **Answer**: Since we have no production data with which to compare our not collected indicator data or our test data, no data analysis is done, which is not published. * Are we comparing and analysing production data vs test? - Answer: There are no production data. + **Answer**: There are no production data. * Are our results getting better, or worse? - Answer: Neither. + **Answer**: Neither. * Are we addressing spikes/regressions? - Answer: There are no spikes in the non-existent indicator data. If a test ever fails, then the spike is investigated. The results of fuzz testing are investigated in the original nlohmann/json. + **Answer**: There are no spikes in the non-existent indicator data. If a test ever fails, then the spike is investigated. The results of fuzz testing are investigated in the original nlohmann/json. * Do we have sensible/appropriate target failure rates? - Answer: For the unit and integration tests, zero. The target failure rate of fuzz testing is not under our control. + **Answer**: For the unit and integration tests, zero. The target failure rate of fuzz testing is not under our control. * Do we need to check the targets? - Answer: For the unit and integration tests, no. Since the fuzz testing runs and is investigated in the original nlohmann/json, there is no need to check the target. + **Answer**: For the unit and integration tests, no. Since the fuzz testing runs and is investigated in the original nlohmann/json, there is no need to check the target. * Are we achieving the targets? - Answer: For the unit and integration tests, yes. The achieving of the targets for the fuzz-testing is evaluated within the original nlohmann/json. + **Answer**: For the unit and integration tests, yes. The achieving of the targets for the fuzz-testing is evaluated within the original nlohmann/json. * Are all underlying assumptions and target conditions for the analysis specified? 
- Answer: Since none of the unit and integration tests are expected to fail, there is no further analysis of the results besides verifying the expectation. In case any test fails ever, the failure of the CI-pipeline encourages the maintainer to investigate. + **Answer**: Since none of the unit and integration tests are expected to fail, there is no further analysis of the results besides verifying the expectation. In case any test fails ever, the failure of the CI-pipeline encourages the maintainer to investigate. * Have the underlying assumptions been verified using known good data? - Answer: The assumption that all unit and integration tests succeed under the expected conditions is demonstrated by the non-failure of the CI-Pipeline. + **Answer**: The assumption that all unit and integration tests succeed under the expected conditions is demonstrated by the non-failure of the CI-Pipeline. * Has the Misbehaviour identification process been verified using known bad data? - Answer: Misbehaviours published on nlohmann/json usually provide minimal working examples for reproducing the faulty behaviour, enabling everyone to verify the identified misbehaviours. There is, however, no automatic process for the identification of misbehaviours. + **Answer**: Misbehaviours published on nlohmann/json usually provide minimal working examples for reproducing the faulty behaviour, enabling everyone to verify the identified misbehaviours. There is, however, no automatic process for the identification of misbehaviours. * Are results shown to be reproducible? - Answer: It is expected that the tests can be reproduced on every modern sufficiently powerful machine. + **Answer**: It is expected that the tests can be reproduced on every modern sufficiently powerful machine. 
diff --git a/TSF/trustable/assertions/TA-Behaviours.md b/TSF/trustable/assertions/TA-Behaviours.md index c9712bbe70..bfd9dfbb66 100644 --- a/TSF/trustable/assertions/TA-Behaviours.md +++ b/TSF/trustable/assertions/TA-Behaviours.md @@ -2,48 +2,49 @@ * How has the list of Expectations varied over time? - Answer: The list of expectations is taken from [here](https://eclipse-score.github.io/score/main/modules/baselibs/json/docs/requirements/index.html), whose development can be retraced using git. + **Answer**: The list of expectations is taken from [here](https://eclipse-score.github.io/score/main/modules/baselibs/json/docs/requirements/index.html), whose development can be retraced using git. * How confident can we be that this list is comprehensive? - Answer: The list of expectations has been collected amongst the stakeholders in S-CORE, so that we are very confident that the list is comprehensive. + **Answer**: The list of expectations has been collected amongst the stakeholders in S-CORE, so that we are very confident that the list is comprehensive. + The expectation to serialize user data into JSON format * Could some participants have incentives to manipulate information? - Answer: We can not imagine any reason. + **Answer**: We can not imagine any reason. * Could there be whole categories of Expectations still undiscovered? - Answer: It is unlikely, but the parsing of CBOR could become relevant at some time. + **Answer**: It is unlikely, but the parsing of CBOR could become relevant at some time. * Can we identify Expectations that have been understood but not specified? - Answer: No. + **Answer**: No. * Can we identify some new Expectations, right now? - Answer: No. + **Answer**: No. * How confident can we be that this list covers all critical requirements? - Answer: We can not think of any more critical requirement of a JSON parser in the sense of RFC8259 than to parse JSON data in the sense of RFC8259. 
+ **Answer**: We can not think of any more critical requirement of a JSON parser in the sense of RFC8259 than to parse JSON data in the sense of RFC8259. * How comprehensive is the list of tests? - Answer: The tests cover 99.186% of the code, according to coveralls, so we assume that the list of tests is quite comprehensive. + **Answer**: Currently, the branch coverage is 93.865% and the line coverage is 99.186%, cf. JLS-27. * Is every Expectation covered by at least one implemented test? - Answer: Yes, both of the expectations are covered by at least one implemented test. + **Answer**: Yes, both of the expectations are covered by at least one implemented test. Moreover, each statement supporting the expectations is covered by a test with the exception of WFJ-06. * Are there any Expectations where we believe more coverage would help? - Answer: No. + **Answer**: No. * How do dependencies affect Expectations, and are their properties verifiable? - Answer: There are no dependencies. + **Answer**: The library nlohmann/json does not have external dependencies, so that there are in particular none that affect Expectations. * Are input analysis findings from components, tools, and data considered in relation to Expectations? - Answer: No findings have been found. + **Answer**: No findings have been found. diff --git a/TSF/trustable/assertions/TA-Confidence.md b/TSF/trustable/assertions/TA-Confidence.md index b171f97d79..9a6786c206 100644 --- a/TSF/trustable/assertions/TA-Confidence.md +++ b/TSF/trustable/assertions/TA-Confidence.md @@ -2,16 +2,16 @@ * What is the algorithm for combining/comparing the scores? - Answer: It is the standard algorithm of trudag. + **Answer**: It is the standard algorithm of trudag. * How confident are we that this algorithm is fit for purpose? - Answer: We have no reason to assume that the standard algorithm is not fit for our purpose. + **Answer**: We have no reason to assume that the standard algorithm is not fit for our purpose. 
* What are the trends for each score? - Answer: CAN NOT BE ANSWERED NOW + **Answer**: CAN NOT BE **Answer**ED NOW * How well do our scores correlate with external feedback signals? - Answer: CAN NOT BE ANSWERED NOW \ No newline at end of file + **Answer**: CAN NOT BE **Answer**ED NOW \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Constraints.md b/TSF/trustable/assertions/TA-Constraints.md index 95c06d0ae1..4675045007 100644 --- a/TSF/trustable/assertions/TA-Constraints.md +++ b/TSF/trustable/assertions/TA-Constraints.md @@ -2,28 +2,28 @@ * Are the constraints grounded in realistic expectations, backed by real-world examples? - Answer: The constraints originate from S-CORE and the library itself. + **Answer**: The constraints originate from S-CORE and the library itself. * Do they effectively guide downstream consumers in expanding upon existing Statements? - Answer: ????? + **Answer**: ????? * Do they provide clear guidance for upstreams on reusing components with well-defined claims? - Answer: ????? + **Answer**: ????? * Are any Statements explicitly designated as not reusable or adaptable? - Answer: NO????? + **Answer**: NO????? * Are there worked examples from downstream or upstream users demonstrating these constraints in practice? - Answer: ???? + **Answer**: ???? * Have there been any documented misunderstandings from users, and are these visibly resolved? - Answer: Yes, it is documented that the [brace initialisation](https://json.nlohmann.me/home/faq/) regularly leads to confusion, cf. [here](https://github.com/nlohmann/json/issues/4898). + **Answer**: Yes, it is documented that the [brace initialisation](https://json.nlohmann.me/home/faq/) regularly leads to confusion, cf. [here](https://github.com/nlohmann/json/issues/4898). * Do external users actively keep up with updates, and are they properly notified of any changes? 
- Answer: External users of the library are not necessarily automatically notified of an update, and are neither assumed nor required to keep up to date. If the external user forks the github repository, however, then github shows automatically whenever the upstream changes. \ No newline at end of file + **Answer**: External users of the library are not necessarily automatically notified of an update, and are neither assumed nor required to keep up to date. If the external user forks the github repository, however, then github shows automatically whenever the upstream changes. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Data.md b/TSF/trustable/assertions/TA-Data.md index e735df9f10..a0830a0fd2 100644 --- a/TSF/trustable/assertions/TA-Data.md +++ b/TSF/trustable/assertions/TA-Data.md @@ -2,40 +2,40 @@ * Is all test data stored with long-term accessibility? - Answer: If we assume that github is long-term accessible, then yes. + **Answer**: If we assume that github is long-term accessible, then yes. * Is all monitoring data stored with long-term accessibility? - Answer: There are no monitoring data. + **Answer**: There are no monitoring data. * Are extensible data models implemented? - Answer: The data are stored in an sqlite database. + **Answer**: The data are stored in an sqlite database. * Is sensitive data handled correctly (broadcasted, stored, discarded, or anonymised) with appropriate encryption and redundancy? - Answer: There are no sensitive data produced, collected or stored. + **Answer**: There are no sensitive data produced, collected or stored. * Are proper backup mechanisms in place? - Answer: Not more than the default mechanisms of github. + **Answer**: Not more than the default mechanisms of github. * Are storage and backup limits tested? - Answer: No. + **Answer**: No. * Are all data changes traceable? - Answer: Yes, due to the usage of github. + **Answer**: Yes, due to the usage of github. 
* Are concurrent changes correctly managed and resolved? - Answer: Yes, due to the usage of github. + **Answer**: Yes, due to the usage of github. * Is data accessible only to intended parties? - Answer: Since the library is open source, there are no unintended parties. + **Answer**: Since the library is open source, there are no unintended parties. * Are any subsets of our data being published? - Answer: Yes, the collected data are publicly available. \ No newline at end of file + **Answer**: Yes, the collected data are publicly available. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Fixes.md b/TSF/trustable/assertions/TA-Fixes.md index 70d57a7247..aa0d619c07 100644 --- a/TSF/trustable/assertions/TA-Fixes.md +++ b/TSF/trustable/assertions/TA-Fixes.md @@ -3,60 +3,60 @@ * How many faults have we identified in XYZ? - Answer: There are no identifiable faults concerning the expectations. + **Answer**: There are no identifiable faults concerning the expectations. * How many unknown faults remain to be found, based on the number that have been processed so far? - Answer: It is unlikely that there are unknown faults concerning the expectations. + **Answer**: It is unlikely that there are unknown faults concerning the expectations. * Is there any possibility that people could be motivated to manipulate the lists (e.g. bug bonus or pressure to close). - Answer: Since the project is entirely open source, it is quite unlikely. + **Answer**: Since the project is entirely open source, it is quite unlikely. * How many faults may be unrecorded (or incorrectly closed, or downplayed)? - Answer: There may be none, at least when it concerns the expectations. + **Answer**: There may be none, at least when it concerns the expectations. * How do we collect lists of bugs and known vulnerabilities from components? - Answer: We pull the list from the issues reported to nlohmann/json labelled as bug and open or opened since the last release. 
This list is then stored using github, thereby enabling a traceability of the list. + **Answer**: We pull the list from the issues reported to nlohmann/json labelled as bug and open or opened since the last release. This list is then stored using github, thereby enabling a traceability of the list. * How (and how often) do we check these lists for relevant bugs and known vulnerabilities? - Answer: Whenever we generate the documentation, the list is pulled. If there is an issue previously unrecorded, then the maintainer is encouraged by the change of the trustable score to check this issue on applicability. + **Answer**: Whenever we generate the documentation, the list is pulled. If there is an issue previously unrecorded, then the maintainer is encouraged by the change of the trustable score to check this issue on applicability. * How confident can we be that the lists are honestly maintained? - Answer: We can not imagine a reason why the list could be dishonestly maintained. + **Answer**: We can not imagine a reason why the list could be dishonestly maintained. * Could some participants have incentives to manipulate information? - Answer: We can not think of a reason why. + **Answer**: We can not think of a reason why. * How confident are we that the lists are comprehensive? - Answer: We have no reason to assume that discovered bugs are not reported to nlohmann/json. + **Answer**: We have no reason to assume that discovered bugs are not reported to nlohmann/json. * Could there be whole categories of bugs/vulnerabilities still undiscovered? - Answer: There could be a mislabelling of issues, but it is unlikely that there are bugs or vulnerabilities not labelled as bug, instead it is likely that perceived issues due to a misunderstanding of how the library works are labelled as bug. 
+ **Answer**: There could be a mislabelling of issues, but it is unlikely that there are bugs or vulnerabilities not labelled as bug, instead it is likely that perceived issues due to a misunderstanding of how the library works are labelled as bug. * How effective is our triage/prioritisation? - Answer: ????? Since it is not intended to fix the library within S-CORE, but instead leave the development to the original nlohmann/json, there is no need to have a triage or prioritisation. + **Answer**: ????? Since it is not intended to fix the library within S-CORE, but instead leave the development to the original nlohmann/json, there is no need to have a triage or prioritisation. * How many components have never been updated? - Answer: None, the single component is up to date. + **Answer**: None, the single component is up to date. * How confident are we that we could update them? - Answer: If nlohmann/json would release an new version, we are very confident that we can update to that version. + **Answer**: If nlohmann/json would release an new version, we are very confident that we can update to that version. * How confident are we that outstanding fixes do not impact our Expectations? - Answer: We have not found any outstanding fixes impacting our expectations. + **Answer**: We have not found any outstanding fixes impacting our expectations. * How confident are we that outstanding fixes do not address Misbehaviours? - Answer: For all of the none identified misbehaviours, we are very confident that none of the outstanding fixes do not address them. \ No newline at end of file + **Answer**: For all of the none identified misbehaviours, we are very confident that none of the outstanding fixes do not address them. 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Indicators.md b/TSF/trustable/assertions/TA-Indicators.md index 1b5cf3f7ab..0ca7405667 100644 --- a/TSF/trustable/assertions/TA-Indicators.md +++ b/TSF/trustable/assertions/TA-Indicators.md @@ -2,48 +2,48 @@ * How appropriate/thorough are the analyses that led to the indicators? - Answer: Since no misbehaviours for the use of the library for parsing and verification of JSON data according to RFC8259 have been identified, no warning indicators are implemented. + **Answer**: Since no misbehaviours for the use of the library for parsing and verification of JSON data according to RFC8259 have been identified, no warning indicators are implemented. * How confident can we be that the list of indicators is comprehensive? - Answer: There are no warning indicators implemented, of which we are very confident. + **Answer**: There are no warning indicators implemented, of which we are very confident. * Could there be whole categories of warning indicators still missing? - Answer: Yes, there could. Within S-CORE, however, any warning indicator that is not natively implemented within the original nlohmann/json should be implemented in the wrapper defining the interface between the library and the project using it. + **Answer**: Yes, there could. Within S-CORE, however, any warning indicator that is not natively implemented within the original nlohmann/json should be implemented in the wrapper defining the interface between the library and the project using it. * How has the list of advance warning indicators varied over time? - Answer: It has stayed constant. + **Answer**: It has stayed constant. * How confident are we that the indicators are leading/predictive? - Answer: There are none. + **Answer**: There are none. * Are there misbehaviours that have no advance warning indicators? - Answer: There are no misbehaviours identified. + **Answer**: There are no misbehaviours identified. 
* Can we collect data for all indicators? - Answer: There are currently no implemented indicators, so that no data are collected. + **Answer**: There are currently no implemented indicators, so that no data are collected. * Are the monitoring mechanisms used included in our Trustable scope? - Answer: No, but there are also none. + **Answer**: No, but there are also none. * Are there gaps or trends in the data? - Answer: There are no data where gaps or trends could be identified. + **Answer**: There are no data where gaps or trends could be identified. * If there are gaps or trends, are they analysed and addressed? - Answer: There are no data. + **Answer**: There are no data. * Is the data actually predictive/useful? - Answer: There are no data. + **Answer**: There are no data. * Are indicators from code, component, tool, or data inspections taken into consideration? - Answer: There are no indicators. \ No newline at end of file + **Answer**: There are no indicators. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Inputs.md b/TSF/trustable/assertions/TA-Inputs.md index b407067959..d624636ceb 100644 --- a/TSF/trustable/assertions/TA-Inputs.md +++ b/TSF/trustable/assertions/TA-Inputs.md @@ -4,60 +4,60 @@ The single_include/nlohmann/json.hpp is the single and only component of the lib * Are there components that are not on the list? - Answer: No. + **Answer**: No. * Are there assessments for all components? - Answer: ????? + **Answer**: ????? * Has an assessment been done for the current version of the component? - Answer: ????? + **Answer**: ????? * Have sources of bug and/or vulnerability data been identified? - Answer: There are no bug and/or vulnerability data. + **Answer**: There are no bug and/or vulnerability data. * Have additional tests and/or Expectations been documented and linked to component assessment? - Answer: ?????? + **Answer**: ?????? * Are component tests run when integrating new versions of components? 
- Answer: There are no further components. + **Answer**: There are no further components. * Are there tools that are not on the list? - Answer: The library does not use external tools, except for the tools provided by the C++ standard library. + **Answer**: The library does not use external tools, except for the tools provided by the C++ standard library. * Are there impact assessments for all tools? - Answer: ?????? The library does not use external tools for which an impact assessment has to be done. + **Answer**: ?????? The library does not use external tools for which an impact assessment has to be done. * Have tools with high impact been qualified? - Answer: There are no tools with high impact. + **Answer**: There are no tools with high impact. * Were assessments or reviews done for the current tool versions? - Answer: ????? The library does not use external tools for which an impact assessment has to be done. + **Answer**: ????? The library does not use external tools for which an impact assessment has to be done. * Have additional tests and/or Expectations been documented and linked to tool assessments? - Answer: No. + **Answer**: No. * Are tool tests run when integrating new versions of tools? - Answer: The library does not use external tools for which a new version needs to be integrated. + **Answer**: The library does not use external tools for which a new version needs to be integrated. * Are tool and component tests included in release preparation? - Answer: Yes, the tests of the library are included in the release. + **Answer**: Yes, the tests of the library are included in the release. * Can patches be applied, and then upstreamed for long-term maintenance? - Answer: Yes, if ever a misbehaviour is found and patched, then a pull-request to the original nlohmann/json repository can be opened to upstream the changes. 
+ **Answer**: Yes, if ever a misbehaviour is found and patched, then a pull-request to the original nlohmann/json repository can be opened to upstream the changes. * Do all dependencies comply with acceptable licensing terms? - Answer: Yes, the library is licensed under MIT License . \ No newline at end of file + **Answer**: Yes, the library is licensed under MIT License . \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Iterations.md b/TSF/trustable/assertions/TA-Iterations.md index 4c2daada2d..5a26f8b75e 100644 --- a/TSF/trustable/assertions/TA-Iterations.md +++ b/TSF/trustable/assertions/TA-Iterations.md @@ -2,20 +2,20 @@ * How much of the software is provided as binary only, expressed as a fraction of the BoM list? - Answer: None. + **Answer**: None. * How much is binary, expressed as a fraction of the total storage footprint? - Answer: None. + **Answer**: None. * For binaries, what claims are being made and how confident are we in the people/organisations making the claims? - Answer: There are no binaries. + **Answer**: There are no binaries. * For third-party source code, what claims are we making, and how confident are we about these claims? - Answer: There is no third-party source code in the library. + **Answer**: There is no third-party source code in the library. * For software developed by us, what claims are we making, and how confident are we about these claims? - Answer: This is the remainder of the documentation. \ No newline at end of file + **Answer**: This is the remainder of the documentation. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Methodologies.md b/TSF/trustable/assertions/TA-Methodologies.md index c6a73b7aeb..101fedf5b4 100644 --- a/TSF/trustable/assertions/TA-Methodologies.md +++ b/TSF/trustable/assertions/TA-Methodologies.md @@ -4,40 +4,40 @@ This project follows purely the Methodologies of Eclipse S-CORE. * Are the identified gaps documented clearly to justify using a manual process? 
- Answer: + **Answer**: * Are the goals for each process clearly defined? - Answer: + **Answer**: * Is the sequence of procedures documented in an unambiguous manner? - Answer: + **Answer**: * Can improvements to the processes be suggested and implemented? - Answer: + **Answer**: * How frequently are processes changed? - Answer: + **Answer**: * How are changes to manual processes communicated? - Answer: + **Answer**: * Are there any exceptions to the processes? - Answer: + **Answer**: * How is evidence of process adherence recorded? - Answer: + **Answer**: * How is the effectiveness of the process evaluated? - Answer: + **Answer**: * Is ongoing training required to follow these processes? - Answer: \ No newline at end of file + **Answer**: \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Misbehaviours.md b/TSF/trustable/assertions/TA-Misbehaviours.md index 31a4c65c2d..773a481558 100644 --- a/TSF/trustable/assertions/TA-Misbehaviours.md +++ b/TSF/trustable/assertions/TA-Misbehaviours.md @@ -2,48 +2,48 @@ * How has the list of misbehaviours varied over time? - Answer: The list of misbehaviours is collected using github and its development is thereby understandable. + **Answer**: The list of misbehaviours is collected using github and its development is thereby understandable. * How confident can we be that this list is comprehensive? - Answer: Due to the collaborative nature of the open source community, we deem it quite unlikely that there are any known misbehaviours which are not reported to the repository nlohmann/json. + **Answer**: Due to the collaborative nature of the open source community, we deem it quite unlikely that there are any known misbehaviours which are not reported to the repository nlohmann/json. * How well do the misbehaviours map to the expectations? - Answer: There are no identified misbehaviours that tangent the expectations. + **Answer**: There are no identified misbehaviours that tangent the expectations. 
* Could some participants have incentives to manipulate information? - Answer: We could not think of an incentive that any collaborateur could have to manipulate the information. + **Answer**: We could not think of an incentive that any collaborateur could have to manipulate the information. * Could there be whole categories of misbehaviours still undiscovered? - Answer: Due to the wide use and long-standing development of the library it is quite unlikely that any major misbehaviors, in particular regarding the parsing and validating of JSON data in the sense of RFC-8259, is undiscovered. + **Answer**: Due to the wide use and long-standing development of the library it is quite unlikely that any major misbehaviors, in particular regarding the parsing and validating of JSON data in the sense of RFC-8259, is undiscovered. * Can we identify misbehaviours that have been understood but not specified? - Answer: No. + **Answer**: No. * Can we identify some new misbehaviours, right now? - Answer: No. + **Answer**: No. * Is every misbehaviour represented by at least one fault induction test? - Answer: Since there are no misbehaviours that concern the use within S-CORE, no. + **Answer**: Since there are no misbehaviours that concern the use within S-CORE, no. * Are fault inductions used to demonstrate that tests which usually pass can and do fail appropriately? - Answer: ?????? No. + **Answer**: ?????? No. * Are all the fault induction results actually collected? - Answer: ?????? No. + **Answer**: ?????? No. * Are the results evaluated? - Answer: ?????? No. + **Answer**: ?????? No. * Do input analysis findings on verifiable tool or component claims and features identify additional misbehaviours or support existing mitigations? - Answer: Currently, there is no analysis which identifies additional misbehaviours. The only such analysis is indirectly via the analysis of the fuzz testing, which currently does not identifies additional misbehaviours. 
\ No newline at end of file + **Answer**: Currently, there is no analysis which identifies additional misbehaviours. The only such analysis is indirectly via the analysis of the fuzz testing, which currently does not identifies additional misbehaviours. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Releases.md b/TSF/trustable/assertions/TA-Releases.md index b411f76fde..ece1bc453f 100644 --- a/TSF/trustable/assertions/TA-Releases.md +++ b/TSF/trustable/assertions/TA-Releases.md @@ -2,24 +2,24 @@ * How confident are we that all components are taken from within our controlled environment? - Answer: This library does not take anything from outside of this repository. + **Answer**: This library does not take anything from outside of this repository. * How confident are we that all of the tools we are using are also under our control? - Answer: The version of nlohmann/json that is documented with this documentation is under the full control of the Eclipse S-CORE organisation. + **Answer**: The version of nlohmann/json that is documented with this documentation is under the full control of the Eclipse S-CORE organisation. * Are our builds repeatable on a different server, or in a different context? - Answer: Since there is no "build" of the header-only library, yes. + **Answer**: Since there is no "build" of the header-only library, yes. * How sure are we that our builds don't access the internet? - Answer: There is no implemented access to the internet in the library itself. The testsuite is downloaded from a within Eclipse S-CORE. + **Answer**: There is no implemented access to the internet in the library itself. The testsuite is downloaded from a within Eclipse S-CORE. * How many of our components are non-reproducible? - Answer: The single component is reproducible. + **Answer**: The single component is reproducible. * How confident are we that our reproducibility check is correct? - Answer: Quite. \ No newline at end of file + **Answer**: Quite. 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Supply-Chain.md b/TSF/trustable/assertions/TA-Supply-Chain.md index 0c3df92e7c..dbb2e3f0a3 100644 --- a/TSF/trustable/assertions/TA-Supply-Chain.md +++ b/TSF/trustable/assertions/TA-Supply-Chain.md @@ -2,24 +2,24 @@ * Could there be other components, missed from the list? - Answer: Since the library does not contain any external components, no. + **Answer**: Since the library does not contain any external components, no. * Does the list include all toolchain components? - Answer: Since the library does not contain any external components, yes. + **Answer**: Since the library does not contain any external components, yes. * Does the toolchain include a bootstrap? - Answer: ???? No. + **Answer**: ???? No. * Could the content of a mirrored project be compromised by an upstream change? - Answer: Since the library does not contain any external components, no. + **Answer**: Since the library does not contain any external components, no. * Are mirrored projects up to date with the upstream project? - Answer: Yes, the library is up to date with the most recent release of the original nlohmann/json + **Answer**: Yes, the library is up to date with the most recent release of the original nlohmann/json * Are mirrored projects based on the correct upstream? - Answer: Yes. \ No newline at end of file + **Answer**: Yes. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Tests.md b/TSF/trustable/assertions/TA-Tests.md index a376635c63..8f05b60bdd 100644 --- a/TSF/trustable/assertions/TA-Tests.md +++ b/TSF/trustable/assertions/TA-Tests.md @@ -2,24 +2,24 @@ * How confident are we that our test tooling and environment setups used for tests, fault inductions, and analyses are reproducible? - Answer: The test can be reproduced any time on any machine running the versions of the operating systems and compilers as provided (TODO, cf. 
AOU-14) + **Answer**: The test can be reproduced any time on any machine running the versions of the operating systems and compilers as provided (TODO, cf. AOU-14) * Are any exceptions identified, documented and justified? - Answer: To the best of our understanding, there are no exceptions identified. + **Answer**: To the best of our understanding, there are no exceptions identified. * How confident are we that all test components are taken from within our controlled environment? - Answer: All tests are either self-contained or download test data from [within Eclipse S-CORE](https://github.com/eclipse-score/inc_nlohmann_json/tree/json_test_data_version_3_1_0_mirror). + **Answer**: All tests are either self-contained or download test data from [within Eclipse S-CORE](https://github.com/eclipse-score/inc_nlohmann_json/tree/json_test_data_version_3_1_0_mirror). * How confident are we that all of the test environments we are using are also under our control? - Answer: ???? The environments are standard docker images of ubuntu and standard versions of compilers. + **Answer**: ???? The environments are standard docker images of ubuntu and standard versions of compilers. * Do we record all test environment components, including hardware and infrastructure used for exercising tests and processing input/output data? - Answer: No, since the tests are independent from hard-ware, these data are not collected. + **Answer**: No, since the tests are independent from hard-ware, these data are not collected. * How confident are we that all tests scenarios are repeatable? - Answer: All test scenarios are repeated daily in the CI pipeline. \ No newline at end of file + **Answer**: All test scenarios are repeated daily in the CI pipeline. 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Updates.md b/TSF/trustable/assertions/TA-Updates.md index 72da18b81a..482d412201 100644 --- a/TSF/trustable/assertions/TA-Updates.md +++ b/TSF/trustable/assertions/TA-Updates.md @@ -2,24 +2,24 @@ * Where are the change and configuration management controls specified? - Answer: WIP + **Answer**: WIP * Are these controls enforced for all of components, tools, data, documentation and configurations? - Answer: The S-CORE Methodology is followed, compliance with which enforces the change process to be followed. + **Answer**: The S-CORE Methodology is followed, compliance with which enforces the change process to be followed. * Are there any ways in which these controls can be subverted, and have we mitigated them? - Answer: Yes, the change process can just not be followed. We have no real method to enforce it other than to trust that the committers follow the S-CORE processes. + **Answer**: Yes, the change process can just not be followed. We have no real method to enforce it other than to trust that the committers follow the S-CORE processes. * Does change control capture all potential regressions? - Answer: Due to the test coverage of 99.186%, it is unlikely that a potential regression is not captured. + **Answer**: Due to the test coverage of 99.186%, it is unlikely that a potential regression is not captured. * Is change control timely enough? - Answer: Not applicable, as far as can be understood right now, there is no immanent need to keep the library up to date. + **Answer**: Not applicable, as far as can be understood right now, there is no immanent need to keep the library up to date. * Are all guidance and checks understandable and consistently followed? 
- Answer: WIP \ No newline at end of file + **Answer**: WIP \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-Validation.md b/TSF/trustable/assertions/TA-Validation.md index 7867a5e0e2..8e23bae526 100644 --- a/TSF/trustable/assertions/TA-Validation.md +++ b/TSF/trustable/assertions/TA-Validation.md @@ -1,55 +1,55 @@ #### Checklist for TA-VALIDATION from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) -I DO NOT FEEL CONFIDENT TO ANSWER THIS! +I DO NOT FEEL CONFIDENT TO **Answer** THIS! * Is the selection of tests correct? - Answer: ???? Who could tell this? + **Answer**: ???? Who could tell this? * Are the tests executed enough times? - Answer: ???? Define "enough times" + **Answer**: ???? Define "enough times" * How confident are we that all test results are being captured? - Answer: ???? How fine-grained is a test-result supposed to be? + **Answer**: ???? How fine-grained is a test-result supposed to be? * Can we look at any individual test result, and establish what it relates to? - Answer: ???? + **Answer**: ???? * Can we trace from any test result to the expectation it relates to? - Answer: No, there are more tests than expectations, and in particular tests that relate to the inner workings of the library which are not used by S-CORE. + **Answer**: No, there are more tests than expectations, and in particular tests that relate to the inner workings of the library which are not used by S-CORE. * Can we identify precisely which environment (software and hardware) were used? - Answer: ???? How precisely shall that be? Moreover, the tests are supposed to run independent of underlying hardware, since this is a software. + **Answer**: ???? How precisely shall that be? Moreover, the tests are supposed to run independent of underlying hardware, since this is a software. * How many pass/fail results would be expected, based on the scheduled tests? - Answer: Zero fails. + **Answer**: Zero fails. 
* Do we have all of the expected results? - Answer: Yes. + **Answer**: Yes. * Do we have time-series data for all of those results? - Answer: Yes, there are time-series data. + **Answer**: Yes, there are time-series data. * If there are any gaps, do we understand why? - Answer: ???? Define gaps + **Answer**: ???? Define gaps * Are the test validation strategies credible and appropriate? - Answer: ???? Define test validation strategies + **Answer**: ???? Define test validation strategies * What proportion of the implemented tests are validated? - Answer: ???? None. + **Answer**: ???? None. * Have the tests been verified using known good and bad data? - Answer: ???? \ No newline at end of file + **Answer**: ???? \ No newline at end of file From 867fdf97efc96a17891722d47d48cf8f3d6ae613 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 24 Oct 2025 12:14:07 +0000 Subject: [PATCH 35/67] adapt tests for gcc-15 * gcc-15 complains with some tests of release 3.12.0 * solution: remove semicolon in unit-regression2 and add noexcept in unit-user_defined_input * solution copied from commit 6b9199382bca57d5c85609c54f70279f0d5e7f25 on nlohmann/json --- tests/src/unit-regression2.cpp | 2 +- tests/src/unit-user_defined_input.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/src/unit-regression2.cpp b/tests/src/unit-regression2.cpp index 2c3977fef9..bce447e363 100644 --- a/tests/src/unit-regression2.cpp +++ b/tests/src/unit-regression2.cpp @@ -388,7 +388,7 @@ struct Example_3810 Example_3810() = default; }; -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Example_3810, bla); // NOLINT(misc-use-internal-linkage) +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Example_3810, bla) // NOLINT(misc-use-internal-linkage) TEST_CASE("regression tests 2") { diff --git a/tests/src/unit-user_defined_input.cpp b/tests/src/unit-user_defined_input.cpp index befc4b17af..5115e8fd30 100644 --- a/tests/src/unit-user_defined_input.cpp +++ b/tests/src/unit-user_defined_input.cpp @@ -60,12 
+60,12 @@ TEST_CASE("Custom container member begin/end") { const char* data; - const char* begin() const + const char* begin() const noexcept { return data; } - const char* end() const + const char* end() const noexcept { return data + strlen(data); // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) } From 6e1e6add7239365cf931022d4a1a99b463094bc6 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 24 Oct 2025 12:27:28 +0000 Subject: [PATCH 36/67] pretty printing --- TSF/trustable/assertions/TA-Confidence.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TSF/trustable/assertions/TA-Confidence.md b/TSF/trustable/assertions/TA-Confidence.md index 9a6786c206..0160733b07 100644 --- a/TSF/trustable/assertions/TA-Confidence.md +++ b/TSF/trustable/assertions/TA-Confidence.md @@ -10,8 +10,8 @@ * What are the trends for each score? - **Answer**: CAN NOT BE **Answer**ED NOW + **Answer**: CAN NOT BE ANSWERED NOW * How well do our scores correlate with external feedback signals? 
- **Answer**: CAN NOT BE **Answer**ED NOW \ No newline at end of file + **Answer**: CAN NOT BE ANSWERED NOW \ No newline at end of file From f18c30c619d54499bdaf06deb85c0a3e789af320 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Fri, 24 Oct 2025 15:44:23 +0000 Subject: [PATCH 37/67] amend JLS-05 --- TSF/trustable/statements/JLS-05.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/TSF/trustable/statements/JLS-05.md b/TSF/trustable/statements/JLS-05.md index 5560530589..b1e273beb4 100644 --- a/TSF/trustable/statements/JLS-05.md +++ b/TSF/trustable/statements/JLS-05.md @@ -17,6 +17,12 @@ references: - type: project_website url: "https://github.com/nlohmann/json/pulse" description: "presents activity over the past week" + - type: project_website + url: "https://github.com/orgs/score-json/discussions/27#discussion-8594385" + description: "comparison between JSON libraries demonstrating the popularity of nlohmann/json" + - type: project_website + url: "https://json.nlohmann.me/home/customers/" + description: "list of large projects using nlohmann/json" evidence: type: https_response_time configuration: @@ -32,4 +38,4 @@ score: Erikhu1: 1.0 --- -The OSS nlohmann/json is widely used, actively maintained and uses github issues to track bugs and misbehaviours. \ No newline at end of file +The OSS nlohmann/json is widely used and actively maintained; bugs and misbehaviours are tracked publicly and transparently. 
\ No newline at end of file From d810627290f81071d3d91b0b3687018e591f59b3 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Mon, 27 Oct 2025 05:13:31 +0000 Subject: [PATCH 38/67] add monitoring AOU --- TSF/trustable/assumptions-of-use/AOU-29.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 TSF/trustable/assumptions-of-use/AOU-29.md diff --git a/TSF/trustable/assumptions-of-use/AOU-29.md b/TSF/trustable/assumptions-of-use/AOU-29.md new file mode 100644 index 0000000000..971e8fa829 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-29.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator evaluate the necessity of monitoring mechanisms and implement these together with a persistent storage of monitoring data as needed. \ No newline at end of file From 86c5d2dbabc72c3e1c866ae104d8d2d915fcc9d2 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Mon, 27 Oct 2025 05:13:53 +0000 Subject: [PATCH 39/67] reformulate checklist --- TSF/trustable/assertions/TA-Constraints.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/TSF/trustable/assertions/TA-Constraints.md b/TSF/trustable/assertions/TA-Constraints.md index 4675045007..2c123d8ee7 100644 --- a/TSF/trustable/assertions/TA-Constraints.md +++ b/TSF/trustable/assertions/TA-Constraints.md @@ -2,7 +2,7 @@ * Are the constraints grounded in realistic expectations, backed by real-world examples? - **Answer**: The constraints originate from S-CORE and the library itself. + **Answer**: The constraints originate from S-CORE (e.g. AOU-04, AOU-05, AOU-07, AOU-21), the standard RFC-8259 (e.g. AOU-05, AOU-20, AOU-21) and the library nlohmann/json itself (AOU-06, AOU-20) in order to ensure that the expectations are met. * Do they effectively guide downstream consumers in expanding upon existing Statements? @@ -14,7 +14,7 @@ * Are any Statements explicitly designated as not reusable or adaptable? - **Answer**: NO????? 
+ **Answer**: No statement has been intentionally designated as not reusable or adaptable. * Are there worked examples from downstream or upstream users demonstrating these constraints in practice? @@ -22,7 +22,7 @@ * Have there been any documented misunderstandings from users, and are these visibly resolved? - **Answer**: Yes, it is documented that the [brace initialisation](https://json.nlohmann.me/home/faq/) regularly leads to confusion, cf. [here](https://github.com/nlohmann/json/issues/4898). + **Answer**: Yes, it is documented that the [brace initialisation](https://json.nlohmann.me/home/faq/) (cf. AOU-06) regularly leads to confusion, cf. [here](https://github.com/nlohmann/json/issues/4898). * Do external users actively keep up with updates, and are they properly notified of any changes? From dbd6f05055fcabe6ae19385230503f01bbfc112f Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Mon, 27 Oct 2025 06:21:12 +0000 Subject: [PATCH 40/67] adapt README --- .dotstop_extensions/README.md | 37 ++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index 241de30696..6e57029c78 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -14,9 +14,9 @@ Custom references are defined in `references.py`. A (custom) reference is used b ## CPPTestReference -The content of a `CPPTestReference` is given by the lines of code corresponding to a test-case or a section of a test-case in the unit-tests given in tests/src and TSF/tests. +The content of a `CPPTestReference` is given by the lines of code corresponding to a test-case or a section of a test-case in a specified unit-test-file. The sections are identified in the value of "name", where the nested sections are separated by semicolons. -For the `CPPTestReference` an example is: +For the `CPPTestReference` the expected configuration is: ``` --- ... 
@@ -30,9 +30,12 @@ references: ## JSONTestsuiteReference -The content of a `JSONTestsuiteReference` is given by the lines of code corresponding to a test-case or a section of a test-case in the unit tests, where a (list of) specified test-file(s) located on an external test-repository is utilized, and the content of these test-files. +The `JSONTestsuiteReference` is a variant of the function reference, which is augmented by an external file containing test-data in the form of well- or ill-formed JSON candidate data. +A `JSONTestsuiteReference` is therefore given by the data of a `CPPTestReference` together with a list containing the paths to these external files. +The external files are stored in a separate branch of the repository, and their text is loaded via call to github. +The content of a `JSONTestsuiteReference` is given by the content of the underlying `CPPTestReference` together with the sum of the contents of the external test-suite files. -For the `JSONTestsuiteReference` an example is: +For the `JSONTestsuiteReference` the expected configuration is: ``` --- ... @@ -65,7 +68,8 @@ references: --- ``` -Since functions may be overloaded, a `FunctionReference` can be initialised with an optional overload-parameter; additionally, it is possible to give a description. The full example is: +Since functions may be overloaded, a `FunctionReference` can be initialised with an optional overload-parameter. +The overload-parameter specifies which implementation of the function is referred to, i.e. if the overload-parameter for the function ``class::function()`` is set to _n_, then the _n_-th implementation of ``function()`` within the class ``class`` is used, if it exists; otherwise, an error is thrown. Additionally, it is possible, but not mandatory, to give a description. The full example is: ``` --- ... @@ -352,6 +356,29 @@ evidence: It is of utmost importance that the arguments come with quotation marks, otherwise, the update helper does not work as intended. 
+## coveralls_reporter + +The automatic validator `coveralls_reporter` queries the [coveralls](https://coveralls.io/) api to get the line and branch coverages calculated by the service, which is running on the repository. +Unless the version of `nlohmann/json` documented in this repository changes, it is expected that both coverage numbers remain constant. +When initialising the reference, the current code coverage is given as a parameter, to which the fetched coverages are compared. +If no branch is specified, then the most recently calculated coverage is fetched, so that it is generally recommended to specify a branch. +Moreover, it is possible to specify the number of decimal digits, which is defaulted to three, when not specified. +The validator returns a score of 1.0 if both fetched coverages rounded to the specified number of decimal digits coincide with the specified ones, and a score of 0.0 otherwise. + +The expected configuration is the following: + +``` +evidence: + type: coveralls_reporter + configuration: + owner: "score-json" + repo: "json" + branch: "main" + line_coverage: 99.186 + branch_coverage: 93.865 + digits: 3 +``` + # Data store interface From 94ade28cf9b0d8969398c7eab8f902567d43cdbf Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Mon, 27 Oct 2025 10:18:42 +0000 Subject: [PATCH 41/67] fix wrong validator name --- TSF/trustable/statements/JLS-11.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TSF/trustable/statements/JLS-11.md b/TSF/trustable/statements/JLS-11.md index e95b6eba81..c5fd26f1c6 100644 --- a/TSF/trustable/statements/JLS-11.md +++ b/TSF/trustable/statements/JLS-11.md @@ -2,7 +2,7 @@ level: 1.1 normative: true evidence: - type: issue_checker + type: check_issues configuration: release_date: "2025-04-11T08:43:39Z" list_of_known_misbehaviours: "./TSF/docs/nlohmann_misbehaviours_comments.md" From 8fbf587a4179022058387fe14828ef8d2f06f2c1 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Mon, 27 Oct 2025 
11:58:10 +0000 Subject: [PATCH 42/67] fix return value for validators --- .dotstop_extensions/validators.py | 40 +++++++++++++++---------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py index 2732c16420..afcd6027b7 100644 --- a/.dotstop_extensions/validators.py +++ b/.dotstop_extensions/validators.py @@ -11,7 +11,6 @@ from TSF.scripts.generate_list_of_tests import ListOfTestsGenerator import hashlib import json -from datetime import datetime, timezone import re import subprocess @@ -149,7 +148,7 @@ def check_test_results(configuration: dict[str, yaml]) -> tuple[float, list[Exce # get the test-names raw_tests = configuration.get("tests",None) if raw_tests is None: - return(1.0, Warning("Warning: No tests specified! Assuming absolute trustability!")) + return(1.0, [Warning("Warning: No tests specified! Assuming absolute trustability!")]) # process test-names tests = [] for test in raw_tests: @@ -291,15 +290,16 @@ def sha_checker(configuration: dict[str, yaml]) -> tuple[float, list[Exception | return (score, exceptions) def check_issues(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + from datetime import datetime, timezone # get relevant release date release_date = configuration.get("release_date",None) if release_date is None: - return (0.0, RuntimeError("The release date of the most recent version of nlohmann/json is not specified.")) + return (0.0, [RuntimeError("The release date of the most recent version of nlohmann/json is not specified.")]) else: try: release_time = datetime.strptime(release_date,"%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc).timestamp() except: - return(0.0, RuntimeError("The format of the release date is to be %Y-%m-%dT%H:%M:%SZ")) + return(0.0, [RuntimeError("The format of the release date is to be %Y-%m-%dT%H:%M:%SZ")]) # get path to static list of misbehaviours raw_known_misbehaviours = 
configuration.get("list_of_known_misbehaviours",None) # parse list of inapplicable misbehaviours @@ -331,10 +331,10 @@ def check_issues(configuration: dict[str, yaml]) -> tuple[float, list[Exception and (all_open_issues[i].get("labels"))[0].get("name") == "kind: bug" ] except: - return(0.0, RuntimeError("The list of open issues could not be extracted.")) + return(0.0, [RuntimeError("The list of open issues could not be extracted.")]) for issue in relevant_open_issues: if issue not in inapplicable_misbehaviours and issue is not None: - return(0.0,[]) + return(0.0, []) # parse raw list of closed misbehaviours try: with open("raw_closed_issues.json") as list_2: @@ -349,20 +349,20 @@ def check_issues(configuration: dict[str, yaml]) -> tuple[float, list[Exception >=release_time ] except: - return(0.0, RuntimeError("The list of closed issues could not be extracted.")) + return(0.0, [RuntimeError("The list of closed issues could not be extracted.")]) for issue in relevant_closed_issues: if issue not in inapplicable_misbehaviours and issue is not None: - return(0.0,[]) + return(0.0, []) # If you are here, then there are no applicable misbehaviours. 
return (1.0, []) def did_workflows_fail(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: owner = configuration.get("owner",None) if owner is None: - return (0.0, RuntimeError("The owner is not specified in the configuration of did_workflows_fail.")) + return (0.0, [RuntimeError("The owner is not specified in the configuration of did_workflows_fail.")]) repo = configuration.get("repo",None) if repo is None: - return (0.0, RuntimeError("The repository is not specified in the configuration of did_workflows_fail.")) + return (0.0, [RuntimeError("The repository is not specified in the configuration of did_workflows_fail.")]) event = configuration.get("event","push") url = f"https://github.com/{owner}/{repo}/actions?query=event%3A{event}+is%3Afailure" branch = configuration.get("branch",None) @@ -370,24 +370,24 @@ def did_workflows_fail(configuration: dict[str, yaml]) -> tuple[float, list[Exce url += "+branch%3A{branch}" res = requests.get(url) if res.status_code != 200: - return (0.0, RuntimeError(f"The website {url} can not be successfully reached!")) + return (0.0, [RuntimeError(f"The website {url} can not be successfully reached!")]) m = re.search(r'(\d+)\s+workflow run results', res.text, flags=re.I) if m is None: - return (0.0, RuntimeError("The number of failed workflows can not be found.")) + return (0.0, [RuntimeError("The number of failed workflows can not be found.")]) if m.group(1).strip() != "0": - return (0.0, Warning("There are failed workflows!")) + return (0.0, [Warning("There are failed workflows!")]) return (1.0, []) def is_branch_protected(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: branch = configuration.get("branch",None) if branch is None: - return (0.0, RuntimeError("The branch is not specified.")) + return (0.0, [RuntimeError("The branch is not specified.")]) res = subprocess.run(["git", "diff", "--cached", "--quiet", "--exit-code"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, 
text=True, check=True) if res.returncode != 0: raise RuntimeError("There are currently staged changes. Please unstage to proceed.") try: subprocess.run(["git","push","origin",f"HEAD:{branch}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) - return (0.0, RuntimeError(f"The branch {branch} is not protected!")) + return (0.0, [RuntimeError(f"The branch {branch} is not protected!")]) except: return (1.0, []) @@ -440,9 +440,9 @@ def coveralls_reporter(configuration: dict[str, yaml]) -> tuple[float, list[Exce def combinator(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: validators = configuration.get("validators",None) if validators is None: - return (1.0, Warning("No validators were given, returning the void-validator.")) + return (1.0, [Warning("No validators were given, returning the void-validator.")]) elif not isinstance(validators,list): - return (0.0, TypeError("The list of validators must be given as list.")) + return (0.0, [TypeError("The list of validators must be given as list.")]) scores = [] exceptions = [] weights = [] @@ -450,16 +450,16 @@ def combinator(configuration: dict[str, yaml]) -> tuple[float, list[Exception | # fetch configuration validator_configuration = validator.get("configuration", None) if not isinstance(validator_configuration,dict[str, yaml]): - return (0.0, TypeError("Validator configuration must be an object.")) + return (0.0, [TypeError("Validator configuration must be an object.")]) # fetch weight weight = float(validator.get("weight",1.0)) if weight<0: - return (0.0, TypeError("Validator weights must be non-negative.")) + return (0.0, [TypeError("Validator weights must be non-negative.")]) weights.append(weight) # fetch type validator_type = validator.get("type", None) if validator_type is None: - return (0.0, TypeError("Missing validator type declaration.")) + return (0.0, [TypeError("Missing validator type declaration.")]) # execute validator if validator_type == 
"check_artifact_exists": validator_score, validator_errors = check_artifact_exists(validator_configuration) From 6eb46a1a378af51a02286e72be7baec69c831f93 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Tue, 4 Nov 2025 12:42:05 +0000 Subject: [PATCH 43/67] add documentation for combinator --- .dotstop_extensions/README.md | 57 +++++++++++++++++++++++++++++-- .dotstop_extensions/validators.py | 4 +++ 2 files changed, 59 insertions(+), 2 deletions(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index 6e57029c78..3fdff27eec 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -193,7 +193,7 @@ references: This reference queries `https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure+branch%3A{self._branch}` and collects the number of failed workflow runs as its content. Here, owner, repo and branch are the arguments given to the constructor of the reference. If no branch is specified, then all failures are collected, i.e. `https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure` is queried. -In case the website is un-reachable, or the github layout changes drastically so that the number of failed workflow runs does not exists at the expected location, an error is thrown. +In case the website is un-reachable, or the github layout changes drastically so that the number of failed workflow runs does not exist at the expected location, an error is thrown. The expected configuration is @@ -343,7 +343,7 @@ It is of utmost importance that the arguments come with quotation marks, otherwi The automatic validator `is_branch_protected` tries to push to the specified branch, i.e. to execute the command `git push origin HEAD:{branch}`. In case any changes are staged during the execution of the validator, an error is thrown before the push occurs. -Since the validator is intended to be executed during a workflow run, where no change is staged, it is no expected that the error is thrown. 
+Since the validator is intended to be executed during a workflow run, where no change is staged, it is not expected that the error is thrown. The expected configuration is given as follows: @@ -379,6 +379,59 @@ evidence: digits: 3 ``` +## combinator + +The automatic validator `combinator` is a meta-validator that executes multiple validators and combines their scores using a weighted average. This enables the validation of complex trustable items that require evidence from multiple sources or validation methods. + +The combinator accepts a list of validators, each with its own configuration and optional weight. Each validator is executed independently, and their scores are combined using the formula: `(score1 * weight1 + score2 * weight2 + ...) / (weight1 + weight2 + ...)`. If no weights are specified, all validators are treated with equal weight (weight = 1.0). + +The combinator supports the following validator types: +- `check_artifact_exists` +- `https_response_time` +- `check_test_results` +- `file_exists` +- `sha_checker` +- `check_issues` +- `did_workflows_fail` +- `is_branch_protected` +- `coveralls_reporter` + +The expected configuration is as follows: + +``` +evidence: + type: combinator + configuration: + validators: + - type: "check_test_results" + weight: 2.0 # optional, defaults to 1.0 + configuration: + tests: + - class_lexer + - unicode1 + - type: "https_response_time" + weight: 1.0 # optional, defaults to 1.0 + configuration: + target_seconds: 2 + urls: + - "https://github.com/nlohmann/json/issues" + - type: "coveralls_reporter" + weight: 1.5 # optional, defaults to 1.0 + configuration: + owner: "score-json" + repo: "json" + branch: "main" + line_coverage: 99.186 + branch_coverage: 93.865 + digits: 3 + - type: "did_workflows_fail" + configuration: + owner: "eclipse-score" + repo: "inc_nlohmann_json" + branch: "json_version_3_12_0" +``` + +All weights must be non-negative. If the sum of all weights is zero, the combinator returns a score of 0.0. 
The combinator aggregates all exceptions and warnings from the individual validators and returns them alongside the combined score. # Data store interface diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py index afcd6027b7..36806e1a78 100644 --- a/.dotstop_extensions/validators.py +++ b/.dotstop_extensions/validators.py @@ -493,6 +493,10 @@ def combinator(configuration: dict[str, yaml]) -> tuple[float, list[Exception | validator_score, validator_errors = is_branch_protected(validator_configuration) scores.append(validator_score) exceptions.extend(validator_errors) + elif validator_type == "coveralls_reporter": + validator_score, validator_errors = coveralls_reporter(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) if sum(weights) == 0.0: return (0.0, exceptions) else: From 8ca25c4518a634cca2b607a8db227325f20a8251 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Tue, 4 Nov 2025 14:08:35 +0000 Subject: [PATCH 44/67] fix script path error --- .github/workflows/publish_documentation.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish_documentation.yml b/.github/workflows/publish_documentation.yml index b18158308e..2fa89d4ac8 100644 --- a/.github/workflows/publish_documentation.yml +++ b/.github/workflows/publish_documentation.yml @@ -112,7 +112,7 @@ jobs: gh issue list --repo "$REPO" --state closed --limit 10000 --json number,title,state,createdAt,url,labels > raw_closed_issues.json # print the list of misbehaviours - python3 ./TSF/scripts/generate_list_of_misbehaviours.py > $OUTPUT_FILE + python3 TSF/scripts/generate_list_of_misbehaviours.py > $OUTPUT_FILE # Push misbehaviours file to save_historical_data branch git add TSF/misbehaviours.md From 5dcbaecbebd594b2e77c9bfe4ec17418f6a07471 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Tue, 4 Nov 2025 14:08:56 +0000 Subject: [PATCH 45/67] fix f string error --- .dotstop_extensions/references.py | 4 ++-- 
.dotstop_extensions/validators.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.dotstop_extensions/references.py b/.dotstop_extensions/references.py index d64303d656..ebe88280ed 100644 --- a/.dotstop_extensions/references.py +++ b/.dotstop_extensions/references.py @@ -822,7 +822,7 @@ def content(self) -> bytes: # build the url url = f"https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure" if self._branch is not None: - url += "+branch%3A{self._branch}" + url += f"+branch%3A{self._branch}" # ask the website res = requests.get(url) # if call is not successful, raise an error @@ -837,7 +837,7 @@ def content(self) -> bytes: return m.group(1).encode('utf-8') def as_markdown(self, filepath: None | str = None) -> str: - if self._branch is not None: + if self._branch is None: return f"{self.content.decode('utf-8')} workflows failed on {self._owner}/{self._repo}" else: return f"{self.content.decode('utf-8')} workflows failed on branch {self._branch} of {self._owner}/{self._repo}" diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py index 36806e1a78..f751bbdc17 100644 --- a/.dotstop_extensions/validators.py +++ b/.dotstop_extensions/validators.py @@ -367,7 +367,7 @@ def did_workflows_fail(configuration: dict[str, yaml]) -> tuple[float, list[Exce url = f"https://github.com/{owner}/{repo}/actions?query=event%3A{event}+is%3Afailure" branch = configuration.get("branch",None) if branch is not None: - url += "+branch%3A{branch}" + url += f"+branch%3A{branch}" res = requests.get(url) if res.status_code != 200: return (0.0, [RuntimeError(f"The website {url} can not be successfully reached!")]) From 5eb66ce9270c16f457194d5b5f98ebb0b6501ff8 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Tue, 4 Nov 2025 14:11:46 +0000 Subject: [PATCH 46/67] escape outer quotation marks --- TSF/scripts/generate_list_of_misbehaviours.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/TSF/scripts/generate_list_of_misbehaviours.py b/TSF/scripts/generate_list_of_misbehaviours.py index 485f0005c8..901035e5bb 100644 --- a/TSF/scripts/generate_list_of_misbehaviours.py +++ b/TSF/scripts/generate_list_of_misbehaviours.py @@ -33,13 +33,13 @@ print("## Open issues\n") for issue in relevant_open_issues: - print(f"### [#{issue.get("number")}]({issue.get("url")})\n- **Title:** {issue.get("title")}\n- **State:** {issue.get("state")}\n- **Created At:** {issue.get("createdAt")}\n") + print(f'### [#{issue.get("number")}]({issue.get("url")})\n- **Title:** {issue.get("title")}\n- **State:** {issue.get("state")}\n- **Created At:** {issue.get("createdAt")}\n') comment_nlohmann_misbehaviours(int(issue.get("number"))) print("\n") print(f"\n## Closed Issues (since version {version}\n") for issue in relevant_closed_issues: - print(f"### [#{issue.get("number")}]({issue.get("url")})\n- **Title:** {issue.get("title")}\n- **State:** {issue.get("state")}\n- **Created At:** {issue.get("createdAt")}\n") + print(f'### [#{issue.get("number")}]({issue.get("url")})\n- **Title:** {issue.get("title")}\n- **State:** {issue.get("state")}\n- **Created At:** {issue.get("createdAt")}\n') comment_nlohmann_misbehaviours(int(issue.get("number"))) print("\n") From 7329df4f16a2b54c002a387ba1c2b17b56b8807a Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Tue, 4 Nov 2025 14:12:56 +0000 Subject: [PATCH 47/67] fix typo --- TSF/trustable/assumptions-of-use/AOU-29.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TSF/trustable/assumptions-of-use/AOU-29.md b/TSF/trustable/assumptions-of-use/AOU-29.md index 971e8fa829..a422f93ec1 100644 --- a/TSF/trustable/assumptions-of-use/AOU-29.md +++ b/TSF/trustable/assumptions-of-use/AOU-29.md @@ -3,4 +3,4 @@ level: 1.1 normative: true --- -The integrator evaluate the necessity of monitoring mechanisms and implement these together with a persistent storage of monitoring data as needed. 
\ No newline at end of file +The integrator shall evaluate the necessity of monitoring mechanisms and implement these together with a persistent storage of monitoring data as needed. \ No newline at end of file From 524e092844043a12de227ec75ff6bd63dfb98167 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Tue, 4 Nov 2025 14:14:07 +0000 Subject: [PATCH 48/67] undo JLS-05 change --- TSF/trustable/statements/JLS-05.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TSF/trustable/statements/JLS-05.md b/TSF/trustable/statements/JLS-05.md index b1e273beb4..70714a9622 100644 --- a/TSF/trustable/statements/JLS-05.md +++ b/TSF/trustable/statements/JLS-05.md @@ -38,4 +38,4 @@ score: Erikhu1: 1.0 --- -The OSS nlohmann/json is widely used and actively maintained; bugs and misbehaviours are tracked publicly and transparently. \ No newline at end of file +The OSS nlohmann/json is widely used, actively maintained and uses github issues to track bugs and misbehaviours. \ No newline at end of file From 5015cdee65d4aa788b406f580c4a8524339d503f Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Tue, 4 Nov 2025 14:35:18 +0000 Subject: [PATCH 49/67] rename checklist files --- .../assertions/{TA-Analysis.md => TA-ANALYSIS-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-ANALYSIS.md | 2 +- .../assertions/{TA-Behaviours.md => TA-BEHAVIOURS-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-BEHAVIOURS.md | 2 +- .../assertions/{TA-Confidence.md => TA-CONFIDENCE-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-CONFIDENCE.md | 2 +- .../{TA-Constraints.md => TA-CONSTRAINTS-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-CONSTRAINTS.md | 2 +- TSF/trustable/assertions/{TA-Data.md => TA-DATA-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-DATA.md | 2 +- TSF/trustable/assertions/{TA-Fixes.md => TA-FIXES-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-FIXES.md | 2 +- .../assertions/{TA-Indicators.md => TA-INDICATORS-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-INDICATORS.md | 2 +- 
.../assertions/{TA-Inputs.md => TA-INPUTS-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-INPUTS.md | 2 +- .../assertions/{TA-Iterations.md => TA-ITERATIONS-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-ITERATIONS.md | 2 +- .../{TA-Methodologies.md => TA-METHODOLOGIES-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-METHODOLOGIES.md | 2 +- .../{TA-Misbehaviours.md => TA-MISBEHAVIOURS-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-MISBEHAVIOURS.md | 2 +- .../assertions/{TA-Releases.md => TA-RELEASES-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-RELEASES.md | 2 +- .../{TA-Supply-Chain.md => TA-SUPPLY_CHAIN-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-SUPPLY_CHAIN.md | 2 +- TSF/trustable/assertions/{TA-Tests.md => TA-TESTS-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-TESTS.md | 2 +- .../assertions/{TA-Updates.md => TA-UPDATES-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-UPDATES.md | 2 +- .../assertions/{TA-Validation.md => TA-VALIDATION-CHECKLIST.md} | 0 TSF/trustable/assertions/TA-VALIDATION.md | 2 +- 32 files changed, 16 insertions(+), 16 deletions(-) rename TSF/trustable/assertions/{TA-Analysis.md => TA-ANALYSIS-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Behaviours.md => TA-BEHAVIOURS-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Confidence.md => TA-CONFIDENCE-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Constraints.md => TA-CONSTRAINTS-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Data.md => TA-DATA-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Fixes.md => TA-FIXES-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Indicators.md => TA-INDICATORS-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Inputs.md => TA-INPUTS-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Iterations.md => TA-ITERATIONS-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Methodologies.md => TA-METHODOLOGIES-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Misbehaviours.md => 
TA-MISBEHAVIOURS-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Releases.md => TA-RELEASES-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Supply-Chain.md => TA-SUPPLY_CHAIN-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Tests.md => TA-TESTS-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Updates.md => TA-UPDATES-CHECKLIST.md} (100%) rename TSF/trustable/assertions/{TA-Validation.md => TA-VALIDATION-CHECKLIST.md} (100%) diff --git a/TSF/trustable/assertions/TA-Analysis.md b/TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Analysis.md rename to TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-ANALYSIS.md b/TSF/trustable/assertions/TA-ANALYSIS.md index 192061db14..d9f20b5e3d 100644 --- a/TSF/trustable/assertions/TA-ANALYSIS.md +++ b/TSF/trustable/assertions/TA-ANALYSIS.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Analysis.md + path: ./TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md --- Collected data from tests and monitoring of deployed software is analysed according to specified objectives. 
diff --git a/TSF/trustable/assertions/TA-Behaviours.md b/TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Behaviours.md rename to TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-BEHAVIOURS.md b/TSF/trustable/assertions/TA-BEHAVIOURS.md index 8a709edfab..fb527009d0 100644 --- a/TSF/trustable/assertions/TA-BEHAVIOURS.md +++ b/TSF/trustable/assertions/TA-BEHAVIOURS.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Behaviours.md + path: ./TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md --- Expected or required behaviours for nlohmann/json library are identified, specified, verified and validated based on analysis. diff --git a/TSF/trustable/assertions/TA-Confidence.md b/TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Confidence.md rename to TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-CONFIDENCE.md b/TSF/trustable/assertions/TA-CONFIDENCE.md index 964e14cebe..206733dfc2 100644 --- a/TSF/trustable/assertions/TA-CONFIDENCE.md +++ b/TSF/trustable/assertions/TA-CONFIDENCE.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Confidence.md + path: ./TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md --- Confidence in nlohmann/json library is measured based on results of analysis. 
diff --git a/TSF/trustable/assertions/TA-Constraints.md b/TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Constraints.md rename to TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-CONSTRAINTS.md b/TSF/trustable/assertions/TA-CONSTRAINTS.md index f75ef9d10a..76f8d2bb54 100644 --- a/TSF/trustable/assertions/TA-CONSTRAINTS.md +++ b/TSF/trustable/assertions/TA-CONSTRAINTS.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Constraints.md + path: ./TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md --- Constraints on adaptation and deployment of nlohmann/json library are specified. diff --git a/TSF/trustable/assertions/TA-Data.md b/TSF/trustable/assertions/TA-DATA-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Data.md rename to TSF/trustable/assertions/TA-DATA-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-DATA.md b/TSF/trustable/assertions/TA-DATA.md index 5b227f9143..2e69be7b80 100644 --- a/TSF/trustable/assertions/TA-DATA.md +++ b/TSF/trustable/assertions/TA-DATA.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Data.md + path: ./TSF/trustable/assertions/TA-DATA-CHECKLIST.md --- Data is collected from tests, and from monitoring of deployed software, according to specified objectives. 
diff --git a/TSF/trustable/assertions/TA-Fixes.md b/TSF/trustable/assertions/TA-FIXES-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Fixes.md rename to TSF/trustable/assertions/TA-FIXES-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-FIXES.md b/TSF/trustable/assertions/TA-FIXES.md index 402c111376..435cf89f77 100644 --- a/TSF/trustable/assertions/TA-FIXES.md +++ b/TSF/trustable/assertions/TA-FIXES.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Fixes.md + path: ./TSF/trustable/assertions/TA-FIXES-CHECKLIST.md --- Known bugs or misbehaviours are analysed and triaged, and critical fixes or mitigations are implemented or applied. diff --git a/TSF/trustable/assertions/TA-Indicators.md b/TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Indicators.md rename to TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-INDICATORS.md b/TSF/trustable/assertions/TA-INDICATORS.md index f742f694d1..3aeced44cc 100644 --- a/TSF/trustable/assertions/TA-INDICATORS.md +++ b/TSF/trustable/assertions/TA-INDICATORS.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Indicators.md + path: ./TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md --- Advanced warning indicators for misbehaviours are identified, and monitoring mechanisms are specified, verified and validated based on analysis. 
diff --git a/TSF/trustable/assertions/TA-Inputs.md b/TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Inputs.md rename to TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-INPUTS.md b/TSF/trustable/assertions/TA-INPUTS.md index 6284d05cad..c1a2339f53 100644 --- a/TSF/trustable/assertions/TA-INPUTS.md +++ b/TSF/trustable/assertions/TA-INPUTS.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Inputs.md + path: ./TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md --- All inputs to nlohmann/json library are assessed, to identify potential risks and issues. diff --git a/TSF/trustable/assertions/TA-Iterations.md b/TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Iterations.md rename to TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-ITERATIONS.md b/TSF/trustable/assertions/TA-ITERATIONS.md index 65e0ac14c8..7e9e6ac5d8 100644 --- a/TSF/trustable/assertions/TA-ITERATIONS.md +++ b/TSF/trustable/assertions/TA-ITERATIONS.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Iterations.md + path: ./TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md --- All constructed iterations of nlohmann/json library include source code, build instructions, tests, results and attestations. 
diff --git a/TSF/trustable/assertions/TA-Methodologies.md b/TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Methodologies.md rename to TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-METHODOLOGIES.md b/TSF/trustable/assertions/TA-METHODOLOGIES.md index 938f780e14..7e5a3cac56 100644 --- a/TSF/trustable/assertions/TA-METHODOLOGIES.md +++ b/TSF/trustable/assertions/TA-METHODOLOGIES.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Methodologies.md + path: ./TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md --- Manual methodologies applied for nlohmann/json library by contributors, and their results, are managed according to specified objectives. diff --git a/TSF/trustable/assertions/TA-Misbehaviours.md b/TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Misbehaviours.md rename to TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-MISBEHAVIOURS.md b/TSF/trustable/assertions/TA-MISBEHAVIOURS.md index b3ded4e5da..dc178ff789 100644 --- a/TSF/trustable/assertions/TA-MISBEHAVIOURS.md +++ b/TSF/trustable/assertions/TA-MISBEHAVIOURS.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Misbehaviours.md + path: ./TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md --- Prohibited misbehaviours for nlohmann/json library are identified, and mitigations are specified, verified and validated based on analysis. 
diff --git a/TSF/trustable/assertions/TA-Releases.md b/TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Releases.md rename to TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-RELEASES.md b/TSF/trustable/assertions/TA-RELEASES.md index f32550fa8e..8e1aa2c11e 100644 --- a/TSF/trustable/assertions/TA-RELEASES.md +++ b/TSF/trustable/assertions/TA-RELEASES.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Releases.md + path: ./TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md --- Construction of nlohmann/json library releases is fully repeatable and the results are fully reproducible, with any exceptions documented and justified. diff --git a/TSF/trustable/assertions/TA-Supply-Chain.md b/TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Supply-Chain.md rename to TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md b/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md index 134ee73d76..90f6b08f52 100644 --- a/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md +++ b/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Supply-Chain.md + path: ./TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md --- All sources for nlohmann/json library and tools are mirrored in our controlled environment. 
diff --git a/TSF/trustable/assertions/TA-Tests.md b/TSF/trustable/assertions/TA-TESTS-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Tests.md rename to TSF/trustable/assertions/TA-TESTS-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-TESTS.md b/TSF/trustable/assertions/TA-TESTS.md index fa737ec865..e78fea3221 100644 --- a/TSF/trustable/assertions/TA-TESTS.md +++ b/TSF/trustable/assertions/TA-TESTS.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Tests.md + path: ./TSF/trustable/assertions/TA-TESTS-CHECKLIST.md --- All tests for nlohmann/json library, and its build and test environments, are constructed from controlled/mirrored sources and are reproducible, with any exceptions documented. diff --git a/TSF/trustable/assertions/TA-Updates.md b/TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Updates.md rename to TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-UPDATES.md b/TSF/trustable/assertions/TA-UPDATES.md index 69a72e3712..0113c23a4f 100644 --- a/TSF/trustable/assertions/TA-UPDATES.md +++ b/TSF/trustable/assertions/TA-UPDATES.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Updates.md + path: ./TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md --- nlohmann/json library components, configurations and tools are updated under specified change and configuration management controls. 
diff --git a/TSF/trustable/assertions/TA-Validation.md b/TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md similarity index 100% rename from TSF/trustable/assertions/TA-Validation.md rename to TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md diff --git a/TSF/trustable/assertions/TA-VALIDATION.md b/TSF/trustable/assertions/TA-VALIDATION.md index fd48b70293..6aba0b230c 100644 --- a/TSF/trustable/assertions/TA-VALIDATION.md +++ b/TSF/trustable/assertions/TA-VALIDATION.md @@ -3,7 +3,7 @@ level: 1.1 normative: true references: - type: checklist - path: ./TSF/trustable/assertions/TA-Validation.md + path: ./TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md --- All specified tests are executed repeatedly, under defined conditions in controlled environments, according to specified objectives. From 54b2006b0028d8446074f89c1dc25c58050810e0 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Wed, 5 Nov 2025 09:50:36 +0000 Subject: [PATCH 50/67] label position agnostic script and fix typos --- TSF/scripts/generate_list_of_misbehaviours.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/TSF/scripts/generate_list_of_misbehaviours.py b/TSF/scripts/generate_list_of_misbehaviours.py index 901035e5bb..c6cac25d12 100644 --- a/TSF/scripts/generate_list_of_misbehaviours.py +++ b/TSF/scripts/generate_list_of_misbehaviours.py @@ -13,13 +13,13 @@ all_open_issues = json.load(list_1) relevant_open_issues = [all_open_issues[i] for i in range(0,len(all_open_issues)) if len(all_open_issues[i].get("labels",[]))!=0 - and (all_open_issues[i].get("labels"))[0].get("name") == "kind: bug" + and any(label.get("name") == "kind: bug" for label in all_open_issues[i].get("labels", [])) ] with open("raw_closed_issues.json") as list_2: all_closed_issues = json.load(list_2) relevant_closed_issues = [all_closed_issues[i] for i in range(0,len(all_closed_issues)) if len(all_closed_issues[i].get("labels",[]))!=0 - and (all_closed_issues[i].get("labels"))[0].get("name") == "kind: bug" 
+ and any(label.get("name") == "kind: bug" for label in all_closed_issues[i].get("labels", [])) and datetime.strptime(all_closed_issues[i].get("createdAt","2000-01-01T00:00:00Z"),"%Y-%m-%dT%H:%M:%SZ") .replace(tzinfo=timezone.utc) .timestamp() @@ -33,13 +33,13 @@ print("## Open issues\n") for issue in relevant_open_issues: - print(f'### [#{issue.get("number")}]({issue.get("url")})\n- **Title:** {issue.get("title")}\n- **State:** {issue.get("state")}\n- **Created At:** {issue.get("createdAt")}\n') + print(f"### [#{issue.get('number')}]({issue.get('url')})\n- **Title:** {issue.get('title')}\n- **State:** {issue.get('state')}\n- **Created At:** {issue.get('createdAt')}\n") comment_nlohmann_misbehaviours(int(issue.get("number"))) print("\n") - print(f"\n## Closed Issues (since version {version}\n") + print(f"\n## Closed Issues (since version {version})\n") for issue in relevant_closed_issues: - print(f'### [#{issue.get("number")}]({issue.get("url")})\n- **Title:** {issue.get("title")}\n- **State:** {issue.get("state")}\n- **Created At:** {issue.get("createdAt")}\n') + print(f"### [#{issue.get('number')}]({issue.get('url')})\n- **Title:** {issue.get('title')}\n- **State:** {issue.get('state')}\n- **Created At:** {issue.get('createdAt')}\n") comment_nlohmann_misbehaviours(int(issue.get("number"))) print("\n") From b9e046fe838a61f6983f4602f8b96fb70c0a7dfd Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Thu, 6 Nov 2025 18:56:52 +0100 Subject: [PATCH 51/67] Update .dotstop_extensions/README.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Luca Füger Signed-off-by: Erik Hu --- .dotstop_extensions/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index 3fdff27eec..3779c3076e 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -337,7 +337,7 @@ evidence: action: "push" # optional, default is push ``` -It 
is of utmost importance that the arguments come with quotation marks, otherwise, the update helper does not work as intended. +It is of utmost importance that the arguments come with quotation marks. Otherwise, the update helper does not work as intended. ## is_branch_protected From c823fcdb9a120bddf0befd742deabf76ccc3fc4f Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Thu, 6 Nov 2025 18:57:12 +0100 Subject: [PATCH 52/67] Update .dotstop_extensions/README.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Luca Füger Signed-off-by: Erik Hu --- .dotstop_extensions/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index 3779c3076e..be21c796c8 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -354,7 +354,7 @@ evidence: branch: "json_version_3_12_0" # name of the branch ``` -It is of utmost importance that the arguments come with quotation marks, otherwise, the update helper does not work as intended. +It is of utmost importance that the arguments come with quotation marks. Otherwise, the update helper does not work as intended. ## coveralls_reporter From 100999b4a0f85a706f38d69aa9d1f9c984f11c73 Mon Sep 17 00:00:00 2001 From: Jonas-Kirchhoff Date: Thu, 6 Nov 2025 19:16:08 +0100 Subject: [PATCH 53/67] fix verb --- TSF/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TSF/README.md b/TSF/README.md index 351e08acfb..29510230ee 100644 --- a/TSF/README.md +++ b/TSF/README.md @@ -217,7 +217,7 @@ It is highly recommended to not delete SME scores under usual circumstances; mos The following unusual circumstances can, after careful consideration, justify the removal or (much preferably!) the request for re-evaluation by the original SMEs: * change of references: - If, e.g. 
due to an update of ``nlohmann/json``, the references of any items (be it tests or code) change, then this should trigger a re-evaluation of the statement. + If, e.g. due to an update of ``nlohmann/json``, the reference of any item (be it tests or code) changes, then this should trigger a re-evaluation of the statement. In particular if the behaviour changed significantly, it can be justifiable to assume that the old SME scores do not reflect the statement anymore. * addition of automatic validators: Recall that the SME judges in the absence of an automatic validator the validity of the statement using their own knowledge as well as the provided references, while in the presence of an automatic validator the validity of the validator score to represent the true score of the item, which is estimated as in the case of no validator, is judged. From cf2effe39adb15ac182d21743b3d88cdac44796c Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 09:02:05 +0000 Subject: [PATCH 54/67] rename NumberOfFailures to workflow_failures --- .dotstop_extensions/README.md | 2 +- .dotstop_extensions/references.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index be21c796c8..de7285c1eb 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -188,7 +188,7 @@ references: --- ``` -## NumberOfFailures +## workflow_failures This reference queries `https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure+branch%3A{self._branch}` and collects the number of failed workflow runs as its content. Here, owner, repo and branch are the arguments given to the constructor of the reference. 
diff --git a/.dotstop_extensions/references.py b/.dotstop_extensions/references.py index ebe88280ed..00a5c45b87 100644 --- a/.dotstop_extensions/references.py +++ b/.dotstop_extensions/references.py @@ -807,7 +807,7 @@ def __str__(self) -> str: del LFR -class NumberOfFailures(BaseReference): +class workflow_failures(BaseReference): def __init__(self, owner: str, repo: str, branch: str | None = None) -> None: self._owner = owner self._repo = repo From d65a9e90528ed3908ab88c2412677c96cd09ee93 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 09:07:50 +0000 Subject: [PATCH 55/67] clarify overload nth occurence --- .dotstop_extensions/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index de7285c1eb..d768058fe9 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -69,7 +69,7 @@ references: ``` Since functions may be overloaded, a `FunctionReference` can be initialised with an optional overload-parameter. -The overload-parameter specifies which implementation of the function is referred to, i.e. if the overload-parameter for the function ``class::function()`` is set to _n_, then the _n_-th implementation of ``function()`` within the class ``class`` is used, if it exists; otherwise, an error is thrown. Additionally, it is possible, but not mandatory, to give a description. The full example is: +The overload-parameter specifies which implementation of the function is referred to, i.e. if the overload-parameter for the function ``class::function()`` is set to _n_, then the _n_-th implementation when counting the occurrences from top to bottom, of ``function()`` within the class ``class`` is used, if it exists; otherwise, an error is thrown. Additionally, it is possible, but not mandatory, to give a description. The full example is: ``` --- ... 
From 53aa7df3f4bacad5369e0a99213b48b793ee5df3 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 09:55:14 +0000 Subject: [PATCH 56/67] update JLS-26 --- TSF/trustable/statements/JLS-06.md | 14 ++------------ TSF/trustable/statements/JLS-26.md | 5 +++-- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/TSF/trustable/statements/JLS-06.md b/TSF/trustable/statements/JLS-06.md index 9d1ab173ba..33f7ea037d 100644 --- a/TSF/trustable/statements/JLS-06.md +++ b/TSF/trustable/statements/JLS-06.md @@ -3,19 +3,9 @@ level: 1.1 normative: true references: - type: workflow_failures - owner: "score-json" + owner: "nlohmann" repo: "json" - branch: "main" -evidence: - type: "check_artifact_exists" - configuration: - check_amalgamation: include - codeql: include - dependency_review: include - labeler: include - publish_documentation: include - test_trudag_extensions: include - ubuntu: include + branch: "master" --- Changes to the code (main branch) are applied only after code review and passing of all pipelines. diff --git a/TSF/trustable/statements/JLS-26.md b/TSF/trustable/statements/JLS-26.md index 59a5717646..7aa421b105 100644 --- a/TSF/trustable/statements/JLS-26.md +++ b/TSF/trustable/statements/JLS-26.md @@ -3,11 +3,12 @@ level: 1.1 normative: true references: - type: workflow_failures - owner: "score-json" + owner: "nlohmann" repo: "json" + branch: "master" score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 --- -Pipeline execution results are analyzed in the fork and the original nlohmann/json repository. \ No newline at end of file +Pipeline execution results are analyzed in nlohmann/json. 
\ No newline at end of file From 500d934b67f08b6bd50913b06efa34b903f82b08 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 10:01:44 +0000 Subject: [PATCH 57/67] update JLS-27 --- TSF/trustable/statements/JLS-27.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TSF/trustable/statements/JLS-27.md b/TSF/trustable/statements/JLS-27.md index 2fb344db9d..62cc61cd9b 100644 --- a/TSF/trustable/statements/JLS-27.md +++ b/TSF/trustable/statements/JLS-27.md @@ -12,4 +12,4 @@ evidence: digits: 3 --- -The test coverage for this version of nlohmann/json is monitored by coveralls and stays constant. \ No newline at end of file +The test coverage for this version of nlohmann/json is monitored using Coveralls and is not decreasing over time, unless reasonably justified. \ No newline at end of file From 49ba9a1d7cba94a78df701e00a4dc8d7ce8bdd66 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 10:06:52 +0000 Subject: [PATCH 58/67] clarify combinator --- .dotstop_extensions/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index d768058fe9..176002db02 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -381,7 +381,7 @@ ## combinator -The automatic validator `combinator` is a meta-validator that executes multiple validators and combines their scores using a weighted average. This enables the validation of complex trustable items that require evidence from multiple sources or validation methods. +The trudag tool currently does not support the use of multiple custom validators for a single TSF item. To work around this, the validator `combinator` is implemented as a meta-validator that executes multiple validators and combines their scores using a weighted average. This enables the validation of complex trustable items that require evidence from multiple sources or validation methods. 
The combinator accepts a list of validators, each with its own configuration and optional weight. Each validator is executed independently, and their scores are combined using the formula: `(score1 * weight1 + score2 * weight2 + ...) / (weight1 + weight2 + ...)`. If no weights are specified, all validators are treated with equal weight (weight = 1.0). From ebdfea7c3f455ba9f8a413a279ddf11621e26959 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 12:12:40 +0000 Subject: [PATCH 59/67] add reference test for ListOfTestCases --- .dotstop_extensions/test_references.py | 161 ++++++++++++++++++++++++- 1 file changed, 160 insertions(+), 1 deletion(-) diff --git a/.dotstop_extensions/test_references.py b/.dotstop_extensions/test_references.py index f558fc80f8..71f8ce06a5 100644 --- a/.dotstop_extensions/test_references.py +++ b/.dotstop_extensions/test_references.py @@ -2,7 +2,7 @@ import tempfile from pathlib import Path from unittest.mock import patch -from references import CPPTestReference, JSONTestsuiteReference, FunctionReference, ItemReference +from references import CPPTestReference, JSONTestsuiteReference, FunctionReference, ItemReference, ListOfTestCases from validators import file_exists @@ -697,3 +697,162 @@ def test_file_exists(tmp_path): assert score == 2/4 assert any(isinstance(exception,Warning) for exception in exceptions) assert any(isinstance(exception,RuntimeError) for exception in exceptions) + +# ListOfTestCases tests +@pytest.fixture +def sample_unit_test_content(): + """Sample unit test file content for testing ListOfTestCases.""" + return '''TEST_CASE("basic arithmetic") +{ + SECTION("addition") + { + CHECK(1 + 1 == 2); + + SECTION("positive numbers") + { + CHECK(5 + 3 == 8); + } + } + + SECTION("multiplication") + { + CHECK(2 * 3 == 6); + } +} + +TEST_CASE("another test") +{ + CHECK(true); +} +''' + +@pytest.fixture +def temp_unit_test_file(sample_unit_test_content, tmp_path): + """Create a temporary unit test file.""" + test_file = 
tmp_path / "unit-sample.cpp" + test_file.write_text(sample_unit_test_content) + return test_file + +def test_list_of_test_cases_type_classmethod(): + """Test the type class method.""" + assert ListOfTestCases.type() == "list_of_test_cases" + +def test_compile_string(): + """Test compile_string static method.""" + # Test single item + result = ListOfTestCases.compile_string(["test_case"]) + assert result == "* test_case" + + # Test nested items + result = ListOfTestCases.compile_string(["test_case", "section1", "section2"]) + assert result == " * section2" + + # Test proper indentation + result = ListOfTestCases.compile_string(["test_case", "section"]) + assert result == " * section" + +def test_compile_string_empty_list(): + """Test compile_string with empty list raises error.""" + with pytest.raises(RuntimeError, match="Received empty structural list; nonempty list expected."): + ListOfTestCases.compile_string([]) + +def test_extract_quotation(): + """Test extract_quotation static method.""" + # Test basic extraction + result = ListOfTestCases.extract_quotation('TEST_CASE("my test")') + assert result == "my test" + + # Test with multiple quotes (should return first) + result = ListOfTestCases.extract_quotation('SECTION("section1") and "section2"') + assert result == "section1" + +def test_extract_quotation_no_quotes(): + """Test extract_quotation with no quotes raises error.""" + with pytest.raises(RuntimeError, match="Expected quotation mark; none were detected."): + ListOfTestCases.extract_quotation("no quotes here") + +def test_extract_quotation_single_quote(): + """Test extract_quotation with single quote raises error.""" + with pytest.raises(RuntimeError, match="Expected quotation marks; only one was detected."): + ListOfTestCases.extract_quotation('single quote"') + +def test_remove_and_count_indent(): + """Test remove_and_count_indent static method.""" + # Test spaces + count, text = ListOfTestCases.remove_and_count_indent(" hello") + assert count == 4 + 
assert text == "hello" + + # Test tabs (4 spaces each) + count, text = ListOfTestCases.remove_and_count_indent("\t\thello") + assert count == 8 + assert text == "hello" + + # Test mixed tabs and spaces + count, text = ListOfTestCases.remove_and_count_indent("\t hello") + assert count == 6 + assert text == "hello" + + # Test no indentation + count, text = ListOfTestCases.remove_and_count_indent("hello") + assert count == 0 + assert text == "hello" + +def test_head_of_list(): + """Test head_of_list static method.""" + result = ListOfTestCases.head_of_list() + assert "## List of all unit-tests with test environments" in result + assert "TEST_CASEs" in result + assert "SECTIONs" in result + +def test_transform_test_file_to_test_name(): + """Test transform_test_file_to_test_name static method.""" + result = ListOfTestCases.transform_test_file_to_test_name("unit-example-test.cpp") + assert result == "test-example-test" + + result = ListOfTestCases.transform_test_file_to_test_name("unit-simple.cpp") + assert result == "test-simple" + +def test_extract_test_structure(temp_unit_test_file): + """Test extract_test_structure method.""" + list_ref = ListOfTestCases([]) + result = list_ref.extract_test_structure(temp_unit_test_file) + + # Should contain test cases and sections + assert "* basic arithmetic" in result + assert " * addition" in result + assert " * positive numbers" in result + assert " * multiplication" in result + assert "* another test" in result + +def test_extract_test_structure_empty_file(tmp_path): + """Test extract_test_structure with empty file.""" + empty_file = tmp_path / "empty.cpp" + empty_file.write_text("") + + list_ref = ListOfTestCases([]) + result = list_ref.extract_test_structure(empty_file) + assert result == "" + +def test_list_of_test_cases_init(): + """Test ListOfTestCases initialization.""" + test_files = ["tests/unit-test1.cpp", "tests/unit-test2.cpp"] + list_ref = ListOfTestCases(test_files, "custom.db", "custom_table") + + assert 
list_ref._test_files == test_files + assert list_ref._database == "custom.db" + assert list_ref._table == "custom_table" + +def test_list_of_test_cases_init_defaults(): + """Test ListOfTestCases initialization with default parameters.""" + test_files = ["tests/unit-test1.cpp"] + list_ref = ListOfTestCases(test_files) + + assert list_ref._test_files == test_files + assert list_ref._database == "artifacts/MemoryEfficientTestResults.db" + assert list_ref._table == "test_results" + +def test_str_method(): + """Test __str__ method.""" + list_ref = ListOfTestCases(["test_file"]) + assert str(list_ref) == "List of all unit-tests" From 619ba3cfe27a0d68bd22ddb53cc9032c2f8d730c Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 12:15:20 +0000 Subject: [PATCH 60/67] remove comma --- .dotstop_extensions/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index 176002db02..8294fc0ed7 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -69,7 +69,7 @@ references: ``` Since functions may be overloaded, a `FunctionReference` can be initialised with an optional overload-parameter. -The overload-parameter specifies which implementation of the function is referred to, i.e. if the overload-parameter for the function ``class::function()`` is set to _n_, then the _n_-th implementation when counting the occurrences from top to bottom, of ``function()`` within the class ``class`` is used, if it exists; otherwise, an error is thrown. Additionally, it is possible, but not mandatory, to give a description. The full example is: +The overload-parameter specifies which implementation of the function is referred to, i.e. 
if the overload-parameter for the function ``class::function()`` is set to _n_, then the _n_-th implementation when counting the occurrences from top to bottom of ``function()`` within the class ``class`` is used, if it exists; otherwise, an error is thrown. Additionally, it is possible, but not mandatory, to give a description. The full example is: ``` --- ... From b1ac2cc7ba07f60c8555d393824fc96f07ab0c0a Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 12:31:37 +0000 Subject: [PATCH 61/67] add function description and close connection --- .dotstop_extensions/references.py | 70 +++++++++++++++++++++---------- 1 file changed, 49 insertions(+), 21 deletions(-) diff --git a/.dotstop_extensions/references.py b/.dotstop_extensions/references.py index 00a5c45b87..f88cf515eb 100644 --- a/.dotstop_extensions/references.py +++ b/.dotstop_extensions/references.py @@ -660,7 +660,31 @@ def extract_test_structure(self, file_path: Path) -> str: return ("\n".join(lines_out) + "\n") if lines_out else "" def extract_recent_test_environments(self) -> dict: + """ + Extract recent test environment information from the test results database. + + This method connects to the SQLite database specified in self._database and queries + the table specified in self._table to retrieve information about test environments + where unit tests were executed. It categorizes the results into tests that ran + without skipping any test cases ('noskip') and tests where some cases were skipped ('skip'). 
+ + The database is expected to have a table with columns: + - name: test file name (e.g., "test-example") + - compiler: compiler used (e.g., "gcc", "clang") + - cpp_standard: C++ standard used (e.g., "c++17", "c++20") + - skipped_cases: number of test cases that were skipped (0 means no skips) + + Returns: + dict: A dictionary where keys are test case names and values are dictionaries containing: + - "noskip": list of environments where all tests ran (no skipped cases) + - "skip": list of environments where some tests were skipped + Each environment entry contains compiler, standard, and (for skip) skipped count. + + Raises: + RuntimeError: If the database cannot be accessed or the expected table doesn't exist + """ fetched_data = dict() + connector = None try: # initialise connection to test result database connector = sqlite3.connect(self._database) @@ -669,29 +693,33 @@ def extract_recent_test_environments(self) -> dict: cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name = ?;",(self._table,)) if cursor.fetchone() is None: raise RuntimeError(f"Fatal Error: Could not find table {self._table} in database {self._database}.") + + # get all test-files from recent test executions + command = f"SELECT name FROM {self._table};" + cursor.execute(command) + raw_cases = cursor.fetchall() + cases = set([raw_case[0] for raw_case in raw_cases]) + # for each test-file + for case in cases: + case_data = dict() + # get the test-environments + command = f"SELECT compiler, cpp_standard FROM {self._table} WHERE name = ? and skipped_cases == 0" + cursor.execute(command,(case,)) + results = cursor.fetchall() + case_data["noskip"] = [{"compiler":result[0], "standard":result[1]} for result in results] + # some test-cases are skipped with certain environments + # It is unclear from the log, which cases are skipped; + # we leave this to the interested reader + command = f"SELECT compiler, cpp_standard, skipped_cases FROM {self._table} WHERE name = ? 
and skipped_cases != 0" + cursor.execute(command, (case,)) + results = cursor.fetchall() + case_data["skip"] = [{"compiler": result[0], "standard": result[1], "skipped": result[2]} for result in results] + fetched_data[case] = case_data except sqlite3.Error as e: raise RuntimeError(f"Fatal Error accessing database {self._database}: {e}") - # get all test-files from recent test executions - command = f"SELECT name FROM {self._table};" - cursor.execute(command) - raw_cases = cursor.fetchall() - cases = set([raw_case[0] for raw_case in raw_cases]) - # for each test-file - for case in cases: - case_data = dict() - # get the test-environments - command = f"SELECT compiler, cpp_standard FROM {self._table} WHERE name = ? and skipped_cases == 0" - cursor.execute(command,(case,)) - results = cursor.fetchall() - case_data["noskip"] = [{"compiler":result[0], "standard":result[1]} for result in results] - # some test-cases are skipped with certain environments - # It is unclear from the log, which cases are skipped; - # we leave this to the interested reader - command = f"SELECT compiler, cpp_standard, skipped_cases FROM {self._table} WHERE name = ? 
and skipped_cases != 0" - cursor.execute(command, (case,)) - results = cursor.fetchall() - case_data["skip"] = [{"compiler": result[0], "standard": result[1], "skipped": result[2]} for result in results] - fetched_data[case] = case_data + finally: + if connector: + connector.close() return fetched_data def fetch_all_test_data(self, input: list[str]): From fe90d8045b9678dc6163a018118a70cb7d5e183e Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 12:34:42 +0000 Subject: [PATCH 62/67] add function desc --- .dotstop_extensions/references.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/.dotstop_extensions/references.py b/.dotstop_extensions/references.py index f88cf515eb..80fb96d76e 100644 --- a/.dotstop_extensions/references.py +++ b/.dotstop_extensions/references.py @@ -723,6 +723,34 @@ def extract_recent_test_environments(self) -> dict: return fetched_data def fetch_all_test_data(self, input: list[str]): + """ + Extract and compile test structure information from C++ test files along with execution environment data. + + This method processes a list of file or directory paths to find C++ unit test files (matching pattern + "unit-*.cpp"), extracts their TEST_CASE and SECTION structure, and combines this with recent test + execution environment information from the database to generate a comprehensive markdown report. + + The method recursively searches directories for test files, parses each file to extract the hierarchical + test structure (TEST_CASE containing nested SECTIONs), and correlates this with historical execution + data to show which compiler/standard combinations were used to run the tests. + + Args: + input: List of file or directory paths to process. Files must match "unit-*.cpp" pattern. + Directories are recursively searched for matching test files. 
+ + Returns: + str: A markdown-formatted report containing: + - Header explaining the test structure format + - For each test file: nested bullet lists showing TEST_CASE and SECTION hierarchy + - Execution environment information showing which compiler/standard combinations + successfully ran all tests vs. which had some test cases skipped + - Notes about files that appear to have no recent execution history + + Note: + The method relies on extract_recent_test_environments() to get database information + and extract_test_structure() to parse individual test files. Test file names are + transformed using transform_test_file_to_test_name() to match database entries. + """ # inputs: path(s) to directory potentially containing some test-data extracted_test_data = [] recent_test_data = self.extract_recent_test_environments() From 862cb1967b6907141bd6111d6eb1088cac33b64b Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 14:32:15 +0000 Subject: [PATCH 63/67] remove is_branch_protected validator --- .dotstop_extensions/README.md | 18 ------------------ .dotstop_extensions/validators.py | 17 ----------------- TSF/trustable/statements/JLS-07.md | 4 ---- 3 files changed, 39 deletions(-) diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md index 8294fc0ed7..ae14fb01ad 100644 --- a/.dotstop_extensions/README.md +++ b/.dotstop_extensions/README.md @@ -339,23 +339,6 @@ evidence: It is of utmost importance that the arguments come with quotation marks. Otherwise, the update helper does not work as intended. -## is_branch_protected - -The automatic validator `is_branch_protected` tries to push to the specified branch, i.e. to execute the command `git push origin HEAD:{branch}`. -In case any changes are staged during the execution of the validator, an error is thrown before the push occurs. -Since the validator is intended to be executed during a workflow run, where no change is staged, it is not expected that the error is thrown. 
- -The expected configuration is given as follows: - -``` -evidence: - type: is_branch_protected - configuration: - branch: "json_version_3_12_0" # name of the branch -``` - -It is of utmost importance that the arguments come with quotation marks. Otherwise, the update helper does not work as intended. - ## coveralls_reporter The automatic validator `coveralls_reporter` queries the [coveralls](https://coveralls.io/) api to get the line and branch coverages calculated by the service, which is running on the repository. @@ -393,7 +376,6 @@ The combinator supports the following validator types: - `sha_checker` - `check_issues` - `did_workflows_fail` -- `is_branch_protected` - `coveralls_reporter` The expected configuration is as follows: diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py index f751bbdc17..f0095b4b87 100644 --- a/.dotstop_extensions/validators.py +++ b/.dotstop_extensions/validators.py @@ -377,19 +377,6 @@ def did_workflows_fail(configuration: dict[str, yaml]) -> tuple[float, list[Exce if m.group(1).strip() != "0": return (0.0, [Warning("There are failed workflows!")]) return (1.0, []) - -def is_branch_protected(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: - branch = configuration.get("branch",None) - if branch is None: - return (0.0, [RuntimeError("The branch is not specified.")]) - res = subprocess.run(["git", "diff", "--cached", "--quiet", "--exit-code"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) - if res.returncode != 0: - raise RuntimeError("There are currently staged changes. 
Please unstage to proceed.") - try: - subprocess.run(["git","push","origin",f"HEAD:{branch}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) - return (0.0, [RuntimeError(f"The branch {branch} is not protected!")]) - except: - return (1.0, []) def coveralls_reporter(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: owner = configuration.get("owner",None) @@ -489,10 +476,6 @@ def combinator(configuration: dict[str, yaml]) -> tuple[float, list[Exception | validator_score, validator_errors = did_workflows_fail(validator_configuration) scores.append(validator_score) exceptions.extend(validator_errors) - elif validator_type == "is_branch_protected": - validator_score, validator_errors = is_branch_protected(validator_configuration) - scores.append(validator_score) - exceptions.extend(validator_errors) elif validator_type == "coveralls_reporter": validator_score, validator_errors = coveralls_reporter(validator_configuration) scores.append(validator_score) diff --git a/TSF/trustable/statements/JLS-07.md b/TSF/trustable/statements/JLS-07.md index 4c327f2f77..0c8d55f74c 100644 --- a/TSF/trustable/statements/JLS-07.md +++ b/TSF/trustable/statements/JLS-07.md @@ -1,10 +1,6 @@ --- level: 1.1 normative: true -evidence: - type: is_branch_protected - configuration: - branch: "main" score: Jonas-Kirchhoff: 1.0 Erikhu1: 1.0 From fef09ca2ddab12c05237334c42ceacb1df6e5014 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 14:36:57 +0000 Subject: [PATCH 64/67] add try catch for did_workflows_fail --- .dotstop_extensions/validators.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py index f0095b4b87..2f5321cfef 100644 --- a/.dotstop_extensions/validators.py +++ b/.dotstop_extensions/validators.py @@ -368,9 +368,18 @@ def did_workflows_fail(configuration: dict[str, yaml]) -> tuple[float, list[Exce branch = 
configuration.get("branch",None) if branch is not None: url += f"+branch%3A{branch}" - res = requests.get(url) + + try: + res = requests.get(url, timeout=30) # Add timeout to prevent hanging + except requests.exceptions.ConnectionError as e: + return (0.0, [RuntimeError(f"Connection error when accessing {url}: {e}")]) + except requests.exceptions.Timeout as e: + return (0.0, [RuntimeError(f"Timeout error when accessing {url}: {e}")]) + except requests.exceptions.RequestException as e: + return (0.0, [RuntimeError(f"Request error when accessing {url}: {e}")]) + if res.status_code != 200: - return (0.0, [RuntimeError(f"The website {url} can not be successfully reached!")]) + return (0.0, [RuntimeError(f"The website {url} can not be successfully reached! Status code: {res.status_code}")]) m = re.search(r'(\d+)\s+workflow run results', res.text, flags=re.I) if m is None: return (0.0, [RuntimeError("The number of failed workflows can not be found.")]) From 224c5e0de5c2a0aa6072ec2f50f33de71bc3ca1d Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 14:38:28 +0000 Subject: [PATCH 65/67] update AOU-29 --- TSF/trustable/assumptions-of-use/AOU-29.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TSF/trustable/assumptions-of-use/AOU-29.md b/TSF/trustable/assumptions-of-use/AOU-29.md index a422f93ec1..aba111340a 100644 --- a/TSF/trustable/assumptions-of-use/AOU-29.md +++ b/TSF/trustable/assumptions-of-use/AOU-29.md @@ -3,4 +3,4 @@ level: 1.1 normative: true --- -The integrator shall evaluate the necessity of monitoring mechanisms and implement these together with a persistent storage of monitoring data as needed. \ No newline at end of file +The integrator shall evaluate the necessity of monitoring mechanisms and implement these in score-json together with a persistent storage of monitoring data as needed. 
\ No newline at end of file From 2abbb2724fa25616bc2afa5ecd0c9fc17ba691b9 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 16:12:09 +0000 Subject: [PATCH 66/67] update SME scores description --- TSF/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/TSF/README.md b/TSF/README.md index 0ca6844024..cdaa754c69 100644 --- a/TSF/README.md +++ b/TSF/README.md @@ -221,8 +221,7 @@ The following unusual circumstances can, after careful consideration, justify th If, e.g. due to an update of ``nlohmann/json``, the references of any items (be it tests or code) changes, then this should trigger a re-evaluation of the statement. In particular if the behaviour changed significantly, it can be justifiable to assume that the old SME scores do not reflect the statement anymore. * addition of automatic validators: - Recall that the SME judges in the absence of an automatic validator the validity of the statement using their own knowledge as well as the provided references, while in the presence of an automatic validator the validity of the validator score to represent the true score of the item, which is estimated as in the case of no validator, is judged. - If a new automatic validator is added, then the meaning of the old SME scores is no longer represented, thereby urging for a re-review or (if a re-review is impossible) the removal of the score. + Recall that the SME-scores have different meanings depending on whether or not an automatic validator is implemented. In the absence of a validator, the SME shall assess their confidence in the statement based on linked artifacts (references) and their own knowledge. In the presence of an automatic validator, the SME shall assess only their confidence in the validator as an accurate measure of the truth of the statement. 
## Validators From aaaf8ed72038a7bd9f2cc9252a3cbe48912119f8 Mon Sep 17 00:00:00 2001 From: Erik Hu Date: Fri, 7 Nov 2025 16:30:52 +0000 Subject: [PATCH 67/67] remove automatic --- TSF/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/TSF/README.md b/TSF/README.md index cdaa754c69..0dc6967d66 100644 --- a/TSF/README.md +++ b/TSF/README.md @@ -221,7 +221,9 @@ The following unusual circumstances can, after careful consideration, justify th If, e.g. due to an update of ``nlohmann/json``, the references of any items (be it tests or code) changes, then this should trigger a re-evaluation of the statement. In particular if the behaviour changed significantly, it can be justifiable to assume that the old SME scores do not reflect the statement anymore. * addition of automatic validators: - Recall that the SME-scores have different meanings depending on whether or not an automatic validator is implemented. In the absence of a validator, the SME shall assess their confidence in the statement based on linked artifacts (references) and their own knowledge. In the presence of an automatic validator, the SME shall assess only their confidence in the validator as an accurate measure of the truth of the statement. + Recall that the SME-scores have different meanings depending on whether or not an automatic validator is implemented. + In the absence of a validator, the SME shall assess their confidence in the statement based on linked artifacts (references) and their own knowledge. + In the presence of a validator, the SME shall assess only their confidence in the validator as an accurate measure of the truth of the statement. ## Validators