diff --git a/.github/workflows/build-deb.yml b/.github/workflows/build-deb.yml index 32990ab..fb6e888 100644 --- a/.github/workflows/build-deb.yml +++ b/.github/workflows/build-deb.yml @@ -85,8 +85,8 @@ jobs: strategy: fail-fast: false matrix: + distro: ["ubuntu:24.04"] arch: [x86_64, aarch64] - distro: ["ubuntu:24.04", "debian:trixie"] include: - arch: x86_64 runner: ubuntu-latest diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7727d2f..f00cdcd 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -50,7 +50,7 @@ jobs: yum update -y # install development tools - yum install -y autoconf automake binutils bison flex gcc gcc-c++ gdb glibc-devel libtool make pkgconf pkgconf-m4 pkgconf-pkg-config rpm-build rpm-sign strace asciidoc byacc ctags diffstat elfutils-libelf-devel git intltool patchutils perl-Fedora-VSP perl-Sys-Syslog perl-generators pesign source-highlight systemtap valgrind valgrind-devel cmake expect rpmdevtools rpmlint perl clang + yum install -y autoconf automake binutils bison flex gcc gcc-c++ gdb glibc-devel libtool make pkgconf pkgconf-m4 pkgconf-pkg-config rpm-build rpm-sign strace asciidoc byacc ctags diffstat elfutils-libelf-devel git intltool patchutils perl-Fedora-VSP perl-Sys-Syslog perl-generators pesign source-highlight systemtap valgrind valgrind-devel cmake expect rpmdevtools rpmlint perl clang python3 # install rpmdevtools yum install -y git yum-utils diff --git a/.gitignore b/.gitignore index efa78c6..f5a43d7 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,6 @@ cryptpilot-verity/benchmark/cachefs_mount/ cryptpilot-verity/benchmark/cachefs_verity_mount/ cryptpilot-verity/benchmark/verity_on_cachefs_mount/ cryptpilot-verity/benchmark/verity_source_data/ + +# Superpowers design docs +docs/superpowers/ diff --git a/Makefile b/Makefile index e51b477..ac61f6e 100644 --- a/Makefile +++ b/Makefile @@ -145,10 +145,14 @@ deb-install: deb-build dpkg -i ../cryptpilot-verity_*.deb 
../cryptpilot-fde_*.deb ../cryptpilot-crypt_*.deb ../cryptpilot_*.deb apt-get install -f -y -.PHONE: run-test -run-test: install-test-depend +.PHONY: run-test +run-test: install-test-depend verity-testfiles cargo test -- --nocapture +.PHONY: verity-testfiles +verity-testfiles: + @cd verity-core && python3 make_testfiles.py + .PHONE: install-test-depend install-test-depend: [[ -e /tmp/pjdfstest/pjdfstest ]] || { cd /tmp/ && git clone https://github.com/pjd/pjdfstest.git && cd /tmp/pjdfstest && autoreconf -ifs && ./configure && make pjdfstest ; } diff --git a/cryptpilot-verity/Makefile b/cryptpilot-verity/Makefile index eb0165b..339c3de 100644 --- a/cryptpilot-verity/Makefile +++ b/cryptpilot-verity/Makefile @@ -39,13 +39,56 @@ test: clean-test @cargo run -- dump $(TEST_DATA_DIR) --print-metadata @echo "" - @echo "Step 5: Running verify command..." + @echo "Step 4b: Testing format with labels..." + @cargo run -- format $(TEST_DATA_DIR) --hash-output $(TEST_HASH_FILE) --force --label env=test --label version=1.0 + @echo "✓ Format with labels completed" + @echo "" + + @echo "Step 4c: Testing dump --print-labels..." + @cargo run -- dump $(TEST_DATA_DIR) --print-labels + @echo "" + + @echo "Step 4d: Testing dump --print-label for specific key..." + @ENV_VAL=$$(cargo run -- dump $(TEST_DATA_DIR) --print-label env 2>/dev/null); \ + if [ "$$ENV_VAL" = "test" ]; then \ + echo "✓ Label 'env' value correct: $$ENV_VAL"; \ + else \ + echo "✗ Label 'env' value mismatch! Expected 'test', got '$$ENV_VAL'"; \ + exit 1; \ + fi + @echo "" + + @echo "Step 4e: Testing dump --print-label for nonexistent key..." + @if cargo run -- dump $(TEST_DATA_DIR) --print-label nonexistent 2>/dev/null; then \ + echo "✗ Should have failed for nonexistent label key"; \ + exit 1; \ + else \ + echo "✓ Nonexistent label key correctly rejected"; \ + fi + @echo "" + + @echo "Step 4f: Verifying labels do not affect root hash..." 
+ @cargo run -- format $(TEST_DATA_DIR) --hash-output $(TEST_HASH_FILE) --force + @HASH_NO_LABEL=$$(cat $(TEST_HASH_FILE)); \ + cargo run -- format $(TEST_DATA_DIR) --hash-output $(TEST_DIR)/hash_with_labels.txt --force --label env=test --label version=1.0 2>/dev/null; \ + HASH_WITH_LABEL=$$(cat $(TEST_DIR)/hash_with_labels.txt); \ + if [ "$$HASH_NO_LABEL" = "$$HASH_WITH_LABEL" ]; then \ + echo "✓ Root hash is identical with and without labels"; \ + else \ + echo "✗ Root hash differs! Labels should not affect hash."; \ + echo " Without labels: $$HASH_NO_LABEL"; \ + echo " With labels: $$HASH_WITH_LABEL"; \ + exit 1; \ + fi + @echo "" + + @echo "Step 9: Running verify command..." @HASH=$$(cat $(TEST_HASH_FILE)); \ cargo run -- verify $(TEST_DATA_DIR) $$HASH @echo "✓ Verification passed!" @echo "" - @echo "Step 6: Testing verification failure (modified file)..." + @echo "Step 10: Testing verification failure (modified file)..." @echo "modified content" > $(TEST_DATA_DIR)/file1.txt @HASH=$$(cat $(TEST_HASH_FILE)); \ if cargo run -- verify $(TEST_DATA_DIR) $$HASH 2>&1; then \ @@ -56,12 +99,12 @@ test: clean-test fi @echo "" - @echo "Step 7: Restoring original file for mount test..." + @echo "Step 11: Restoring original file for mount test..." @echo "test file 1 content" > $(TEST_DATA_DIR)/file1.txt @echo "✓ File restored" @echo "" - @echo "Step 8: Creating bind mount for tampering test later..." + @echo "Step 12: Creating bind mount for tampering test later..." @echo " → Creating bind mount to access underlying data during verity mount..." @mkdir -p $(TEST_BIND_DIR) @mount --bind $(TEST_DATA_DIR) $(TEST_BIND_DIR) 2>/dev/null || { echo "✗ Failed to create bind mount (may need sudo)"; exit 1; } @@ -71,7 +114,7 @@ test: clean-test - @echo "Step 9: Testing open command (in-place mount with built-in FUSE)..." + @echo "Step 13: Testing open command (in-place mount with built-in FUSE)..." 
@HASH=$$(cat $(TEST_HASH_FILE)); \ echo " → Mounting verity-fuse in-place on $(TEST_DATA_DIR)..."; \ cargo run -- open $(TEST_DATA_DIR) $(TEST_DATA_DIR) $$HASH 2>&1 & \ @@ -110,7 +153,7 @@ test: clean-test fi @echo "" - @echo "Step 10: Unmounting for tampering detection test..." + @echo "Step 14: Unmounting for tampering detection test..." @if mountpoint -q $(TEST_DATA_DIR) 2>/dev/null; then \ echo " → Unmounting in-place mount for tamper test..."; \ fusermount -u $(TEST_DATA_DIR) 2>/dev/null || umount $(TEST_DATA_DIR) 2>/dev/null || true; \ @@ -118,7 +161,7 @@ test: clean-test fi @echo "" - @echo "Step 11: Testing tampering detection (cache disabled, using bind mount)..." + @echo "Step 15: Testing tampering detection (cache disabled, using bind mount)..." @echo " → Tampering with underlying data via bind mount..." @echo "TAMPERED DATA" > $(TEST_BIND_DIR)/file1.txt @echo " → Opening with --block-cache-capacity 0 to disable cache for tamper detection..." @@ -166,7 +209,7 @@ test: clean-test @rmdir $(TEST_BIND_DIR) 2>/dev/null || true @echo "" - @echo "Step 12: Unmounting after tampering test..." + @echo "Step 16: Unmounting after tampering test..." @if mountpoint -q $(TEST_DATA_DIR) 2>/dev/null; then \ echo " → Unmounting..."; \ fusermount -u $(TEST_DATA_DIR) 2>/dev/null || cargo run -- close $(TEST_DATA_DIR) 2>/dev/null || true; \ diff --git a/cryptpilot-verity/README.md b/cryptpilot-verity/README.md index ea04c41..877884e 100644 --- a/cryptpilot-verity/README.md +++ b/cryptpilot-verity/README.md @@ -34,6 +34,7 @@ The CLI interface and subcommand design are intentionally similar to the `verity - POSIX metadata such as permissions bits, ownership (`uid`, `gid`), and timestamps. - Mount options, kernel-side permission checks, or higher-level application logic. - Integrity of files or directories that were never included in the formatted metadata; in practice such paths are ignored and do not appear in the exposed filesystem view. 
Likewise, if a file that was included in the metadata is later removed from the underlying filesystem, this is treated as absence rather than active tampering and does not by itself trigger an integrity failure. + - Labels (key-value metadata attached during format). Labels are stored in the metadata file but are not integrity-protected by the root hash. ## Security Notes @@ -73,7 +74,7 @@ All commands are subcommands of the `cryptpilot-verity` binary. Run `cryptpilot- ### `format` ```bash -cryptpilot-verity format [--metadata ] [--force] --hash-output +cryptpilot-verity format [--metadata ] [--force] [--label key=value]... --hash-output ``` - **Purpose**: Generate fs-verity metadata and the root hash for a given data directory. @@ -82,6 +83,7 @@ cryptpilot-verity format [--metadata ] [--force] --has - `--metadata, -m` **[optional]**: Path to the output metadata file (FlatBuffers-encoded). If not specified, defaults to `/cryptpilot-verity.metadata.fb`. - `--hash-output`: Path to write the root hash (use `-` for stdout). - `--force` **[optional]**: Overwrite an existing metadata file at the target path. Intended for re-formatting or third-party auditing of an already formatted directory. + - `--label key=value` **[optional, repeatable]**: Attach a label to the metadata. Labels are key-value pairs (Docker-style) stored in the metadata file. Can be specified multiple times. Labels are NOT included in the root hash calculation. ### `verify` @@ -101,6 +103,8 @@ cryptpilot-verity verify [--metadata ] [--metad ```bash cryptpilot-verity dump --print-metadata cryptpilot-verity dump --metadata --print-root-hash +cryptpilot-verity dump --print-label +cryptpilot-verity dump --print-labels ``` - **Purpose**: Inspect metadata and/or print only the root hash. @@ -109,6 +113,8 @@ cryptpilot-verity dump --metadata --print-root-hash - `--metadata` **[optional]**: Path to the metadata file to read directly. Either `--metadata` or `` must be specified (not both required). 
- `--print-metadata`: Print the full decoded metadata (must specify either this or `--print-root-hash`). - `--print-root-hash`: Print only the root hash (must specify either this or `--print-metadata`). + - `--print-label `: Print the value of a specific label key. Exits with an error if the key is not found. + - `--print-labels`: Print all labels (one `key=value` per line). Prints `(no labels)` if no labels were set during format. ### `open` diff --git a/cryptpilot-verity/README_zh.md b/cryptpilot-verity/README_zh.md index 4a69ea8..cbcf822 100644 --- a/cryptpilot-verity/README_zh.md +++ b/cryptpilot-verity/README_zh.md @@ -33,6 +33,7 @@ CLI 接口和子命令设计有意与 `veritysetup` 工具类似,以便熟悉 - POSIX 元数据,如权限位、所有权(`uid`、`gid`)和时间戳。 - 挂载选项、内核端权限检查或更高级别的应用程序逻辑。 - 从未包含在格式化元数据中的文件或目录的完整性;实际上,此类路径会被忽略,并且不会出现在公开的文件系统视图中。同样,如果元数据中包含的文件后来从底层文件系统中删除,这将被视为不存在而不是主动篡改,本身不会触发完整性失败。 + - 标签(格式时附加的键值元数据)。标签存储在元数据文件中但不受 root hash 完整性保护。 ## 安全注意事项 @@ -72,7 +73,7 @@ CLI 接口和子命令设计有意与 `veritysetup` 工具类似,以便熟悉 ### `format` ```bash -cryptpilot-verity format [--metadata ] [--force] --hash-output +cryptpilot-verity format [--metadata ] [--force] [--label key=value]... 
--hash-output ``` - **目的**:为给定的数据目录生成 fs-verity 元数据和根哈希。 @@ -81,6 +82,7 @@ cryptpilot-verity format [--metadata ] [--force] --has - `--metadata, -m` **[可选]**:输出元数据文件(FlatBuffers 编码)的路径。如果未指定,默认为 `/cryptpilot-verity.metadata.fb`。 - `--hash-output`:写入根哈希的路径(使用 `-` 表示标准输出)。 - `--force` **[可选]**:覆盖目标路径上的现有元数据文件。用于重新格式化或对已格式化目录进行第三方审计。 + - `--label key=value` **[可选,可重复]**:为元数据附加标签。标签是键值对(Docker 风格),存储在元数据文件中但不参与 root hash 计算。 ### `verify` @@ -100,6 +102,8 @@ cryptpilot-verity verify [--metadata ] [--metad ```bash cryptpilot-verity dump --print-metadata cryptpilot-verity dump --metadata --print-root-hash +cryptpilot-verity dump --print-label +cryptpilot-verity dump --print-labels ``` - **目的**:检查元数据和/或仅打印根哈希。 @@ -108,6 +112,8 @@ cryptpilot-verity dump --metadata --print-root-hash - `--metadata` **[可选]**:直接读取的元数据文件路径。必须指定 `--metadata` 或 `` 之一(不需要同时指定两者)。 - `--print-metadata`:打印完整的解码元数据(必须指定此项或 `--print-root-hash`)。 - `--print-root-hash`:仅打印根哈希(必须指定此项或 `--print-metadata`)。 + - `--print-label `:输出指定标签键的值。如果键不存在则报错退出。 + - `--print-labels`:输出所有标签(每行一个 `key=value`)。如果未设置标签则输出 `(no labels)`。 ### `open` diff --git a/cryptpilot-verity/benchmark/benchmark-verity-fuse.sh b/cryptpilot-verity/benchmark/benchmark-verity-fuse.sh index 71b6469..23fda3a 100755 --- a/cryptpilot-verity/benchmark/benchmark-verity-fuse.sh +++ b/cryptpilot-verity/benchmark/benchmark-verity-fuse.sh @@ -92,11 +92,13 @@ calc_average() { calc_stddev() { local arr=("$@") - local avg=$(calc_average "${arr[@]}") + local avg + avg=$(calc_average "${arr[@]}") local count=${#arr[@]} local sum_sq=0 for val in "${arr[@]}"; do - local diff=$(echo "$val - $avg" | bc -l) + local diff + diff=$(echo "$val - $avg" | bc -l) sum_sq=$(echo "$sum_sq + ($diff * $diff)" | bc -l) done echo "scale=3; sqrt($sum_sq / $count)" | bc -l @@ -216,7 +218,8 @@ setup_gocryptfs_encrypted() { echo "$password" | gocryptfs -init -q "$encrypted_dir" 2>"$LOG_DIR/gocryptfs_init.log" # Mount temporarily to copy files - local temp_mount=$(mktemp -d) + 
local temp_mount + temp_mount=$(mktemp -d) echo "$password" | gocryptfs -q "$encrypted_dir" "$temp_mount" 2>"$LOG_DIR/gocryptfs_mount.log" # Copy files @@ -241,7 +244,8 @@ setup_verity_fuse() { "$CRYPTPILOT_VERITY" format "$data_dir" --hash-output "$RESULT_DIR/${label}_root_hash.txt" --force \ >> "$LOG_DIR/${label}_verity_format.log" 2>&1 - local root_hash=$(cat "$RESULT_DIR/${label}_root_hash.txt") + local root_hash + root_hash=$(cat "$RESULT_DIR/${label}_root_hash.txt") log_info "Root Hash: $root_hash" mount_verity_fuse "$data_dir" "$mount_point" "$root_hash" "$label" @@ -352,16 +356,21 @@ test_sequential_read_dd_single() { drop_caches - local output=$(dd if="$file" of=/dev/null bs=1M 2>&1) - local speed=$(echo "$output" | grep -oP '[\d.]+\s*(MB|GB)/s' | head -1 | grep -oP '[\d.]+') - local unit=$(echo "$output" | grep -oP '[\d.]+\s*(MB|GB)/s' | head -1 | grep -oP '(MB|GB)') + local output + output=$(dd if="$file" of=/dev/null bs=1M 2>&1) + local speed_unit + speed_unit=$(echo "$output" | grep -oP '[\d.]+\s*(MB|GB)/s' | head -1) + local speed + speed=$(echo "$speed_unit" | grep -oP '[\d.]+') + local unit + unit=$(echo "$speed_unit" | grep -oP '(MB|GB)') if [ "$unit" = "GB" ]; then speed=$(echo "$speed * 1024" | bc -l) fi record_raw_result "$label" "sequential_read_dd" "$run" "$speed" "MB/s" - log_info " Run $run: sequential_read_dd = ${speed} MB/s" + log_info " Run $run: sequential_read_dd=${speed} MB/s" } test_sequential_read_fio_single() { @@ -371,7 +380,8 @@ test_sequential_read_fio_single() { drop_caches - local output=$(fio --name=seq_read \ + local output + output=$(fio --name=seq_read \ --filename="$target_dir/large_files/file_1.bin" \ --rw=read \ --bs=4k \ @@ -382,11 +392,13 @@ test_sequential_read_fio_single() { --group_reporting \ --output-format=json 2>/dev/null) - local bw_kb=$(echo "$output" | jq -r '.jobs[0].read.bw') - local bw_mb=$(echo "scale=3; $bw_kb / 1024" | bc -l) + local bw_kb; + bw_kb=$(echo "$output" | jq -r '.jobs[0].read.bw') + local 
bw_mb; + bw_mb=$(echo "scale=3; $bw_kb / 1024" | bc -l) record_raw_result "$label" "sequential_read_fio" "$run" "$bw_mb" "MB/s" - log_info " Run $run: sequential_read_fio = ${bw_mb} MB/s" + log_info " Run $run: sequential_read_fio=${bw_mb} MB/s" } test_random_read_fio_single() { @@ -396,7 +408,8 @@ test_random_read_fio_single() { drop_caches - local output=$(fio --name=rand_read \ + local output + output=$(fio --name=rand_read \ --filename="$target_dir/large_files/file_1.bin" \ --rw=randread \ --bs=4k \ @@ -407,13 +420,16 @@ test_random_read_fio_single() { --group_reporting \ --output-format=json 2>/dev/null) - local iops=$(echo "$output" | jq -r '.jobs[0].read.iops') - local lat_ns=$(echo "$output" | jq -r '.jobs[0].read.lat_ns.mean') - local lat_ms=$(echo "scale=3; $lat_ns / 1000000" | bc -l) + local iops; + iops=$(echo "$output" | jq -r '.jobs[0].read.iops') + local lat_ns; + lat_ns=$(echo "$output" | jq -r '.jobs[0].read.lat_ns.mean') + local lat_ms; + lat_ms=$(echo "scale=3; $lat_ns / 1000000" | bc -l) record_raw_result "$label" "random_read_iops" "$run" "$iops" "IOPS" record_raw_result "$label" "random_read_latency" "$run" "$lat_ms" "ms" - log_info " Run $run: random_read_iops = ${iops}, latency = ${lat_ms} ms" + log_info " Run $run: random_read_iops=${iops}, latency=${lat_ms} ms" } test_small_files_read_single() { @@ -423,18 +439,22 @@ test_small_files_read_single() { drop_caches - local start=$(date +%s.%N) + local start; + start=$(date +%s.%N) for i in $(seq 1 $SMALL_FILE_COUNT); do cat "$target_dir/small_files/file_$i.bin" > /dev/null done - local end=$(date +%s.%N) + local end; + end=$(date +%s.%N) - local duration=$(echo "$end - $start" | bc -l) - local ops_per_sec=$(echo "scale=3; $SMALL_FILE_COUNT / $duration" | bc -l) + local duration; + duration=$(echo "$end - $start" | bc -l) + local ops_per_sec; + ops_per_sec=$(echo "scale=3; $SMALL_FILE_COUNT / $duration" | bc -l) record_raw_result "$label" "small_files_read" "$run" "$duration" "seconds" 
record_raw_result "$label" "small_files_ops" "$run" "$ops_per_sec" "ops/s" - log_info " Run $run: small_files_read = ${duration} sec (${ops_per_sec} ops/s)" + log_info " Run $run: small_files_read=${duration} sec (${ops_per_sec} ops/s)" } test_readdir_single() { @@ -444,14 +464,17 @@ test_readdir_single() { drop_caches - local start=$(date +%s.%N) + local start; + start=$(date +%s.%N) ls -laR "$target_dir" > /dev/null 2>&1 - local end=$(date +%s.%N) + local end; + end=$(date +%s.%N) - local duration=$(echo "($end - $start) * 1000" | bc -l) + local duration + duration=$(echo "($end - $start) * 1000" | bc -l) record_raw_result "$label" "readdir" "$run" "$duration" "ms" - log_info " Run $run: readdir = ${duration} ms" + log_info " Run $run: readdir=${duration} ms" } # Run all single-iteration tests on a target directory @@ -508,12 +531,15 @@ calculate_statistics() { echo "label,test,value,stddev,unit" > "$RESULT_DIR/results.csv" # Get unique label,test combinations - local combinations=$(tail -n +2 "$RESULT_DIR/raw_results.csv" | cut -d',' -f1,2 | sort -u) + local combinations; + combinations=$(tail -n +2 "$RESULT_DIR/raw_results.csv" | cut -d',' -f1,2 | sort -u) while IFS=',' read -r label test; do # Get all values for this label,test - local values=$(grep "^$label,$test," "$RESULT_DIR/raw_results.csv" | cut -d',' -f4) - local unit=$(grep "^$label,$test," "$RESULT_DIR/raw_results.csv" | head -1 | cut -d',' -f5) + local values; + values=$(grep "^$label,$test," "$RESULT_DIR/raw_results.csv" | cut -d',' -f4) + local unit; + unit=$(grep "^$label,$test," "$RESULT_DIR/raw_results.csv" | head -1 | cut -d',' -f5) # Convert to array local arr=() @@ -522,8 +548,10 @@ calculate_statistics() { done <<< "$values" # Calculate statistics - local avg=$(calc_average "${arr[@]}") - local stddev=$(calc_stddev "${arr[@]}") + local avg; + avg=$(calc_average "${arr[@]}") + local stddev; + stddev=$(calc_stddev "${arr[@]}") echo "$label,$test,$avg,$stddev,$unit" >> "$RESULT_DIR/results.csv" 
log_info " $label,$test: avg=$avg, stddev=$stddev" @@ -556,7 +584,8 @@ run_all_tests() { log_info "Formatting source data with verity..." "$CRYPTPILOT_VERITY" format "$verity_source_dir" --hash-output "$RESULT_DIR/cachefs_verity_root_hash.txt" --force \ >> "$LOG_DIR/cachefs_verity_format.log" 2>&1 - local verity_hash=$(cat "$RESULT_DIR/cachefs_verity_root_hash.txt") + local verity_hash; + verity_hash=$(cat "$RESULT_DIR/cachefs_verity_root_hash.txt") log_info "verity root hash: $verity_hash" setup_gocryptfs_encrypted "$verity_source_dir" "$VERITY_ENCRYPTED_DATA_DIR" "$GOCRYPTFS_PASSWORD" @@ -683,7 +712,8 @@ generate_report() { printf "| %-28s |" "$test ($unit)" local baseline_val="" for label in "${labels[@]}"; do - local val=$(grep "^$label,$test," "$RESULT_DIR/results.csv" 2>/dev/null | cut -d',' -f3) + local val; + val=$(grep "^$label,$test," "$RESULT_DIR/results.csv" 2>/dev/null | cut -d',' -f3) if [ -n "$val" ]; then if [ "$label" = "baseline" ]; then baseline_val="$val" @@ -692,8 +722,10 @@ generate_report() { # Calculate change percentage vs baseline if [ -n "$baseline_val" ] && [ "$baseline_val" != "0" ]; then # Ensure numbers have leading zero for bc - local safe_val=$(echo "$val" | sed 's/^\./0./') - local safe_baseline=$(echo "$baseline_val" | sed 's/^\./0./') + local safe_val; + safe_val=${val/#./0.} + local safe_baseline + safe_baseline=${baseline_val/#./0.} # Calculate change: (val - baseline) / baseline * 100 # For lower_is_better metrics, invert the sign diff --git a/cryptpilot-verity/src/cli.rs b/cryptpilot-verity/src/cli.rs index 72cc375..ea8d612 100644 --- a/cryptpilot-verity/src/cli.rs +++ b/cryptpilot-verity/src/cli.rs @@ -1,5 +1,15 @@ use clap::{Parser, Subcommand}; +fn parse_label(s: &str) -> Result<(String, String), String> { + let Some((key, value)) = s.split_once('=') else { + return Err(format!("invalid label format '{}', expected key=value", s)); + }; + if key.is_empty() { + return Err("label key cannot be empty".to_string()); + } + 
Ok((key.to_string(), value.to_string())) +} + use crate::build::CLAP_LONG_VERSION; #[derive(Parser, Debug)] @@ -52,6 +62,10 @@ pub struct FormatOptions { /// Intended for re-formatting or third-party auditing of an already formatted directory. #[arg(long)] pub force: bool, + + /// Label in key=value format. Can be specified multiple times. + #[arg(long = "label", value_parser = parse_label)] + pub labels: Vec<(String, String)>, } #[derive(Parser, Debug)] @@ -90,12 +104,20 @@ pub struct DumpOptions { pub metadata: Option, /// Print full metadata - #[arg(long, required_unless_present = "print_root_hash")] + #[arg(long, required_unless_present_any = ["print_root_hash", "print_label", "print_labels"])] pub print_metadata: bool, /// Print only the root hash instead of full metadata - #[arg(long, required_unless_present = "print_metadata")] + #[arg(long, required_unless_present_any = ["print_metadata", "print_label", "print_labels"])] pub print_root_hash: bool, + + /// Print the value of a specific label key + #[arg(long)] + pub print_label: Option, + + /// Print all labels + #[arg(long, required_unless_present_any = ["print_metadata", "print_root_hash", "print_label"])] + pub print_labels: bool, } #[derive(Parser, Debug)] diff --git a/cryptpilot-verity/src/cmd/dump.rs b/cryptpilot-verity/src/cmd/dump.rs index 14cce37..daa52ca 100644 --- a/cryptpilot-verity/src/cmd/dump.rs +++ b/cryptpilot-verity/src/cmd/dump.rs @@ -34,13 +34,21 @@ impl Command for DumpCommand { println!("{}", root_hash); } else if self.options.print_metadata { // Parse metadata - let file_infos = crate::metadata::deserialize_metadata(&metadata_bytes)?; + let metadata_info = crate::metadata::deserialize_metadata(&metadata_bytes)?; + let file_infos = &metadata_info.file_infos; // Print metadata in human-readable format println!("Metadata contents:"); println!("Total files: {}", file_infos.len()); + if !metadata_info.labels.is_empty() { + println!(); + println!("Labels:"); + for (key, value) in 
&metadata_info.labels { + println!(" {}={}", key, value); + } + } println!(); - for info in &file_infos { + for info in file_infos { println!("File: {}", info.path); println!(" Descriptor Hash: {}", info.descriptor_hash); println!(" FsVerity Descriptor:"); @@ -61,8 +69,27 @@ impl Command for DumpCommand { ); println!(); } + } else if let Some(ref key) = self.options.print_label { + let metadata_info = crate::metadata::deserialize_metadata(&metadata_bytes)?; + match metadata_info.labels.get(key) { + Some(value) => println!("{}", value), + None => { + anyhow::bail!("label key '{}' not found", key); + } + } + } else if self.options.print_labels { + let metadata_info = crate::metadata::deserialize_metadata(&metadata_bytes)?; + if metadata_info.labels.is_empty() { + println!("(no labels)"); + } else { + for (key, value) in &metadata_info.labels { + println!("{}={}", key, value); + } + } } else { - anyhow::bail!("Either --print-root-hash or --print-metadata must be specified"); + anyhow::bail!( + "Either --print-root-hash, --print-metadata, --print-label, or --print-labels must be specified" + ); }; Ok(()) diff --git a/cryptpilot-verity/src/cmd/format.rs b/cryptpilot-verity/src/cmd/format.rs index 5f2131b..4a2e6fb 100644 --- a/cryptpilot-verity/src/cmd/format.rs +++ b/cryptpilot-verity/src/cmd/format.rs @@ -81,8 +81,14 @@ impl Command for FormatCommand { file_infos.push(info); } + // Collect labels from CLI options into a BTreeMap + let mut labels = std::collections::BTreeMap::new(); + for (key, value) in &self.options.labels { + labels.insert(key.clone(), value.clone()); + } + // Serialize to FlatBuffers format - let fb_data = crate::metadata::serialize_metadata(&file_infos)?; + let fb_data = crate::metadata::serialize_metadata(&file_infos, &labels)?; tracing::debug!( "Generated FlatBuffers metadata with {} entries, metadata size: {} bytes", file_infos.len(), diff --git a/cryptpilot-verity/src/cmd/open.rs b/cryptpilot-verity/src/cmd/open.rs index 89d61f8..39b1694 100644 
--- a/cryptpilot-verity/src/cmd/open.rs +++ b/cryptpilot-verity/src/cmd/open.rs @@ -78,7 +78,8 @@ impl Command for OpenCommand { tracing::info!("Metadata hash verification passed"); // Parse metadata - let file_infos = crate::metadata::deserialize_metadata(&metadata_bytes)?; + let metadata_info = crate::metadata::deserialize_metadata(&metadata_bytes)?; + let file_infos = metadata_info.file_infos; tracing::info!("Metadata contains {} files", file_infos.len()); // Verify metadata integrity for each file @@ -118,11 +119,8 @@ impl OpenCommand { let verifier = VerityVerifier::new(file_infos)?; // Create VerityFS instance with real verifier - let fs = VerityFS::new_with_block_cache( - source, - verifier, - self.options.block_cache_capacity, - )?; + let fs = + VerityFS::new_with_block_cache(source, verifier, self.options.block_cache_capacity)?; // Prepare mount options let options = vec![ diff --git a/cryptpilot-verity/src/cmd/verify.rs b/cryptpilot-verity/src/cmd/verify.rs index 026445d..8278c2a 100644 --- a/cryptpilot-verity/src/cmd/verify.rs +++ b/cryptpilot-verity/src/cmd/verify.rs @@ -45,7 +45,8 @@ impl Command for VerifyCommand { tracing::info!("Root hash verification passed"); // Parse metadata after hash verification - let file_infos = crate::metadata::deserialize_metadata(&metadata_bytes)?; + let metadata_info = crate::metadata::deserialize_metadata(&metadata_bytes)?; + let file_infos = metadata_info.file_infos; // Verify self-consistency of metadata entries (always required) for info in &file_infos { diff --git a/cryptpilot-verity/src/metadata/.gitignore b/cryptpilot-verity/src/metadata/.gitignore index b4372e8..e69de29 100644 --- a/cryptpilot-verity/src/metadata/.gitignore +++ b/cryptpilot-verity/src/metadata/.gitignore @@ -1,2 +0,0 @@ -metadata_generated.rs -metadata_hash_generated.rs diff --git a/cryptpilot-verity/src/metadata/metadata.fbs b/cryptpilot-verity/src/metadata/metadata.fbs index 33379ad..746c8d0 100644 --- 
a/cryptpilot-verity/src/metadata/metadata.fbs +++ b/cryptpilot-verity/src/metadata/metadata.fbs @@ -12,6 +12,12 @@ table FsVerityDescriptor { salt: [ubyte]; // salt prepended to each hashed block } +// Key-value pair for metadata labels +table KeyValue { + key: string; + value: string; +} + // Single file information with fs-verity data table FileInfo { path: string; // relative path of the file @@ -20,10 +26,11 @@ table FileInfo { descriptor_hash: string; // hex-encoded descriptor hash (final measurement) } -// Root metadata structure containing all file information +// Root metadata structure containing all file information and labels table Metadata { version: uint = 1; // metadata format version for backward compatibility files: [FileInfo]; + labels: [KeyValue]; } root_type Metadata; diff --git a/cryptpilot-verity/src/metadata/metadata_generated.rs b/cryptpilot-verity/src/metadata/metadata_generated.rs new file mode 100644 index 0000000..d80afbd --- /dev/null +++ b/cryptpilot-verity/src/metadata/metadata_generated.rs @@ -0,0 +1,880 @@ +// automatically generated by the FlatBuffers compiler, do not modify +// @generated +extern crate alloc; + +#[allow(unused_imports, dead_code)] +pub mod cryptpilot { + + #[allow(unused_imports, dead_code)] + pub mod verity { + + pub enum FsVerityDescriptorOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct FsVerityDescriptor<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for FsVerityDescriptor<'a> { + type Inner = FsVerityDescriptor<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> FsVerityDescriptor<'a> { + pub const VT_VERSION: ::flatbuffers::VOffsetT = 4; + pub const VT_HASH_ALGORITHM: ::flatbuffers::VOffsetT = 6; + pub const VT_LOG_BLOCKSIZE: ::flatbuffers::VOffsetT = 8; + pub const VT_DATA_SIZE: ::flatbuffers::VOffsetT = 10; + pub const VT_ROOT_HASH: 
::flatbuffers::VOffsetT = 12; + pub const VT_SALT: ::flatbuffers::VOffsetT = 14; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + FsVerityDescriptor { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args FsVerityDescriptorArgs<'args>, + ) -> ::flatbuffers::WIPOffset> { + let mut builder = FsVerityDescriptorBuilder::new(_fbb); + builder.add_data_size(args.data_size); + if let Some(x) = args.salt { + builder.add_salt(x); + } + if let Some(x) = args.root_hash { + builder.add_root_hash(x); + } + builder.add_log_blocksize(args.log_blocksize); + builder.add_hash_algorithm(args.hash_algorithm); + builder.add_version(args.version); + builder.finish() + } + + #[inline] + pub fn version(&self) -> u8 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(FsVerityDescriptor::VT_VERSION, Some(0)) + .unwrap() + } + } + #[inline] + pub fn hash_algorithm(&self) -> u8 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(FsVerityDescriptor::VT_HASH_ALGORITHM, Some(0)) + .unwrap() + } + } + #[inline] + pub fn log_blocksize(&self) -> u8 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(FsVerityDescriptor::VT_LOG_BLOCKSIZE, Some(0)) + .unwrap() + } + } + #[inline] + pub fn data_size(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(FsVerityDescriptor::VT_DATA_SIZE, Some(0)) + .unwrap() + } + } + #[inline] + pub fn root_hash(&self) -> Option<::flatbuffers::Vector<'a, u8>> { + // Safety: + // Created from valid 
Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, u8>>>( + FsVerityDescriptor::VT_ROOT_HASH, + None, + ) + } + } + #[inline] + pub fn salt(&self) -> Option<::flatbuffers::Vector<'a, u8>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, u8>>>( + FsVerityDescriptor::VT_SALT, + None, + ) + } + } + } + + impl ::flatbuffers::Verifiable for FsVerityDescriptor<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::("version", Self::VT_VERSION, false)? + .visit_field::("hash_algorithm", Self::VT_HASH_ALGORITHM, false)? + .visit_field::("log_blocksize", Self::VT_LOG_BLOCKSIZE, false)? + .visit_field::("data_size", Self::VT_DATA_SIZE, false)? + .visit_field::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'_, u8>>>( + "root_hash", + Self::VT_ROOT_HASH, + false, + )? + .visit_field::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'_, u8>>>( + "salt", + Self::VT_SALT, + false, + )? 
+ .finish(); + Ok(()) + } + } + pub struct FsVerityDescriptorArgs<'a> { + pub version: u8, + pub hash_algorithm: u8, + pub log_blocksize: u8, + pub data_size: u64, + pub root_hash: Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, u8>>>, + pub salt: Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, u8>>>, + } + impl<'a> Default for FsVerityDescriptorArgs<'a> { + #[inline] + fn default() -> Self { + FsVerityDescriptorArgs { + version: 0, + hash_algorithm: 0, + log_blocksize: 0, + data_size: 0, + root_hash: None, + salt: None, + } + } + } + + pub struct FsVerityDescriptorBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> FsVerityDescriptorBuilder<'a, 'b, A> { + #[inline] + pub fn add_version(&mut self, version: u8) { + self.fbb_ + .push_slot::(FsVerityDescriptor::VT_VERSION, version, 0); + } + #[inline] + pub fn add_hash_algorithm(&mut self, hash_algorithm: u8) { + self.fbb_ + .push_slot::(FsVerityDescriptor::VT_HASH_ALGORITHM, hash_algorithm, 0); + } + #[inline] + pub fn add_log_blocksize(&mut self, log_blocksize: u8) { + self.fbb_ + .push_slot::(FsVerityDescriptor::VT_LOG_BLOCKSIZE, log_blocksize, 0); + } + #[inline] + pub fn add_data_size(&mut self, data_size: u64) { + self.fbb_ + .push_slot::(FsVerityDescriptor::VT_DATA_SIZE, data_size, 0); + } + #[inline] + pub fn add_root_hash( + &mut self, + root_hash: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b, u8>>, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + FsVerityDescriptor::VT_ROOT_HASH, + root_hash, + ); + } + #[inline] + pub fn add_salt( + &mut self, + salt: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b, u8>>, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + FsVerityDescriptor::VT_SALT, + salt, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut 
::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> FsVerityDescriptorBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + FsVerityDescriptorBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for FsVerityDescriptor<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("FsVerityDescriptor"); + ds.field("version", &self.version()); + ds.field("hash_algorithm", &self.hash_algorithm()); + ds.field("log_blocksize", &self.log_blocksize()); + ds.field("data_size", &self.data_size()); + ds.field("root_hash", &self.root_hash()); + ds.field("salt", &self.salt()); + ds.finish() + } + } + pub enum KeyValueOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct KeyValue<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for KeyValue<'a> { + type Inner = KeyValue<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> KeyValue<'a> { + pub const VT_KEY: ::flatbuffers::VOffsetT = 4; + pub const VT_VALUE: ::flatbuffers::VOffsetT = 6; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + KeyValue { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args KeyValueArgs<'args>, + ) -> ::flatbuffers::WIPOffset> { + let mut builder = KeyValueBuilder::new(_fbb); + if let Some(x) = args.value { + builder.add_value(x); + } + if let Some(x) = args.key { + builder.add_key(x); + } + builder.finish() + } + + #[inline] + pub fn key(&self) -> Option<&'a str> { + // Safety: + // Created from 
valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset<&str>>(KeyValue::VT_KEY, None) + } + } + #[inline] + pub fn value(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset<&str>>(KeyValue::VT_VALUE, None) + } + } + } + + impl ::flatbuffers::Verifiable for KeyValue<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::<::flatbuffers::ForwardsUOffset<&str>>( + "key", + Self::VT_KEY, + false, + )? + .visit_field::<::flatbuffers::ForwardsUOffset<&str>>( + "value", + Self::VT_VALUE, + false, + )? + .finish(); + Ok(()) + } + } + pub struct KeyValueArgs<'a> { + pub key: Option<::flatbuffers::WIPOffset<&'a str>>, + pub value: Option<::flatbuffers::WIPOffset<&'a str>>, + } + impl<'a> Default for KeyValueArgs<'a> { + #[inline] + fn default() -> Self { + KeyValueArgs { + key: None, + value: None, + } + } + } + + pub struct KeyValueBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> KeyValueBuilder<'a, 'b, A> { + #[inline] + pub fn add_key(&mut self, key: ::flatbuffers::WIPOffset<&'b str>) { + self.fbb_ + .push_slot_always::<::flatbuffers::WIPOffset<_>>(KeyValue::VT_KEY, key); + } + #[inline] + pub fn add_value(&mut self, value: ::flatbuffers::WIPOffset<&'b str>) { + self.fbb_ + .push_slot_always::<::flatbuffers::WIPOffset<_>>(KeyValue::VT_VALUE, value); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> KeyValueBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + KeyValueBuilder { + fbb_: 
_fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for KeyValue<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("KeyValue"); + ds.field("key", &self.key()); + ds.field("value", &self.value()); + ds.finish() + } + } + pub enum FileInfoOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct FileInfo<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for FileInfo<'a> { + type Inner = FileInfo<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> FileInfo<'a> { + pub const VT_PATH: ::flatbuffers::VOffsetT = 4; + pub const VT_DESCRIPTOR: ::flatbuffers::VOffsetT = 6; + pub const VT_MERKLE_TREE_LEVEL1: ::flatbuffers::VOffsetT = 8; + pub const VT_DESCRIPTOR_HASH: ::flatbuffers::VOffsetT = 10; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + FileInfo { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args FileInfoArgs<'args>, + ) -> ::flatbuffers::WIPOffset> { + let mut builder = FileInfoBuilder::new(_fbb); + if let Some(x) = args.descriptor_hash { + builder.add_descriptor_hash(x); + } + if let Some(x) = args.merkle_tree_level1 { + builder.add_merkle_tree_level1(x); + } + if let Some(x) = args.descriptor { + builder.add_descriptor(x); + } + if let Some(x) = args.path { + builder.add_path(x); + } + builder.finish() + } + + #[inline] + pub fn path(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this 
slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset<&str>>(FileInfo::VT_PATH, None) + } + } + #[inline] + pub fn descriptor(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset>( + FileInfo::VT_DESCRIPTOR, + None, + ) + } + } + #[inline] + pub fn merkle_tree_level1(&self) -> Option<::flatbuffers::Vector<'a, u8>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, u8>>>( + FileInfo::VT_MERKLE_TREE_LEVEL1, + None, + ) + } + } + #[inline] + pub fn descriptor_hash(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset<&str>>( + FileInfo::VT_DESCRIPTOR_HASH, + None, + ) + } + } + } + + impl ::flatbuffers::Verifiable for FileInfo<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::<::flatbuffers::ForwardsUOffset<&str>>( + "path", + Self::VT_PATH, + false, + )? + .visit_field::<::flatbuffers::ForwardsUOffset>( + "descriptor", + Self::VT_DESCRIPTOR, + false, + )? + .visit_field::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'_, u8>>>( + "merkle_tree_level1", + Self::VT_MERKLE_TREE_LEVEL1, + false, + )? + .visit_field::<::flatbuffers::ForwardsUOffset<&str>>( + "descriptor_hash", + Self::VT_DESCRIPTOR_HASH, + false, + )? 
+ .finish(); + Ok(()) + } + } + pub struct FileInfoArgs<'a> { + pub path: Option<::flatbuffers::WIPOffset<&'a str>>, + pub descriptor: Option<::flatbuffers::WIPOffset>>, + pub merkle_tree_level1: Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, u8>>>, + pub descriptor_hash: Option<::flatbuffers::WIPOffset<&'a str>>, + } + impl<'a> Default for FileInfoArgs<'a> { + #[inline] + fn default() -> Self { + FileInfoArgs { + path: None, + descriptor: None, + merkle_tree_level1: None, + descriptor_hash: None, + } + } + } + + pub struct FileInfoBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> FileInfoBuilder<'a, 'b, A> { + #[inline] + pub fn add_path(&mut self, path: ::flatbuffers::WIPOffset<&'b str>) { + self.fbb_ + .push_slot_always::<::flatbuffers::WIPOffset<_>>(FileInfo::VT_PATH, path); + } + #[inline] + pub fn add_descriptor( + &mut self, + descriptor: ::flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::<::flatbuffers::WIPOffset>( + FileInfo::VT_DESCRIPTOR, + descriptor, + ); + } + #[inline] + pub fn add_merkle_tree_level1( + &mut self, + merkle_tree_level1: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b, u8>>, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + FileInfo::VT_MERKLE_TREE_LEVEL1, + merkle_tree_level1, + ); + } + #[inline] + pub fn add_descriptor_hash( + &mut self, + descriptor_hash: ::flatbuffers::WIPOffset<&'b str>, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + FileInfo::VT_DESCRIPTOR_HASH, + descriptor_hash, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> FileInfoBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + FileInfoBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset> { + let 
o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for FileInfo<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("FileInfo"); + ds.field("path", &self.path()); + ds.field("descriptor", &self.descriptor()); + ds.field("merkle_tree_level1", &self.merkle_tree_level1()); + ds.field("descriptor_hash", &self.descriptor_hash()); + ds.finish() + } + } + pub enum MetadataOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct Metadata<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for Metadata<'a> { + type Inner = Metadata<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> Metadata<'a> { + pub const VT_VERSION: ::flatbuffers::VOffsetT = 4; + pub const VT_FILES: ::flatbuffers::VOffsetT = 6; + pub const VT_LABELS: ::flatbuffers::VOffsetT = 8; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + Metadata { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args MetadataArgs<'args>, + ) -> ::flatbuffers::WIPOffset> { + let mut builder = MetadataBuilder::new(_fbb); + if let Some(x) = args.labels { + builder.add_labels(x); + } + if let Some(x) = args.files { + builder.add_files(x); + } + builder.add_version(args.version); + builder.finish() + } + + #[inline] + pub fn version(&self) -> u32 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(Metadata::VT_VERSION, Some(1)).unwrap() } + } + #[inline] + pub fn files( + &self, + ) -> Option<::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset>>> + { + 
// Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset< + ::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset>, + >>(Metadata::VT_FILES, None) + } + } + #[inline] + pub fn labels( + &self, + ) -> Option<::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset>>> + { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset< + ::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset>, + >>(Metadata::VT_LABELS, None) + } + } + } + + impl ::flatbuffers::Verifiable for Metadata<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::("version", Self::VT_VERSION, false)? + .visit_field::<::flatbuffers::ForwardsUOffset< + ::flatbuffers::Vector<'_, ::flatbuffers::ForwardsUOffset>, + >>("files", Self::VT_FILES, false)? + .visit_field::<::flatbuffers::ForwardsUOffset< + ::flatbuffers::Vector<'_, ::flatbuffers::ForwardsUOffset>, + >>("labels", Self::VT_LABELS, false)? 
+ .finish(); + Ok(()) + } + } + pub struct MetadataArgs<'a> { + pub version: u32, + pub files: Option< + ::flatbuffers::WIPOffset< + ::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset>>, + >, + >, + pub labels: Option< + ::flatbuffers::WIPOffset< + ::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset>>, + >, + >, + } + impl<'a> Default for MetadataArgs<'a> { + #[inline] + fn default() -> Self { + MetadataArgs { + version: 1, + files: None, + labels: None, + } + } + } + + pub struct MetadataBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> MetadataBuilder<'a, 'b, A> { + #[inline] + pub fn add_version(&mut self, version: u32) { + self.fbb_.push_slot::(Metadata::VT_VERSION, version, 1); + } + #[inline] + pub fn add_files( + &mut self, + files: ::flatbuffers::WIPOffset< + ::flatbuffers::Vector<'b, ::flatbuffers::ForwardsUOffset>>, + >, + ) { + self.fbb_ + .push_slot_always::<::flatbuffers::WIPOffset<_>>(Metadata::VT_FILES, files); + } + #[inline] + pub fn add_labels( + &mut self, + labels: ::flatbuffers::WIPOffset< + ::flatbuffers::Vector<'b, ::flatbuffers::ForwardsUOffset>>, + >, + ) { + self.fbb_ + .push_slot_always::<::flatbuffers::WIPOffset<_>>(Metadata::VT_LABELS, labels); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> MetadataBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + MetadataBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for Metadata<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("Metadata"); + ds.field("version", &self.version()); + 
ds.field("files", &self.files()); + ds.field("labels", &self.labels()); + ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `Metadata` + /// and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_metadata_unchecked`. + pub fn root_as_metadata( + buf: &[u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::root::(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `Metadata` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_metadata_unchecked`. + pub fn size_prefixed_root_as_metadata( + buf: &[u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::size_prefixed_root::(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `Metadata` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_metadata_unchecked`. + pub fn root_as_metadata_with_opts<'b, 'o>( + opts: &'o ::flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::root_with_opts::>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `Metadata` and returns + /// it. Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_metadata_unchecked`. 
+ pub fn size_prefixed_root_as_metadata_with_opts<'b, 'o>( + opts: &'o ::flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::size_prefixed_root_with_opts::>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a Metadata and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `Metadata`. + pub unsafe fn root_as_metadata_unchecked(buf: &[u8]) -> Metadata<'_> { + unsafe { ::flatbuffers::root_unchecked::(buf) } + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed Metadata and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `Metadata`. + pub unsafe fn size_prefixed_root_as_metadata_unchecked(buf: &[u8]) -> Metadata<'_> { + unsafe { ::flatbuffers::size_prefixed_root_unchecked::(buf) } + } + #[inline] + pub fn finish_metadata_buffer<'a, 'b, A: ::flatbuffers::Allocator + 'a>( + fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + root: ::flatbuffers::WIPOffset>, + ) { + fbb.finish(root, None); + } + + #[inline] + pub fn finish_size_prefixed_metadata_buffer<'a, 'b, A: ::flatbuffers::Allocator + 'a>( + fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + root: ::flatbuffers::WIPOffset>, + ) { + fbb.finish_size_prefixed(root, None); + } + } // pub mod verity +} // pub mod cryptpilot diff --git a/cryptpilot-verity/src/metadata/metadata_hash_generated.rs b/cryptpilot-verity/src/metadata/metadata_hash_generated.rs new file mode 100644 index 0000000..81557fd --- /dev/null +++ b/cryptpilot-verity/src/metadata/metadata_hash_generated.rs @@ -0,0 +1,393 @@ +// automatically generated by the FlatBuffers compiler, do not modify +// @generated +extern crate alloc; + +#[allow(unused_imports, dead_code)] +pub mod cryptpilot { + + #[allow(unused_imports, dead_code)] + pub mod verity { + + #[allow(unused_imports, dead_code)] + pub 
mod hash { + + pub enum FileHashEntryOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct FileHashEntry<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for FileHashEntry<'a> { + type Inner = FileHashEntry<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> FileHashEntry<'a> { + pub const VT_PATH: ::flatbuffers::VOffsetT = 4; + pub const VT_DESCRIPTOR_HASH: ::flatbuffers::VOffsetT = 6; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + FileHashEntry { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args FileHashEntryArgs<'args>, + ) -> ::flatbuffers::WIPOffset> { + let mut builder = FileHashEntryBuilder::new(_fbb); + if let Some(x) = args.descriptor_hash { + builder.add_descriptor_hash(x); + } + if let Some(x) = args.path { + builder.add_path(x); + } + builder.finish() + } + + #[inline] + pub fn path(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset<&str>>( + FileHashEntry::VT_PATH, + None, + ) + } + } + #[inline] + pub fn descriptor_hash(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset<&str>>( + FileHashEntry::VT_DESCRIPTOR_HASH, + None, + ) + } + } + } + + impl ::flatbuffers::Verifiable for FileHashEntry<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? 
+ .visit_field::<::flatbuffers::ForwardsUOffset<&str>>( + "path", + Self::VT_PATH, + false, + )? + .visit_field::<::flatbuffers::ForwardsUOffset<&str>>( + "descriptor_hash", + Self::VT_DESCRIPTOR_HASH, + false, + )? + .finish(); + Ok(()) + } + } + pub struct FileHashEntryArgs<'a> { + pub path: Option<::flatbuffers::WIPOffset<&'a str>>, + pub descriptor_hash: Option<::flatbuffers::WIPOffset<&'a str>>, + } + impl<'a> Default for FileHashEntryArgs<'a> { + #[inline] + fn default() -> Self { + FileHashEntryArgs { + path: None, + descriptor_hash: None, + } + } + } + + pub struct FileHashEntryBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> FileHashEntryBuilder<'a, 'b, A> { + #[inline] + pub fn add_path(&mut self, path: ::flatbuffers::WIPOffset<&'b str>) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + FileHashEntry::VT_PATH, + path, + ); + } + #[inline] + pub fn add_descriptor_hash( + &mut self, + descriptor_hash: ::flatbuffers::WIPOffset<&'b str>, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + FileHashEntry::VT_DESCRIPTOR_HASH, + descriptor_hash, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> FileHashEntryBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + FileHashEntryBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for FileHashEntry<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("FileHashEntry"); + ds.field("path", &self.path()); + ds.field("descriptor_hash", &self.descriptor_hash()); + ds.finish() + } + } + pub enum 
MetadataHashOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct MetadataHash<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for MetadataHash<'a> { + type Inner = MetadataHash<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> MetadataHash<'a> { + pub const VT_FILES: ::flatbuffers::VOffsetT = 4; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + MetadataHash { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args MetadataHashArgs<'args>, + ) -> ::flatbuffers::WIPOffset> { + let mut builder = MetadataHashBuilder::new(_fbb); + if let Some(x) = args.files { + builder.add_files(x); + } + builder.finish() + } + + #[inline] + pub fn files( + &self, + ) -> Option< + ::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset>>, + > { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset< + ::flatbuffers::Vector< + 'a, + ::flatbuffers::ForwardsUOffset, + >, + >>(MetadataHash::VT_FILES, None) + } + } + } + + impl ::flatbuffers::Verifiable for MetadataHash<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::<::flatbuffers::ForwardsUOffset< + ::flatbuffers::Vector< + '_, + ::flatbuffers::ForwardsUOffset, + >, + >>("files", Self::VT_FILES, false)? 
+ .finish(); + Ok(()) + } + } + pub struct MetadataHashArgs<'a> { + pub files: Option< + ::flatbuffers::WIPOffset< + ::flatbuffers::Vector< + 'a, + ::flatbuffers::ForwardsUOffset>, + >, + >, + >, + } + impl<'a> Default for MetadataHashArgs<'a> { + #[inline] + fn default() -> Self { + MetadataHashArgs { files: None } + } + } + + pub struct MetadataHashBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> MetadataHashBuilder<'a, 'b, A> { + #[inline] + pub fn add_files( + &mut self, + files: ::flatbuffers::WIPOffset< + ::flatbuffers::Vector< + 'b, + ::flatbuffers::ForwardsUOffset>, + >, + >, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + MetadataHash::VT_FILES, + files, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> MetadataHashBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + MetadataHashBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for MetadataHash<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("MetadataHash"); + ds.field("files", &self.files()); + ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `MetadataHash` + /// and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_metadata_hash_unchecked`. 
+ pub fn root_as_metadata_hash( + buf: &[u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::root::(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `MetadataHash` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_metadata_hash_unchecked`. + pub fn size_prefixed_root_as_metadata_hash( + buf: &[u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::size_prefixed_root::(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `MetadataHash` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_metadata_hash_unchecked`. + pub fn root_as_metadata_hash_with_opts<'b, 'o>( + opts: &'o ::flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::root_with_opts::>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `MetadataHash` and returns + /// it. Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_metadata_hash_unchecked`. + pub fn size_prefixed_root_as_metadata_hash_with_opts<'b, 'o>( + opts: &'o ::flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::size_prefixed_root_with_opts::>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a MetadataHash and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `MetadataHash`. 
+ pub unsafe fn root_as_metadata_hash_unchecked(buf: &[u8]) -> MetadataHash<'_> { + unsafe { ::flatbuffers::root_unchecked::(buf) } + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed MetadataHash and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `MetadataHash`. + pub unsafe fn size_prefixed_root_as_metadata_hash_unchecked( + buf: &[u8], + ) -> MetadataHash<'_> { + unsafe { ::flatbuffers::size_prefixed_root_unchecked::(buf) } + } + #[inline] + pub fn finish_metadata_hash_buffer<'a, 'b, A: ::flatbuffers::Allocator + 'a>( + fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + root: ::flatbuffers::WIPOffset>, + ) { + fbb.finish(root, None); + } + + #[inline] + pub fn finish_size_prefixed_metadata_hash_buffer< + 'a, + 'b, + A: ::flatbuffers::Allocator + 'a, + >( + fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + root: ::flatbuffers::WIPOffset>, + ) { + fbb.finish_size_prefixed(root, None); + } + } // pub mod hash + } // pub mod verity +} // pub mod cryptpilot diff --git a/cryptpilot-verity/src/metadata/mod.rs b/cryptpilot-verity/src/metadata/mod.rs index 84c5cf6..33973ee 100644 --- a/cryptpilot-verity/src/metadata/mod.rs +++ b/cryptpilot-verity/src/metadata/mod.rs @@ -8,7 +8,8 @@ mod metadata_generated; mod metadata_hash_generated; pub use metadata_generated::cryptpilot::verity::{ - FileInfo, FileInfoArgs, FsVerityDescriptor, FsVerityDescriptorArgs, Metadata, MetadataArgs, + FileInfo, FileInfoArgs, FsVerityDescriptor, FsVerityDescriptorArgs, KeyValue, KeyValueArgs, + Metadata, MetadataArgs, }; pub use metadata_hash_generated::cryptpilot::verity::hash::{ FileHashEntry, FileHashEntryArgs, MetadataHash, MetadataHashArgs, @@ -18,10 +19,17 @@ use anyhow::{bail, Result}; use flatbuffers::{FlatBufferBuilder, WIPOffset}; use sha2::digest::typenum::Unsigned; use sha2::{digest::OutputSizeUser, Digest, Sha256}; +use std::collections::BTreeMap; use 
verity_core::digest::{FsVeritySha256, InnerHash};
 use verity_core::tree::MerkleTree;
 use verity_fuse::file_verifier::file_verity_info::FileVerityInfo;
 
+/// Deserialized metadata containing file info and labels
+pub struct MetadataInfo {
+    pub file_infos: Vec<FileVerityInfo>,
+    pub labels: BTreeMap<String, String>,
+}
+
 /// Calculate fs-verity hash for file data
 pub fn calculate_fsverity_hash(
     data: &[u8],
@@ -38,7 +46,10 @@ pub fn calculate_fsverity_hash(
 }
 
 /// Serialize file information to FlatBuffers format
-pub fn serialize_metadata(file_infos: &[FileVerityInfo]) -> Result<Vec<u8>> {
+pub fn serialize_metadata(
+    file_infos: &[FileVerityInfo],
+    labels: &BTreeMap<String, String>,
+) -> Result<Vec<u8>> {
     let mut builder = FlatBufferBuilder::new();
 
     // Sort by path for stable output (using references to avoid copying)
@@ -84,12 +95,31 @@ pub fn serialize_metadata(file_infos: &[FileVerityInfo]) -> Result<Vec<u8>> {
 
     let files_vector = builder.create_vector(&file_info_offsets);
 
+    // Build labels vector
+    let labels_vector = {
+        let mut label_offsets = Vec::with_capacity(labels.len());
+        for (key, value) in labels {
+            let key_offset = builder.create_string(key);
+            let value_offset = builder.create_string(value);
+            let label = KeyValue::create(
+                &mut builder,
+                &KeyValueArgs {
+                    key: Some(key_offset),
+                    value: Some(value_offset),
+                },
+            );
+            label_offsets.push(label);
+        }
+        Some(builder.create_vector(&label_offsets))
+    };
+
     // Create root Metadata table with version
     let metadata = Metadata::create(
         &mut builder,
         &MetadataArgs {
             version: 1, // Current metadata format version
             files: Some(files_vector),
+            labels: labels_vector,
         },
     );
 
@@ -99,7 +129,7 @@ pub fn serialize_metadata(file_infos: &[FileVerityInfo]) -> Result<Vec<u8>> {
 }
 
 /// Deserialize file information from FlatBuffers format
-pub fn deserialize_metadata(data: &[u8]) -> Result<Vec<FileVerityInfo>> {
+pub fn deserialize_metadata(data: &[u8]) -> Result<MetadataInfo> {
     let metadata = flatbuffers::root::<Metadata>(data)
         .map_err(|e| anyhow::anyhow!("Failed to parse FlatBuffers metadata: {}", e))?;
 
@@ -187,7 +217,26 @@ pub fn
deserialize_metadata(data: &[u8]) -> Result<MetadataInfo> {
         }
     }
 
-    Ok(result)
+    // Parse labels
+    let mut labels = BTreeMap::new();
+    if let Some(labels_vec) = metadata.labels() {
+        for kv in labels_vec {
+            let key = kv
+                .key()
+                .ok_or_else(|| anyhow::anyhow!("Missing key in KeyValue"))?
+                .to_string();
+            let value = kv
+                .value()
+                .ok_or_else(|| anyhow::anyhow!("Missing value in KeyValue"))?
+                .to_string();
+            labels.insert(key, value);
+        }
+    }
+
+    Ok(MetadataInfo {
+        file_infos: result,
+        labels,
+    })
 }
 
 /// Calculate hash from full metadata binary
@@ -275,15 +324,40 @@ mod tests {
         info.verify_self().unwrap();
 
         let file_infos = vec![info];
+        let mut labels = BTreeMap::new();
+        labels.insert("env".to_string(), "prod".to_string());
 
-        let serialized = serialize_metadata(&file_infos).unwrap();
+        let serialized = serialize_metadata(&file_infos, &labels).unwrap();
         let deserialized = deserialize_metadata(&serialized).unwrap();
 
-        assert_eq!(file_infos.len(), deserialized.len());
-        assert_eq!(file_infos[0].path, deserialized[0].path);
+        assert_eq!(file_infos.len(), deserialized.file_infos.len());
+        assert_eq!(file_infos[0].path, deserialized.file_infos[0].path);
         assert_eq!(
             file_infos[0].descriptor_hash,
-            deserialized[0].descriptor_hash
+            deserialized.file_infos[0].descriptor_hash
         );
+        assert_eq!(deserialized.labels.get("env"), Some(&"prod".to_string()));
+    }
+
+    #[test]
+    fn test_serialize_deserialize_empty_labels() {
+        let test_data = b"test file content";
+        let (descriptor, merkle_tree) = calculate_fsverity_hash(test_data);
+        let descriptor_hash = hex::encode(descriptor.to_descriptor_hash());
+        let info = FileVerityInfo {
+            path: "test.txt".to_string(),
+            descriptor,
+            merkle_tree,
+            descriptor_hash,
+        };
+
+        let file_infos = vec![info];
+        let labels = BTreeMap::new();
+
+        let serialized = serialize_metadata(&file_infos, &labels).unwrap();
+        let deserialized = deserialize_metadata(&serialized).unwrap();
+
+        assert_eq!(file_infos.len(), deserialized.file_infos.len());
+
        assert!(deserialized.labels.is_empty());
     }
 }
diff --git a/debian/rules b/debian/rules
index 99eb011..d8cd881 100755
--- a/debian/rules
+++ b/debian/rules
@@ -93,4 +93,7 @@ override_dh_auto_test:
 	true
 
 override_dh_shlibdeps:
-	dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info
+	# Exclude libfuse3 from auto-generated shlibdeps to avoid
+	# libfuse3-3 (Ubuntu) vs libfuse3-4 (Debian) incompatibility.
+	# The fuse3 dependency in debian/control covers both distros.
+	dh_shlibdeps --dpkg-shlibdeps-params="--ignore-missing-info -xlibfuse3"
diff --git a/verity-core/make_testfiles.py b/verity-core/make_testfiles.py
index f980e21..ade174a 100644
--- a/verity-core/make_testfiles.py
+++ b/verity-core/make_testfiles.py
@@ -51,14 +51,16 @@ def testfile(id):
     with testfile('hashblock_{}_{}'.format(i, j)) as f:
         f.write(b'A' * (block_size * (hashes_per_block + i) + j))
 
-with testfile('longfile') as f:
-    size = hashes_per_block * hashes_per_block * block_size * 3 + 99
-    data = b''
-    while len(data) < size:
-        print("size = {} / {}".format(len(data), size))
-        data += ''.join('{:08x}'.format(len(data) + i) for i in range(0, 1024 * 1024 * 5, 8)).encode('ascii')
-    data = data[:size]
-    f.write(data)
+# longfile is ~192MB and not used in tests (only referenced in a comment).
+# Generate it manually if needed for local testing.
+# with testfile('longfile') as f:
+#     size = hashes_per_block * hashes_per_block * block_size * 3 + 99
+#     data = b''
+#     while len(data) < size:
+#         print("size = {} / {}".format(len(data), size))
+#         data += ''.join('{:08x}'.format(len(data) + i) for i in range(0, 1024 * 1024 * 5, 8)).encode('ascii')
+#     data = data[:size]
+#     f.write(data)
 
 # show results using: fsverity measure testfiles/*
diff --git a/verity-fuse/src/filesystem.rs b/verity-fuse/src/filesystem.rs
index 78dc64f..6c2c02c 100644
--- a/verity-fuse/src/filesystem.rs
+++ b/verity-fuse/src/filesystem.rs
@@ -122,7 +122,7 @@ impl VerityFS {
 
         // Compute which blocks overlap with the requested range
         let start_block = (requested_offset / block_size) as usize;
-        let end_block = ((requested_end + block_size - 1) / block_size) as usize; // ceil(requested_end / block_size)
+        let end_block = requested_end.div_ceil(block_size) as usize;
         let num_blocks = end_block - start_block;
 
         let cached_file = self.open_file_cached(file)?;