From 652015eaa7f2944de7b353a72a9c028ee79373e5 Mon Sep 17 00:00:00 2001 From: Quantum Explorer Date: Thu, 23 Apr 2026 15:59:03 +0800 Subject: [PATCH 1/2] test: cover document_type, drive votes/tokens/identity/shielded, drive-abci validation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add 194 new unit tests across 31 files, all targeting error paths and edge cases per the coverage-PR pattern established. Per-target breakdown: - rs-dpp/data_contract/document_type (5 submodules, 45 tests): * class_methods/should_use_creator_id (50% → covered, 8 tests): every branch of v0 short-circuit, v1 transferable/trade_mode AND-chain, unknown-version error. * class_methods/try_from_schema v0 (12 tests) + v1 (12 tests): MissingPositions, DuplicateIndexName, UndefinedIndexProperty, InvalidIndexedPropertyConstraint (string + byte_array), InvalidTokenPosition, RedundantDocumentPaidForByTokenWithContractId, TokenPaymentByBurningOnlyAllowedOnInternalToken error variants; full_validation=false skip paths; schema.to_map() failure; Transferable::try_from(u8) error; insert_values_nested for nested object schemas; happy paths populating TokenCosts. * methods/mod.rs (62% → covered, 9 tests): requires_revision / initial_revision across 4 boolean branches, top_level_indices first-property-of-each, top_level_indices_of_contested_unique filter, sanitize_document_properties hex→bytes conversion and unknown-fields pass-through. * v1/mod.rs (38% → covered, 4 tests): From all-fields + TokenCostsV0 default, properties/indices preservation, all 6 TokenCost setters, trait dispatch (DocumentTypeBasicMethods + DocumentTypeV0Methods on V1). - rs-dpp/data_contract/document_type/property/mod.rs (88% → covered, ~74 tests): random_value/random_sub_filled_value/ random_filled_value for all type variants; read_optionally_from error paths for every scalar with truncated buffers; encode type-mismatch error arms; try_from_value_map extra branches (string without sizes, enum-driven integer, non-identifier content-media-type); find_integer_type_for_min_and_max_values negative-range (I8/I16/I32/I64); sanitize_value_mut base64 fallback / size-constraint rejection / fixed-size Bytes20/32/36; value_from_string I64/U128/I128 overflow + negative-u8 + boolean empty + byte-array exact bytes. - rs-drive/drive/votes/insert (67% → covered, 4 tests): ContestedIndexNotFound + DataContractError in the operations path for register + insert_stored_info. - rs-drive/drive/tokens/status (70% → covered, 9 tests): fetch-missing None, paused↔active round-trip, stateless branch, non-Item CorruptedElementType, undecodable-Item rejection, mixed-present-and-absent batch, empty-id-list short-circuit, with_costs FeeResult, GroveDB InvalidQuery for limit=0 in prove. - rs-drive/drive/address_funds/prove (75% → covered, 10 tests): prove_balance_and_nonce round-trip + operations + absence proof + transactions-not-supported; prove_address_funds_branch_query depth-below-min / depth-above-max InvalidInput + unknown- checkpoint + no-ops-on-validation-error; prove_address_funds _trunk_query operations-populate-ops. - rs-drive/drive/identity/contract_info (82% → covered, 10 tests): fetch_identity_contract_nonce no-contract-identity None + no- identity None + after-merge round-trip + with_fees + stateless; prove_identity_contract_nonce absent + presence-differs-from- absence; merge_identity_contract_nonce estimation-mode success + stateless-layer-info + ops population. - rs-drive/drive/shielded/nullifiers (81% → covered, 15 tests): store empty-noop + round-trip + compaction-on-threshold + transaction-commit semantics; fetch empty-pool + start-height skip + limit-honored + CorruptedSerialization; fetch_compacted limit=0 + empty + past-range + inside-range + undecodable; compact empty + same-timestamp-append + cross-block-order; cleanup_expired future-untouched (strict boundary) + past- removed + undecodable-expiration. - rs-drive-abci/execution/validation/state_transition/common (86% → covered, 18 tests across 6 files): validate_identity _exists absent/present + RetrieveIdentity op recording; validate_non_masternode_identity_exists missing/present-with- master-key; validate_identity_public_key_ids_dont_exist no-dup + duplicate-BasicError + empty-list; validate_identity_public _key_ids_exist all-missing StateError + all-present + partial- overlap reporting only missing id; asset_lock/proof/verify_is _not_spent NotPresent + FullyConsumed + PartiallyConsumed --- .../should_use_creator_id/mod.rs | 140 ++ .../class_methods/try_from_schema/v0/mod.rs | 457 ++++++ .../class_methods/try_from_schema/v1/mod.rs | 531 +++++++ .../document_type/methods/mod.rs | 350 +++++ .../document_type/property/mod.rs | 1330 +++++++++++++++++ .../src/data_contract/document_type/v1/mod.rs | 183 +++ .../check_tx_verification/v0/mod.rs | 77 + .../proof/verify_is_not_spent/v0/mod.rs | 275 ++++ .../v0/mod.rs | 110 ++ .../common/validate_identity_exists/v0/mod.rs | 91 ++ .../v0/mod.rs | 155 ++ .../v0/mod.rs | 170 +++ .../v0/mod.rs | 88 ++ .../src/query/document_query/v0/mod.rs | 304 ++++ .../v0/mod.rs | 118 ++ .../prove_address_funds_trunk_query/v0/mod.rs | 65 + .../prove/prove_balance_and_nonce/v0/mod.rs | 108 ++ .../fetch_identity_contract_nonce/v0/mod.rs | 162 ++ .../merge_identity_contract_nonce/v0/mod.rs | 66 + .../prove_identity_contract_nonce/v0/mod.rs | 99 ++ .../v0/mod.rs | 106 ++ .../nullifiers/compact_nullifiers/v0/mod.rs | 110 ++ .../fetch_compacted_nullifiers/v0/mod.rs | 139 ++ .../nullifiers/fetch_nullifiers/v0/mod.rs | 107 ++ .../nullifiers/store_nullifiers/v0/mod.rs | 151 ++ .../tokens/status/fetch_token_status/mod.rs | 61 + .../status/fetch_token_status/v0/mod.rs | 158 ++ .../status/fetch_token_statuses/v0/mod.rs | 86 ++ .../tokens/status/prove_token_statuses/mod.rs | 41 + .../v0/mod.rs | 82 + .../v0/mod.rs | 80 + 31 files changed, 6000 insertions(+) diff --git a/packages/rs-dpp/src/data_contract/document_type/class_methods/should_use_creator_id/mod.rs b/packages/rs-dpp/src/data_contract/document_type/class_methods/should_use_creator_id/mod.rs index f881d019a71..0f0e3620fbe 100644 --- a/packages/rs-dpp/src/data_contract/document_type/class_methods/should_use_creator_id/mod.rs +++ b/packages/rs-dpp/src/data_contract/document_type/class_methods/should_use_creator_id/mod.rs @@ -88,3 +88,143 @@ fn should_use_creator_id_class_method( }), } } + +#[cfg(test)] +mod tests { + use super::*; + use assert_matches::assert_matches; + + /// Returns a clone of PlatformVersion::latest() with `should_add_creator_id` + /// forced to the requested value. + fn platform_version_with_should_add_creator_id(version: u16) -> PlatformVersion { + let mut v = PlatformVersion::latest().clone(); + v.dpp + .contract_versions + .document_type_versions + .schema + .should_add_creator_id = version; + v + } + + // ------------------------------------------------------------------ + // Version 0: always returns false regardless of inputs + // ------------------------------------------------------------------ + #[test] + fn version_0_always_returns_false_even_if_transferable_and_trade_mode_set() { + let v = platform_version_with_should_add_creator_id(0); + let result = should_use_creator_id_class_method( + 1, + 1, + Transferable::Always, + TradeMode::DirectPurchase, + &v, + ) + .expect("should not error"); + assert!(!result); + } + + // ------------------------------------------------------------------ + // Version 1: contract_version_type > 0 required + // ------------------------------------------------------------------ + #[test] + fn version_1_contract_version_zero_returns_false() { + let v = platform_version_with_should_add_creator_id(1); + // contract_version_type = 0 short-circuits to false + let result = should_use_creator_id_class_method( + 0, + 1, + Transferable::Always, + TradeMode::DirectPurchase, + &v, + ) + .expect("should not error"); + assert!(!result); + } + + #[test] + fn version_1_contract_config_version_zero_returns_false() { + let v = platform_version_with_should_add_creator_id(1); + let result = should_use_creator_id_class_method( + 1, + 0, + Transferable::Always, + TradeMode::DirectPurchase, + &v, + ) + .expect("should not error"); + assert!(!result); + } + + #[test] + fn version_1_non_transferable_and_no_trade_mode_returns_false() { + let v = platform_version_with_should_add_creator_id(1); + let result = + should_use_creator_id_class_method(1, 1, Transferable::Never, TradeMode::None, &v) + .expect("should not error"); + assert!(!result); + } + + #[test] + fn version_1_transferable_returns_true() { + let v = platform_version_with_should_add_creator_id(1); + let result = + should_use_creator_id_class_method(1, 1, Transferable::Always, TradeMode::None, &v) + .expect("should not error"); + assert!(result); + } + + #[test] + fn version_1_trade_mode_set_returns_true_even_when_not_transferable() { + let v = platform_version_with_should_add_creator_id(1); + let result = should_use_creator_id_class_method( + 1, + 1, + Transferable::Never, + TradeMode::DirectPurchase, + &v, + ) + .expect("should not error"); + assert!(result); + } + + #[test] + fn version_1_both_transferable_and_trade_mode_returns_true() { + let v = platform_version_with_should_add_creator_id(1); + let result = should_use_creator_id_class_method( + 1, + 1, + Transferable::Always, + TradeMode::DirectPurchase, + &v, + ) + .expect("should not error"); + assert!(result); + } + + // ------------------------------------------------------------------ + // Unknown version error path + // ------------------------------------------------------------------ + #[test] + fn unknown_version_returns_unknown_version_mismatch_error() { + let v = platform_version_with_should_add_creator_id(255); + let result = should_use_creator_id_class_method( + 1, + 1, + Transferable::Always, + TradeMode::DirectPurchase, + &v, + ); + assert_matches!( + result, + Err(ProtocolError::UnknownVersionMismatch { + method, + known_versions, + received, + }) => { + assert_eq!(method, "DocumentType::should_use_creator_id"); + assert_eq!(known_versions, vec![0, 1]); + assert_eq!(received, 255); + } + ); + } +} diff --git a/packages/rs-dpp/src/data_contract/document_type/class_methods/try_from_schema/v0/mod.rs b/packages/rs-dpp/src/data_contract/document_type/class_methods/try_from_schema/v0/mod.rs index aa1ae0fe9b4..5bedb1d5edc 100644 --- a/packages/rs-dpp/src/data_contract/document_type/class_methods/try_from_schema/v0/mod.rs +++ b/packages/rs-dpp/src/data_contract/document_type/class_methods/try_from_schema/v0/mod.rs @@ -798,4 +798,461 @@ mod tests { ); } } + + mod error_paths { + use super::*; + + fn default_config() -> DataContractConfig { + DataContractConfig::default_for_version(PlatformVersion::latest()) + .expect("should create a default config") + } + + // -------- MissingPositionsInDocumentTypePropertiesError -------- + #[test] + fn non_continuous_positions_returns_missing_positions_error() { + let platform_version = PlatformVersion::latest(); + // positions 0 and 2 — 1 is missing + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 10_u32}, + "c": {"type": "string", "position": 2, "maxLength": 10_u32}, + }, + "additionalProperties": false, + }); + let result = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError( + BasicError::MissingPositionsInDocumentTypePropertiesError(_) + ) + ) + } + ); + } + + // -------- DuplicateIndexNameError -------- + #[test] + fn duplicate_index_name_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "field_a": {"type": "string", "position": 0, "maxLength": 60_u32}, + "field_b": {"type": "string", "position": 1, "maxLength": 60_u32}, + }, + "indices": [ + { + "name": "dup", + "properties": [{"field_a": "asc"}], + }, + { + "name": "dup", + "properties": [{"field_b": "asc"}], + }, + ], + "additionalProperties": false, + }); + + let result = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError(BasicError::DuplicateIndexNameError(_)) + ) + } + ); + } + + // -------- UndefinedIndexPropertyError -------- + #[test] + fn undefined_index_property_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "field_a": {"type": "string", "position": 0, "maxLength": 60_u32}, + }, + "indices": [ + { + "name": "by_unknown", + "properties": [{"unknown_field": "asc"}], + }, + ], + "additionalProperties": false, + }); + + let result = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError(BasicError::UndefinedIndexPropertyError(_)) + ) + } + ); + } + + // -------- InvalidIndexedPropertyConstraintError: string maxLength too large -------- + #[test] + fn indexed_string_exceeding_max_length_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "big_string": { + "type": "string", + "position": 0, + // Above MAX_INDEXED_STRING_PROPERTY_LENGTH (63) + "maxLength": 1000_u32, + }, + }, + "indices": [ + { + "name": "byBigString", + "properties": [{"big_string": "asc"}], + }, + ], + "additionalProperties": false, + }); + + let result = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError( + BasicError::InvalidIndexedPropertyConstraintError(_) + ) + ) + } + ); + } + + // -------- InvalidIndexedPropertyConstraintError: byte-array maxItems too large -------- + #[test] + fn indexed_byte_array_exceeding_max_items_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "big_bytes": { + "type": "array", + "byteArray": true, + // Above MAX_INDEXED_BYTE_ARRAY_PROPERTY_LENGTH (255) + "maxItems": 1000_u32, + "position": 0, + }, + }, + "indices": [ + { + "name": "byBigBytes", + "properties": [{"big_bytes": "asc"}], + }, + ], + "additionalProperties": false, + }); + + let result = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError( + BasicError::InvalidIndexedPropertyConstraintError(_) + ) + ) + } + ); + } + + // -------- Valid: indexed string at the size limit succeeds -------- + #[test] + fn indexed_string_at_exact_max_length_is_accepted() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "ok_string": { + "type": "string", + "position": 0, + "maxLength": MAX_INDEXED_STRING_PROPERTY_LENGTH as u32, + }, + }, + "indices": [ + { + "name": "byOk", + "properties": [{"ok_string": "asc"}], + }, + ], + "additionalProperties": false, + }); + + let result = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert!(result.is_ok(), "expected Ok, got {:?}", result.err()); + } + + // -------- Valid: full_validation=false skips all validation -------- + #[test] + fn skip_validation_accepts_invalid_name_when_full_validation_false() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "field_a": {"type": "string", "position": 0, "maxLength": 10_u32} + }, + "additionalProperties": false, + }); + + // Name "invalid name" has a space which is not allowed — but skipping + // validation should let this through. + let result = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "invalid name", + schema, + None, + &default_config(), + false, // full_validation = false + &mut vec![], + platform_version, + ); + assert!( + result.is_ok(), + "full_validation=false should skip name check, got {:?}", + result.err() + ); + } + + // -------- schema_map error path: schema must be object -------- + #[test] + fn non_object_schema_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!("not_an_object"); + + let result = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + false, // skip JSON-schema validation so we exercise .to_map() error path + &mut vec![], + platform_version, + ); + assert!( + result.is_err(), + "non-object schema must fail, got {:?}", + result + ); + } + + // -------- System properties and required_fields interplay -------- + #[test] + fn required_fields_are_tracked_on_successful_build() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "field_a": {"type": "string", "position": 0, "maxLength": 10_u32}, + "field_b": {"type": "string", "position": 1, "maxLength": 10_u32}, + }, + "required": ["field_a"], + "additionalProperties": false, + }); + + let dt = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + true, + &mut vec![], + platform_version, + ) + .expect("should succeed"); + assert!(dt.required_fields.contains("field_a")); + assert!(!dt.required_fields.contains("field_b")); + } + + // -------- transient_fields handling -------- + #[test] + fn transient_fields_are_tracked_on_successful_build() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "temp_field": {"type": "string", "position": 0, "maxLength": 10_u32}, + "perm_field": {"type": "string", "position": 1, "maxLength": 10_u32}, + }, + "transient": ["temp_field"], + "additionalProperties": false, + }); + + let dt = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + true, + &mut vec![], + platform_version, + ) + .expect("should succeed"); + assert!(dt.transient_fields.contains("temp_field")); + assert!(!dt.transient_fields.contains("perm_field")); + } + + // -------- Nested object properties produce flattened + nested ---- + #[test] + fn nested_object_properties_are_both_flattened_and_nested() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "outer": { + "type": "object", + "position": 0, + "properties": { + "inner": { + "type": "string", + "position": 0, + "maxLength": 10_u32, + } + }, + "additionalProperties": false, + } + }, + "additionalProperties": false, + }); + let dt = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + true, + &mut vec![], + platform_version, + ) + .expect("should succeed"); + // flattened form uses dotted path + assert!(dt.flattened_properties.contains_key("outer.inner")); + // nested form keeps the Object wrapper + assert!(dt.properties.contains_key("outer")); + } + + // -------- TRANSFERABLE u8 conversion -------- + #[test] + fn invalid_transferable_integer_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + // 3 is not a valid Transferable value (only 0 or 1) + "transferable": 3_u64, + "properties": { + "field_a": {"type": "string", "position": 0, "maxLength": 10_u32} + }, + "additionalProperties": false, + }); + let result = DocumentTypeV0::try_from_schema( + Identifier::new([1; 32]), + 0, + default_config().version(), + "doc", + schema, + None, + &default_config(), + false, // skip schema validation; this is the try_into() failure path + &mut vec![], + platform_version, + ); + assert!(result.is_err()); + } + } } diff --git a/packages/rs-dpp/src/data_contract/document_type/class_methods/try_from_schema/v1/mod.rs b/packages/rs-dpp/src/data_contract/document_type/class_methods/try_from_schema/v1/mod.rs index 0b956f8c917..adf85180244 100644 --- a/packages/rs-dpp/src/data_contract/document_type/class_methods/try_from_schema/v1/mod.rs +++ b/packages/rs-dpp/src/data_contract/document_type/class_methods/try_from_schema/v1/mod.rs @@ -1061,4 +1061,535 @@ mod tests { ); } } + + mod error_paths { + use super::*; + use crate::data_contract::document_type::token_costs::accessors::TokenCostGettersV0; + + fn default_config() -> DataContractConfig { + DataContractConfig::default_for_version(PlatformVersion::latest()) + .expect("should create a default config") + } + + // ---------- Index errors ---------- + #[test] + fn duplicate_index_name_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 40_u32}, + "b": {"type": "string", "position": 1, "maxLength": 40_u32}, + }, + "indices": [ + {"name": "dup", "properties": [{"a": "asc"}]}, + {"name": "dup", "properties": [{"b": "asc"}]}, + ], + "additionalProperties": false, + }); + let result = DocumentTypeV1::try_from_schema( + Identifier::new([1; 32]), + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError(BasicError::DuplicateIndexNameError(_)) + ) + } + ); + } + + #[test] + fn undefined_index_property_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 40_u32}, + }, + "indices": [ + {"name": "idx", "properties": [{"missing": "asc"}]}, + ], + "additionalProperties": false, + }); + let result = DocumentTypeV1::try_from_schema( + Identifier::new([1; 32]), + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError(BasicError::UndefinedIndexPropertyError(_)) + ) + } + ); + } + + #[test] + fn missing_positions_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 10_u32}, + "c": {"type": "string", "position": 2, "maxLength": 10_u32}, + }, + "additionalProperties": false, + }); + let result = DocumentTypeV1::try_from_schema( + Identifier::new([1; 32]), + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError( + BasicError::MissingPositionsInDocumentTypePropertiesError(_) + ) + ) + } + ); + } + + #[test] + fn indexed_string_exceeding_max_length_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "big": {"type": "string", "position": 0, "maxLength": 1000_u32}, + }, + "indices": [ + {"name": "byBig", "properties": [{"big": "asc"}]}, + ], + "additionalProperties": false, + }); + let result = DocumentTypeV1::try_from_schema( + Identifier::new([1; 32]), + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError( + BasicError::InvalidIndexedPropertyConstraintError(_) + ) + ) + } + ); + } + + // ---------- Token cost: InvalidTokenPositionError ---------- + #[test] + fn token_cost_with_unknown_position_and_no_contract_id_errors() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 40_u32}, + }, + "tokenCost": { + "create": { + // No contractId and an unknown tokenPosition -> error + "tokenPosition": 99_u64, + "amount": 1_u64, + } + }, + "additionalProperties": false, + }); + + let result = DocumentTypeV1::try_from_schema( + Identifier::new([1; 32]), + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), // no token configurations + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError(BasicError::InvalidTokenPositionError(_)) + ) + } + ); + } + + // ---------- Token cost: RedundantDocumentPaidForByTokenWithContractId ---------- + #[test] + fn token_cost_with_own_contract_id_errors_redundant() { + let platform_version = PlatformVersion::latest(); + let own_id = Identifier::new([42; 32]); + + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 40_u32}, + }, + "tokenCost": { + "create": { + "contractId": own_id.to_buffer(), + "tokenPosition": 0_u64, + "amount": 1_u64, + } + }, + "additionalProperties": false, + }); + + let result = DocumentTypeV1::try_from_schema( + own_id, + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError( + BasicError::RedundantDocumentPaidForByTokenWithContractId(_) + ) + ) + } + ); + } + + // ---------- Token cost: BurnToken on external contract is not allowed ---------- + #[test] + fn burn_token_on_external_contract_returns_error() { + let platform_version = PlatformVersion::latest(); + let own_id = Identifier::new([42; 32]); + let external_id = Identifier::new([99; 32]); + + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 40_u32}, + }, + "tokenCost": { + "create": { + "contractId": external_id.to_buffer(), + "tokenPosition": 0_u64, + "amount": 1_u64, + "effect": 1_u64, // BurnToken + } + }, + "additionalProperties": false, + }); + + let result = DocumentTypeV1::try_from_schema( + own_id, + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + true, + &mut vec![], + platform_version, + ); + assert_matches!( + result, + Err(ProtocolError::ConsensusError(boxed)) => { + assert_matches!( + boxed.as_ref(), + ConsensusError::BasicError( + BasicError::TokenPaymentByBurningOnlyAllowedOnInternalTokenError(_) + ) + ) + } + ); + } + + // ---------- Token cost: valid external contract transfer is accepted ---------- + #[test] + fn valid_token_cost_with_external_contract_is_accepted() { + let platform_version = PlatformVersion::latest(); + let own_id = Identifier::new([42; 32]); + let external_id = Identifier::new([99; 32]); + + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 40_u32}, + }, + "tokenCost": { + "create": { + "contractId": external_id.to_buffer(), + "tokenPosition": 0_u64, + "amount": 5_u64, + "effect": 0_u64, // TransferTokenToContractOwner + } + }, + "additionalProperties": false, + }); + + let dt = DocumentTypeV1::try_from_schema( + own_id, + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + true, + &mut vec![], + platform_version, + ) + .expect("should be accepted"); + // The create cost should be populated + let cost = dt.token_costs.document_creation_token_cost(); + assert!(cost.is_some()); + let cost = cost.unwrap(); + assert_eq!(cost.token_amount, 5); + assert_eq!(cost.token_contract_position, 0); + assert_eq!(cost.contract_id, Some(external_id)); + } + + // ---------- With full_validation = false, token cost validations are skipped + #[test] + fn invalid_token_cost_without_validation_still_constructs() { + let platform_version = PlatformVersion::latest(); + let own_id = Identifier::new([42; 32]); + + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 40_u32}, + }, + "tokenCost": { + "create": { + // own contract id but validation skipped + "contractId": own_id.to_buffer(), + "tokenPosition": 0_u64, + "amount": 1_u64, + } + }, + "additionalProperties": false, + }); + + let dt = DocumentTypeV1::try_from_schema( + own_id, + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + false, // skip validation + &mut vec![], + platform_version, + ) + .expect("should construct without validation"); + assert!(dt.token_costs.document_creation_token_cost().is_some()); + } + + // ---------- TRANSFERABLE u8 conversion failure path ---------- + #[test] + fn invalid_transferable_integer_returns_error() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "transferable": 7_u64, + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 10_u32} + }, + "additionalProperties": false, + }); + let result = DocumentTypeV1::try_from_schema( + Identifier::new([1; 32]), + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + false, // skip schema validation + &mut vec![], + platform_version, + ); + assert!(result.is_err()); + } + + // ---------- Non-object schema fails in .to_map() ---------- + #[test] + fn non_object_schema_returns_error_without_validation() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!("not_an_object"); + let result = DocumentTypeV1::try_from_schema( + Identifier::new([1; 32]), + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + false, + &mut vec![], + platform_version, + ); + assert!(result.is_err()); + } + + // ---------- Valid schema with all optional configuration fields set ---------- + #[test] + fn full_config_options_are_preserved_on_successful_build() { + let platform_version = PlatformVersion::latest(); + let schema = platform_value!({ + "type": "object", + "documentsKeepHistory": true, + "documentsMutable": true, + "canBeDeleted": false, + "transferable": 1_u64, + "tradeMode": 1_u64, + "creationRestrictionMode": 1_u64, + "signatureSecurityLevelRequirement": 1_u64, + "requiresIdentityEncryptionBoundedKey": 0_u64, + "requiresIdentityDecryptionBoundedKey": 0_u64, + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 10_u32}, + }, + "additionalProperties": false, + }); + let dt = DocumentTypeV1::try_from_schema( + Identifier::new([1; 32]), + 1, + default_config().version(), + "doc", + schema, + None, + &BTreeMap::new(), + &default_config(), + true, + &mut vec![], + platform_version, + ) + .expect("should build"); + assert!(dt.documents_keep_history); + assert!(dt.documents_mutable); + assert!(!dt.documents_can_be_deleted); + assert!(dt.documents_transferable.is_transferable()); + // Non-default SecurityLevel was parsed (1 = CRITICAL vs default HIGH) + assert_eq!(dt.security_level_requirement, SecurityLevel::CRITICAL); + assert!(dt.requires_identity_encryption_bounded_key.is_some()); + assert!(dt.requires_identity_decryption_bounded_key.is_some()); + } + + // ---------- v1 behavior: BurnToken is allowed if contract is "own" (no contractId) ---------- + #[test] + fn burn_effect_on_own_contract_is_allowed_when_token_configured() { + use crate::data_contract::associated_token::token_configuration::v0::TokenConfigurationV0; + use crate::data_contract::associated_token::token_configuration::TokenConfiguration; + use crate::data_contract::TokenContractPosition; + use platform_value::string_encoding::Encoding; + + let platform_version = PlatformVersion::latest(); + + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 40_u32}, + }, + "tokenCost": { + "create": { + // No contractId => "own contract"; Burn is allowed + "tokenPosition": 0_u64, + "amount": 1_u64, + "effect": 1_u64, + } + }, + "additionalProperties": false, + }); + + let token_cfg = TokenConfigurationV0::default_most_restrictive(); + let mut token_configurations: BTreeMap = + BTreeMap::new(); + token_configurations.insert(0, TokenConfiguration::V0(token_cfg)); + + // Also silence an unused-import warning on Encoding in case the compile path differs. + let _ = Encoding::Base58; + + let dt = DocumentTypeV1::try_from_schema( + Identifier::new([42; 32]), + 1, + default_config().version(), + "doc", + schema, + None, + &token_configurations, + &default_config(), + true, + &mut vec![], + platform_version, + ) + .expect("should construct with own-contract burn"); + assert!(dt.token_costs.document_creation_token_cost().is_some()); + } + } } diff --git a/packages/rs-dpp/src/data_contract/document_type/methods/mod.rs b/packages/rs-dpp/src/data_contract/document_type/methods/mod.rs index ff2ffa93e22..ca52f02ec6a 100644 --- a/packages/rs-dpp/src/data_contract/document_type/methods/mod.rs +++ b/packages/rs-dpp/src/data_contract/document_type/methods/mod.rs @@ -344,3 +344,353 @@ pub trait DocumentTypeV0Methods: DocumentTypeV0Getters + DocumentTypeV0MethodsVe } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::data_contract::config::DataContractConfig; + use crate::data_contract::document_type::DocumentType; + use platform_value::{platform_value, Identifier}; + + /// Build a document type from a schema using latest platform version. + fn build_doc_type(name: &str, schema: Value) -> DocumentType { + let platform_version = PlatformVersion::latest(); + let config = DataContractConfig::default_for_version(platform_version) + .expect("should create default config"); + DocumentType::try_from_schema( + Identifier::new([1; 32]), + 1, + config.version(), + name, + schema, + None, + &BTreeMap::new(), + &config, + false, + &mut Vec::new(), + platform_version, + ) + .expect("should build doc type") + } + + // -------------------------------------------------------------- + // DocumentTypeBasicMethods::requires_revision / initial_revision + // -------------------------------------------------------------- + #[test] + fn requires_revision_false_when_immutable_non_transferable_and_no_trade_mode() { + let schema = platform_value!({ + "type": "object", + "documentsMutable": false, + "transferable": 0_u64, + "tradeMode": 0_u64, + "properties": { + "field_a": {"type": "string", "position": 0, "maxLength": 20_u32} + }, + "additionalProperties": false, + }); + let dt = build_doc_type("immutable_doc", schema); + // Requires revision false => initial_revision must be None + assert!(!dt.as_ref().requires_revision_ref()); + assert_eq!(dt.as_ref().initial_revision_ref(), None); + } + + #[test] + fn requires_revision_true_when_mutable() { + let schema = platform_value!({ + "type": "object", + "documentsMutable": true, + "properties": { + "field_a": {"type": "string", "position": 0, "maxLength": 20_u32} + }, + "additionalProperties": false, + }); + let dt = build_doc_type("mutable_doc", schema); + assert!(dt.as_ref().requires_revision_ref()); + assert_eq!(dt.as_ref().initial_revision_ref(), Some(INITIAL_REVISION)); + } + + #[test] + fn requires_revision_true_when_transferable_even_if_immutable() { + let schema = platform_value!({ + "type": "object", + "documentsMutable": false, + "transferable": 1_u64, + "properties": { + "field_a": {"type": "string", "position": 0, "maxLength": 20_u32} + }, + "additionalProperties": false, + }); + let dt = build_doc_type("transferable_doc", schema); + assert!(dt.as_ref().requires_revision_ref()); + assert_eq!(dt.as_ref().initial_revision_ref(), Some(INITIAL_REVISION)); + } + + #[test] + fn requires_revision_true_when_trade_mode_seller_sets_price() { + let schema = platform_value!({ + "type": "object", + "documentsMutable": false, + "tradeMode": 1_u64, // DirectPurchase -> seller_sets_price = true + "properties": { + "field_a": {"type": "string", "position": 0, "maxLength": 20_u32} + }, + "additionalProperties": false, + }); + let dt = build_doc_type("nft_doc", schema); + assert!(dt.as_ref().requires_revision_ref()); + } + + // -------------------------------------------------------------- + // DocumentTypeBasicMethods::top_level_indices and + // top_level_indices_of_contested_unique_indexes + // -------------------------------------------------------------- + #[test] + fn top_level_indices_returns_first_property_of_each_index() { + let schema = platform_value!({ + "type": "object", + "properties": { + "first_name": {"type": "string", "position": 0, "maxLength": 60_u32}, + "last_name": {"type": "string", "position": 1, "maxLength": 60_u32} + }, + "indices": [ + { + "name": "byFirst", + "properties": [{"first_name": "asc"}], + }, + { + "name": "byLast", + "properties": [{"last_name": "asc"}], + }, + ], + "additionalProperties": false, + }); + let dt = build_doc_type("person", schema); + let dt_ref = dt.as_ref(); + let top: Vec<&IndexProperty> = dt_ref.top_level_indices_ref(); + // Two indices each contribute their first property + assert_eq!(top.len(), 2); + let names: Vec<&str> = top.iter().map(|p| p.name.as_str()).collect(); + assert!(names.contains(&"first_name")); + assert!(names.contains(&"last_name")); + } + + #[test] + fn top_level_indices_of_contested_unique_indexes_excludes_non_contested() { + let schema = platform_value!({ + "type": "object", + "documentsMutable": false, + "properties": { + "first_name": {"type": "string", "position": 0, "maxLength": 60_u32}, + "last_name": {"type": "string", "position": 1, "maxLength": 60_u32} + }, + "indices": [ + { + "name": "byFirst", + "properties": [{"first_name": "asc"}], + }, + { + "name": "byLast", + "properties": [{"last_name": "asc"}], + }, + ], + "additionalProperties": false, + }); + let dt = build_doc_type("person_no_contested", schema); + let dt_ref = dt.as_ref(); + let contested = dt_ref.top_level_indices_of_contested_unique_indexes_ref(); + // Neither index is contested + assert!(contested.is_empty()); + } + + // -------------------------------------------------------------- + // DocumentTypeBasicMethods::unique_id_for_document_field + // -------------------------------------------------------------- + #[test] + fn unique_id_for_document_field_concatenates_identifier_and_base_event() { + let schema = platform_value!({ + "type": "object", + "properties": { + "field_a": {"type": "string", "position": 0, "maxLength": 20_u32} + }, + "additionalProperties": false, + }); + let dt = build_doc_type("events", schema); + let dt_ref = dt.as_ref(); + let index_level = dt_ref.index_structure_ref(); + let base_event: [u8; 32] = [7; 32]; + let out = dt_ref.unique_id_for_document_field_ref(index_level, base_event); + // Output must be 8 bytes (u64 identifier) + 32 bytes (base_event) = 40 + assert_eq!(out.len(), 8 + 32); + // Last 32 bytes must match base_event exactly + assert_eq!(&out[8..], &base_event); + // First 8 bytes must be identifier BE bytes + let id_bytes = index_level.identifier().to_be_bytes(); + assert_eq!(&out[..8], &id_bytes); + } + + // -------------------------------------------------------------- + // DocumentTypeV0Methods::sanitize_document_properties + // -------------------------------------------------------------- + #[test] + fn sanitize_document_properties_converts_hex_bytearray_to_bytes() { + let schema = platform_value!({ + "type": "object", + "properties": { + "payload": { + "type": "array", + "byteArray": true, + "minItems": 1_u32, + "maxItems": 64_u32, + "position": 0 + } + }, + "additionalProperties": false, + }); + let dt = build_doc_type("blob_doc", schema); + // 4-byte hex string + let mut props: BTreeMap = BTreeMap::new(); + props.insert("payload".to_string(), Value::Text("deadbeef".to_string())); + + dt.as_ref().sanitize_document_properties_ref(&mut props); + + let got = props.get("payload").unwrap(); + match got { + Value::Bytes(bytes) => assert_eq!(bytes.as_slice(), &[0xde, 0xad, 0xbe, 0xef]), + other => panic!("expected sanitized Bytes, got {:?}", other), + } + } + + #[test] + fn sanitize_document_properties_leaves_unknown_fields_untouched() { + let schema = platform_value!({ + "type": "object", + "properties": { + "known": {"type": "string", "position": 0, "maxLength": 20_u32} + }, + "additionalProperties": false, + }); + let dt = build_doc_type("known_only", schema); + let mut props: BTreeMap = BTreeMap::new(); + props.insert( + "unknown_field".to_string(), + Value::Text("abcdef".to_string()), + ); + props.insert("known".to_string(), Value::Text("hello".to_string())); + + dt.as_ref().sanitize_document_properties_ref(&mut props); + + // unknown_field should be unchanged + assert_eq!( + props.get("unknown_field").unwrap(), + &Value::Text("abcdef".to_string()) + ); + // known stays a string (no sanitization applies for String type) + assert_eq!( + props.get("known").unwrap(), + &Value::Text("hello".to_string()) + ); + } + + // -------------------------------------------------------------- + // Helper extensions so we can dispatch to the underlying + // DocumentTypeV0/V1 via the enum without exposing new API. + // (Implemented inline for the tests.) + // -------------------------------------------------------------- + trait DocumentTypeTestHelpers { + fn requires_revision_ref(&self) -> bool; + fn initial_revision_ref(&self) -> Option; + fn top_level_indices_ref(&self) -> Vec<&IndexProperty>; + fn top_level_indices_of_contested_unique_indexes_ref(&self) -> Vec<&IndexProperty>; + fn index_structure_ref(&self) -> &IndexLevel; + fn unique_id_for_document_field_ref( + &self, + index_level: &IndexLevel, + base_event: [u8; 32], + ) -> Vec; + fn sanitize_document_properties_ref(&self, properties: &mut BTreeMap); + } + + impl<'a> DocumentTypeTestHelpers for crate::data_contract::document_type::DocumentTypeRef<'a> { + fn requires_revision_ref(&self) -> bool { + match self { + crate::data_contract::document_type::DocumentTypeRef::V0(v0) => { + v0.requires_revision() + } + crate::data_contract::document_type::DocumentTypeRef::V1(v1) => { + v1.requires_revision() + } + } + } + + fn initial_revision_ref(&self) -> Option { + match self { + crate::data_contract::document_type::DocumentTypeRef::V0(v0) => { + v0.initial_revision() + } + crate::data_contract::document_type::DocumentTypeRef::V1(v1) => { + v1.initial_revision() + } + } + } + + fn top_level_indices_ref(&self) -> Vec<&IndexProperty> { + match self { + crate::data_contract::document_type::DocumentTypeRef::V0(v0) => { + v0.top_level_indices() + } + crate::data_contract::document_type::DocumentTypeRef::V1(v1) => { + v1.top_level_indices() + } + } + } + + fn top_level_indices_of_contested_unique_indexes_ref(&self) -> Vec<&IndexProperty> { + match self { + crate::data_contract::document_type::DocumentTypeRef::V0(v0) => { + v0.top_level_indices_of_contested_unique_indexes() + } + crate::data_contract::document_type::DocumentTypeRef::V1(v1) => { + v1.top_level_indices_of_contested_unique_indexes() + } + } + } + + fn index_structure_ref(&self) -> &IndexLevel { + use crate::data_contract::document_type::accessors::DocumentTypeV0Getters; + match self { + crate::data_contract::document_type::DocumentTypeRef::V0(v0) => { + v0.index_structure() + } + crate::data_contract::document_type::DocumentTypeRef::V1(v1) => { + v1.index_structure() + } + } + } + + fn unique_id_for_document_field_ref( + &self, + index_level: &IndexLevel, + base_event: [u8; 32], + ) -> Vec { + match self { + crate::data_contract::document_type::DocumentTypeRef::V0(v0) => { + v0.unique_id_for_document_field(index_level, base_event) + } + crate::data_contract::document_type::DocumentTypeRef::V1(v1) => { + v1.unique_id_for_document_field(index_level, base_event) + } + } + } + + fn sanitize_document_properties_ref(&self, properties: &mut BTreeMap) { + match self { + crate::data_contract::document_type::DocumentTypeRef::V0(v0) => { + v0.sanitize_document_properties(properties) + } + crate::data_contract::document_type::DocumentTypeRef::V1(v1) => { + v1.sanitize_document_properties(properties) + } + } + } + } +} diff --git a/packages/rs-dpp/src/data_contract/document_type/property/mod.rs b/packages/rs-dpp/src/data_contract/document_type/property/mod.rs index f1f63482de4..cd20adc950c 100644 --- a/packages/rs-dpp/src/data_contract/document_type/property/mod.rs +++ b/packages/rs-dpp/src/data_contract/document_type/property/mod.rs @@ -5664,4 +5664,1334 @@ mod tests { assert_eq!(result.len(), 17); // 1 marker + 16 bytes assert_eq!(result[0], 0xFF); } + + // ----------------------------------------------------------------------- + // random_value() tests - exercise branches not covered elsewhere + // ----------------------------------------------------------------------- + + use rand::SeedableRng; + + #[test] + fn test_random_value_produces_expected_type_for_all_scalar_variants() { + let mut rng = StdRng::seed_from_u64(1); + assert!(matches!( + DocumentPropertyType::U128.random_value(&mut rng), + Value::U128(_) + )); + assert!(matches!( + DocumentPropertyType::I128.random_value(&mut rng), + Value::I128(_) + )); + assert!(matches!( + DocumentPropertyType::U64.random_value(&mut rng), + Value::U64(_) + )); + assert!(matches!( + DocumentPropertyType::I64.random_value(&mut rng), + Value::I64(_) + )); + assert!(matches!( + DocumentPropertyType::U32.random_value(&mut rng), + Value::U32(_) + )); + assert!(matches!( + DocumentPropertyType::I32.random_value(&mut rng), + Value::I32(_) + )); + assert!(matches!( + DocumentPropertyType::U16.random_value(&mut rng), + Value::U16(_) + )); + assert!(matches!( + DocumentPropertyType::I16.random_value(&mut rng), + Value::I16(_) + )); + assert!(matches!( + DocumentPropertyType::U8.random_value(&mut rng), + Value::U8(_) + )); + assert!(matches!( + DocumentPropertyType::I8.random_value(&mut rng), + Value::I8(_) + )); + assert!(matches!( + DocumentPropertyType::F64.random_value(&mut rng), + Value::Float(_) + )); + assert!(matches!( + DocumentPropertyType::Boolean.random_value(&mut rng), + Value::Bool(_) + )); + assert!(matches!( + DocumentPropertyType::Date.random_value(&mut rng), + Value::Float(_) + )); + assert!(matches!( + DocumentPropertyType::Identifier.random_value(&mut rng), + Value::Identifier(_) + )); + } + + #[test] + fn test_random_value_string_respects_size_bounds() { + let mut rng = StdRng::seed_from_u64(2); + let prop = DocumentPropertyType::String(StringPropertySizes { + min_length: Some(5), + max_length: Some(10), + }); + // Exercise several random draws + for _ in 0..5 { + if let Value::Text(s) = prop.random_value(&mut rng) { + assert!( + s.len() >= 5 && s.len() <= 10, + "length out of range: {}", + s.len() + ); + assert!(s.chars().all(|c| c.is_ascii_alphanumeric())); + } else { + panic!("expected Text variant"); + } + } + } + + #[test] + fn test_random_value_byte_array_fixed_size_20() { + let mut rng = StdRng::seed_from_u64(3); + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(20), + max_size: Some(20), + }); + // min == max == 20 => Value::Bytes20 specialization + match prop.random_value(&mut rng) { + Value::Bytes20(_) => {} + v => panic!("expected Bytes20, got {:?}", v), + } + } + + #[test] + fn test_random_value_byte_array_fixed_size_32() { + let mut rng = StdRng::seed_from_u64(4); + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(32), + max_size: Some(32), + }); + // min == max == 32 => Value::Bytes32 specialization + match prop.random_value(&mut rng) { + Value::Bytes32(_) => {} + v => panic!("expected Bytes32, got {:?}", v), + } + } + + #[test] + fn test_random_value_byte_array_fixed_size_36() { + let mut rng = StdRng::seed_from_u64(5); + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(36), + max_size: Some(36), + }); + // min == max == 36 => Value::Bytes36 specialization + match prop.random_value(&mut rng) { + Value::Bytes36(_) => {} + v => panic!("expected Bytes36, got {:?}", v), + } + } + + #[test] + fn test_random_value_byte_array_fixed_size_other_uses_bytes() { + let mut rng = StdRng::seed_from_u64(6); + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(8), + max_size: Some(8), + }); + // min == max but not in the special {20, 32, 36} set => Value::Bytes + match prop.random_value(&mut rng) { + Value::Bytes(b) => assert_eq!(b.len(), 8), + v => panic!("expected Bytes, got {:?}", v), + } + } + + #[test] + fn test_random_value_byte_array_variable_uses_bytes() { + let mut rng = StdRng::seed_from_u64(7); + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(1), + max_size: Some(10), + }); + // min != max => Value::Bytes (never Bytes20/32/36) + for _ in 0..5 { + match prop.random_value(&mut rng) { + Value::Bytes(b) => { + assert!(b.len() >= 1 && b.len() <= 10); + } + v => panic!("expected Bytes, got {:?}", v), + } + } + } + + #[test] + fn test_random_value_array_and_variable_type_array_return_null() { + let mut rng = StdRng::seed_from_u64(8); + assert_eq!( + DocumentPropertyType::Array(ArrayItemType::Integer).random_value(&mut rng), + Value::Null + ); + assert_eq!( + DocumentPropertyType::VariableTypeArray(vec![]).random_value(&mut rng), + Value::Null + ); + } + + #[test] + fn test_random_value_object_only_includes_required_fields() { + let mut rng = StdRng::seed_from_u64(9); + let mut sub_fields = IndexMap::new(); + sub_fields.insert( + "req".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::U32, + required: true, + transient: false, + }, + ); + sub_fields.insert( + "opt".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::U64, + required: false, + transient: false, + }, + ); + let prop = DocumentPropertyType::Object(sub_fields); + let val = prop.random_value(&mut rng); + if let Value::Map(entries) = val { + assert_eq!( + entries.len(), + 1, + "only the required field should be present" + ); + assert_eq!(entries[0].0, Value::Text("req".to_string())); + assert!(matches!(entries[0].1, Value::U32(_))); + } else { + panic!("expected Map"); + } + } + + // ----------------------------------------------------------------------- + // random_sub_filled_value() tests + // ----------------------------------------------------------------------- + + #[test] + fn test_random_sub_filled_value_string_uses_min_size() { + let mut rng = StdRng::seed_from_u64(10); + let prop = DocumentPropertyType::String(StringPropertySizes { + min_length: Some(7), + max_length: Some(20), + }); + if let Value::Text(s) = prop.random_sub_filled_value(&mut rng) { + assert_eq!(s.len(), 7); + } else { + panic!("expected Text"); + } + } + + #[test] + fn test_random_sub_filled_value_byte_array_uses_min_size() { + let mut rng = StdRng::seed_from_u64(11); + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(4), + max_size: Some(100), + }); + if let Value::Bytes(b) = prop.random_sub_filled_value(&mut rng) { + assert_eq!(b.len(), 4); + } else { + panic!("expected Bytes"); + } + } + + #[test] + fn test_random_sub_filled_value_object_includes_all_fields() { + let mut rng = StdRng::seed_from_u64(12); + let mut sub_fields = IndexMap::new(); + sub_fields.insert( + "req".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::U32, + required: true, + transient: false, + }, + ); + sub_fields.insert( + "opt".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::U64, + required: false, + transient: false, + }, + ); + let prop = DocumentPropertyType::Object(sub_fields); + // sub_filled_value includes ALL fields regardless of required flag + let val = prop.random_sub_filled_value(&mut rng); + if let Value::Map(entries) = val { + assert_eq!(entries.len(), 2); + } else { + panic!("expected Map"); + } + } + + #[test] + fn test_random_sub_filled_value_array_returns_null() { + let mut rng = StdRng::seed_from_u64(13); + assert_eq!( + DocumentPropertyType::Array(ArrayItemType::Integer).random_sub_filled_value(&mut rng), + Value::Null + ); + assert_eq!( + DocumentPropertyType::VariableTypeArray(vec![]).random_sub_filled_value(&mut rng), + Value::Null + ); + } + + #[test] + fn test_random_sub_filled_value_date_returns_float() { + let mut rng = StdRng::seed_from_u64(14); + assert!(matches!( + DocumentPropertyType::Date.random_sub_filled_value(&mut rng), + Value::Float(_) + )); + } + + #[test] + fn test_random_sub_filled_value_identifier() { + let mut rng = StdRng::seed_from_u64(15); + assert!(matches!( + DocumentPropertyType::Identifier.random_sub_filled_value(&mut rng), + Value::Identifier(_) + )); + } + + // ----------------------------------------------------------------------- + // random_filled_value() tests + // ----------------------------------------------------------------------- + + #[test] + fn test_random_filled_value_string_uses_max_size() { + let mut rng = StdRng::seed_from_u64(16); + let prop = DocumentPropertyType::String(StringPropertySizes { + min_length: Some(1), + max_length: Some(12), + }); + if let Value::Text(s) = prop.random_filled_value(&mut rng) { + assert_eq!(s.len(), 12); + } else { + panic!("expected Text"); + } + } + + #[test] + fn test_random_filled_value_byte_array_uses_max_size() { + let mut rng = StdRng::seed_from_u64(17); + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(0), + max_size: Some(9), + }); + if let Value::Bytes(b) = prop.random_filled_value(&mut rng) { + assert_eq!(b.len(), 9); + } else { + panic!("expected Bytes"); + } + } + + #[test] + fn test_random_filled_value_object_includes_all_fields() { + let mut rng = StdRng::seed_from_u64(18); + let mut sub_fields = IndexMap::new(); + sub_fields.insert( + "a".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::U8, + required: true, + transient: false, + }, + ); + sub_fields.insert( + "b".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::Boolean, + required: false, + transient: false, + }, + ); + let prop = DocumentPropertyType::Object(sub_fields); + let val = prop.random_filled_value(&mut rng); + if let Value::Map(entries) = val { + assert_eq!(entries.len(), 2); + } else { + panic!("expected Map"); + } + } + + #[test] + fn test_random_filled_value_scalars() { + let mut rng = StdRng::seed_from_u64(19); + // exhaustively exercise each scalar variant not already covered + assert!(matches!( + DocumentPropertyType::U128.random_filled_value(&mut rng), + Value::U128(_) + )); + assert!(matches!( + DocumentPropertyType::I128.random_filled_value(&mut rng), + Value::I128(_) + )); + assert!(matches!( + DocumentPropertyType::U64.random_filled_value(&mut rng), + Value::U64(_) + )); + assert!(matches!( + DocumentPropertyType::I64.random_filled_value(&mut rng), + Value::I64(_) + )); + assert!(matches!( + DocumentPropertyType::U32.random_filled_value(&mut rng), + Value::U32(_) + )); + assert!(matches!( + DocumentPropertyType::I32.random_filled_value(&mut rng), + Value::I32(_) + )); + assert!(matches!( + DocumentPropertyType::U16.random_filled_value(&mut rng), + Value::U16(_) + )); + assert!(matches!( + DocumentPropertyType::I16.random_filled_value(&mut rng), + Value::I16(_) + )); + assert!(matches!( + DocumentPropertyType::U8.random_filled_value(&mut rng), + Value::U8(_) + )); + assert!(matches!( + DocumentPropertyType::I8.random_filled_value(&mut rng), + Value::I8(_) + )); + assert!(matches!( + DocumentPropertyType::F64.random_filled_value(&mut rng), + Value::Float(_) + )); + assert!(matches!( + DocumentPropertyType::Boolean.random_filled_value(&mut rng), + Value::Bool(_) + )); + assert!(matches!( + DocumentPropertyType::Date.random_filled_value(&mut rng), + Value::Float(_) + )); + assert!(matches!( + DocumentPropertyType::Identifier.random_filled_value(&mut rng), + Value::Identifier(_) + )); + assert_eq!( + DocumentPropertyType::Array(ArrayItemType::Integer).random_filled_value(&mut rng), + Value::Null + ); + assert_eq!( + DocumentPropertyType::VariableTypeArray(vec![]).random_filled_value(&mut rng), + Value::Null + ); + } + + #[test] + fn test_random_size_respects_range() { + let mut rng = StdRng::seed_from_u64(20); + let prop = DocumentPropertyType::String(StringPropertySizes { + min_length: Some(3), + max_length: Some(6), + }); + for _ in 0..10 { + let sz = prop.random_size(&mut rng); + assert!(sz >= 3 && sz <= 6); + } + } + + // ----------------------------------------------------------------------- + // read_optionally_from() corrupted / truncated buffer error paths + // ----------------------------------------------------------------------- + + #[test] + fn test_read_optionally_from_u64_truncated_returns_error() { + // Only 3 bytes but u64 needs 8 + let prop = DocumentPropertyType::U64; + let data: &[u8] = &[0, 0, 0]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_i64_truncated_returns_error() { + let prop = DocumentPropertyType::I64; + let data: &[u8] = &[1, 2]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_u128_truncated_returns_error() { + let prop = DocumentPropertyType::U128; + let data: &[u8] = &[0; 4]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_i128_truncated_returns_error() { + let prop = DocumentPropertyType::I128; + let data: &[u8] = &[0; 2]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_u32_truncated_returns_error() { + let prop = DocumentPropertyType::U32; + let data: &[u8] = &[0]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_i32_truncated_returns_error() { + let prop = DocumentPropertyType::I32; + let data: &[u8] = &[0, 1]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_u16_truncated_returns_error() { + let prop = DocumentPropertyType::U16; + let data: &[u8] = &[]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_i16_truncated_returns_error() { + let prop = DocumentPropertyType::I16; + let data: &[u8] = &[7]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_u8_eof_returns_error() { + // required=true but empty buffer: u8 read must fail + let prop = DocumentPropertyType::U8; + let data: &[u8] = &[]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_i8_eof_returns_error() { + let prop = DocumentPropertyType::I8; + let data: &[u8] = &[]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_f64_truncated_returns_error() { + let prop = DocumentPropertyType::F64; + let data: &[u8] = &[0, 0, 0]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_date_truncated_returns_error() { + let prop = DocumentPropertyType::Date; + let data: &[u8] = &[1, 2]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_boolean_eof_returns_error() { + let prop = DocumentPropertyType::Boolean; + let data: &[u8] = &[]; + let mut reader = BufReader::new(data); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_identifier_truncated_returns_error() { + let prop = DocumentPropertyType::Identifier; + // Only 16 bytes but identifier needs 32 + let data = [1u8; 16]; + let mut reader = BufReader::new(data.as_slice()); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_string_invalid_utf8_returns_error() { + use integer_encoding::VarInt; + let prop = DocumentPropertyType::String(StringPropertySizes { + min_length: None, + max_length: None, + }); + // Valid varint length but invalid UTF-8 bytes + let invalid_bytes = vec![0xFFu8, 0xFEu8, 0xFDu8]; + let mut data = invalid_bytes.len().encode_var_vec(); + data.extend_from_slice(&invalid_bytes); + let mut reader = BufReader::new(data.as_slice()); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_string_truncated_returns_error() { + use integer_encoding::VarInt; + let prop = DocumentPropertyType::String(StringPropertySizes { + min_length: None, + max_length: None, + }); + // varint says 10 bytes follow, but only provide 2 + let mut data = 10usize.encode_var_vec(); + data.push(b'a'); + data.push(b'b'); + let mut reader = BufReader::new(data.as_slice()); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_byte_array_fixed_size_truncated_returns_error() { + // min == max == 32, but provide only 10 bytes + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(32), + max_size: Some(32), + }); + let data = [1u8; 10]; + let mut reader = BufReader::new(data.as_slice()); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_object_truncated_length_returns_error() { + use integer_encoding::VarInt; + let mut inner_fields = IndexMap::new(); + inner_fields.insert( + "x".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::U32, + required: true, + transient: false, + }, + ); + let prop = DocumentPropertyType::Object(inner_fields); + // Claim 100 bytes follow but provide only 2 + let mut data = 100usize.encode_var_vec(); + data.push(0); + data.push(0); + let mut reader = BufReader::new(data.as_slice()); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + #[test] + fn test_read_optionally_from_object_required_field_after_finished_buffer() { + // If the inner buffer ends before a required field is read, we should get + // a CorruptedSerialization error. + use integer_encoding::VarInt; + let mut inner_fields = IndexMap::new(); + // First field is optional + inner_fields.insert( + "a".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::U32, + required: false, + transient: false, + }, + ); + // Second field is required + inner_fields.insert( + "b".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::U32, + required: true, + transient: false, + }, + ); + let prop = DocumentPropertyType::Object(inner_fields); + + // Build inner object bytes: a is optional, marker 0 => absent. Buffer ends. + // Required field "b" then has no data to read => error. + let inner_bytes = vec![0u8]; + let mut data = inner_bytes.len().encode_var_vec(); + data.extend_from_slice(&inner_bytes); + let mut reader = BufReader::new(data.as_slice()); + assert!(prop.read_optionally_from(&mut reader, true).is_err()); + } + + // ----------------------------------------------------------------------- + // encode_value_with_size() type-mismatch errors + // ----------------------------------------------------------------------- + + #[test] + fn test_encode_value_with_size_boolean_type_mismatch() { + let prop = DocumentPropertyType::Boolean; + // U64 cannot be coerced to bool + let result = prop.encode_value_with_size(Value::U64(1), true); + assert!(result.is_err()); + } + + #[test] + fn test_encode_value_with_size_object_type_mismatch() { + let prop = DocumentPropertyType::Object(IndexMap::new()); + let result = prop.encode_value_with_size(Value::U64(1), true); + assert!(result.is_err()); + } + + #[test] + fn test_encode_value_with_size_array_type_mismatch() { + let prop = DocumentPropertyType::Array(ArrayItemType::Integer); + // Not an Array value + let result = prop.encode_value_with_size(Value::U64(1), true); + assert!(result.is_err()); + } + + #[test] + fn test_encode_value_with_size_f64_type_mismatch() { + let prop = DocumentPropertyType::F64; + // Text cannot be converted to a float + let result = prop.encode_value_with_size(Value::Text("not a number".to_string()), true); + assert!(result.is_err()); + } + + #[test] + fn test_encode_value_with_size_u64_type_mismatch() { + let prop = DocumentPropertyType::U64; + let result = prop.encode_value_with_size(Value::Text("x".to_string()), true); + assert!(result.is_err()); + } + + // ----------------------------------------------------------------------- + // encode_value_ref_with_size() type-mismatch errors + // ----------------------------------------------------------------------- + + #[test] + fn test_encode_value_ref_with_size_string_type_mismatch() { + let prop = DocumentPropertyType::String(StringPropertySizes { + min_length: None, + max_length: None, + }); + let result = prop.encode_value_ref_with_size(&Value::U64(1), true); + assert!(result.is_err()); + } + + #[test] + fn test_encode_value_ref_with_size_boolean_type_mismatch() { + let prop = DocumentPropertyType::Boolean; + let result = prop.encode_value_ref_with_size(&Value::U64(1), true); + assert!(result.is_err()); + } + + #[test] + fn test_encode_value_ref_with_size_object_type_mismatch() { + let prop = DocumentPropertyType::Object(IndexMap::new()); + let result = prop.encode_value_ref_with_size(&Value::U64(1), true); + assert!(result.is_err()); + } + + #[test] + fn test_encode_value_ref_with_size_array_type_mismatch() { + let prop = DocumentPropertyType::Array(ArrayItemType::Integer); + let result = prop.encode_value_ref_with_size(&Value::U64(1), true); + assert!(result.is_err()); + } + + #[test] + fn test_encode_value_ref_with_size_object_missing_required_field_errors() { + let mut inner_fields = IndexMap::new(); + inner_fields.insert( + "name".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::String(StringPropertySizes { + min_length: None, + max_length: Some(100), + }), + required: true, + transient: false, + }, + ); + let prop = DocumentPropertyType::Object(inner_fields); + let val = Value::Map(vec![]); + let result = prop.encode_value_ref_with_size(&val, true); + assert!(result.is_err()); + } + + #[test] + fn test_encode_value_ref_with_size_object_optional_absent_pushes_zero() { + let mut inner_fields = IndexMap::new(); + inner_fields.insert( + "opt".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::U32, + required: false, + transient: false, + }, + ); + let prop = DocumentPropertyType::Object(inner_fields); + // Missing optional field path encodes a 0 absence marker + let val = Value::Map(vec![]); + let encoded = prop.encode_value_ref_with_size(&val, true).unwrap(); + // The body is one byte (0), prefixed with varint length (1). + assert_eq!(encoded, vec![1, 0]); + } + + #[test] + fn test_encode_value_ref_with_size_variable_type_array_returns_error_specific() { + let prop = DocumentPropertyType::VariableTypeArray(vec![]); + let val = Value::Array(vec![]); + let result = prop.encode_value_ref_with_size(&val, true); + assert!(result.is_err()); + } + + // ----------------------------------------------------------------------- + // Array encode roundtrip (covers array arm of encode_value_with_size and + // encode_value_ref_with_size) + // ----------------------------------------------------------------------- + + #[test] + fn test_encode_value_with_size_array_of_integers() { + let prop = DocumentPropertyType::Array(ArrayItemType::Integer); + let val = Value::Array(vec![Value::I64(1), Value::I64(2), Value::I64(3)]); + let result = prop.encode_value_with_size(val, true).unwrap(); + // varint(3) + 3 * 8 bytes + assert_eq!(result.len(), 1 + 3 * 8); + assert_eq!(result[0], 3); + } + + #[test] + fn test_encode_value_ref_with_size_array_of_integers() { + let prop = DocumentPropertyType::Array(ArrayItemType::Integer); + let val = Value::Array(vec![Value::I64(1), Value::I64(2)]); + let result = prop.encode_value_ref_with_size(&val, true).unwrap(); + // varint(2) + 2 * 8 bytes + assert_eq!(result.len(), 1 + 2 * 8); + assert_eq!(result[0], 2); + } + + // ----------------------------------------------------------------------- + // try_from_value_map() - extra branches + // ----------------------------------------------------------------------- + + #[test] + fn test_try_from_value_map_string_without_sizes() { + let type_val = Value::Text("string".to_string()); + let mut map = BTreeMap::new(); + map.insert("type".to_string(), &type_val); + let options = DocumentPropertyTypeParsingOptions::default(); + let result = DocumentPropertyType::try_from_value_map(&map, &options).unwrap(); + assert_eq!( + result, + DocumentPropertyType::String(StringPropertySizes { + min_length: None, + max_length: None, + }) + ); + } + + #[test] + fn test_try_from_value_map_object_type() { + let type_val = Value::Text("object".to_string()); + let mut map = BTreeMap::new(); + map.insert("type".to_string(), &type_val); + let options = DocumentPropertyTypeParsingOptions::default(); + let result = DocumentPropertyType::try_from_value_map(&map, &options).unwrap(); + assert!(matches!(result, DocumentPropertyType::Object(_))); + } + + #[test] + fn test_try_from_value_map_integer_only_min_positive() { + // sized, only min >= 0 => U64 + let type_val = Value::Text("integer".to_string()); + let min_val = Value::I64(10); + let mut map = BTreeMap::new(); + map.insert("type".to_string(), &type_val); + map.insert("minimum".to_string(), &min_val); + let options = DocumentPropertyTypeParsingOptions { + sized_integer_types: true, + }; + let result = DocumentPropertyType::try_from_value_map(&map, &options).unwrap(); + assert_eq!(result, DocumentPropertyType::U64); + } + + #[test] + fn test_try_from_value_map_integer_only_min_negative() { + // sized, only min < 0 => I64 + let type_val = Value::Text("integer".to_string()); + let min_val = Value::I64(-10); + let mut map = BTreeMap::new(); + map.insert("type".to_string(), &type_val); + map.insert("minimum".to_string(), &min_val); + let options = DocumentPropertyTypeParsingOptions { + sized_integer_types: true, + }; + let result = DocumentPropertyType::try_from_value_map(&map, &options).unwrap(); + assert_eq!(result, DocumentPropertyType::I64); + } + + #[test] + fn test_try_from_value_map_integer_only_max() { + // sized, only max <= u8::MAX => U8 + let type_val = Value::Text("integer".to_string()); + let max_val = Value::I64(200); + let mut map = BTreeMap::new(); + map.insert("type".to_string(), &type_val); + map.insert("maximum".to_string(), &max_val); + let options = DocumentPropertyTypeParsingOptions { + sized_integer_types: true, + }; + let result = DocumentPropertyType::try_from_value_map(&map, &options).unwrap(); + assert_eq!(result, DocumentPropertyType::U8); + } + + #[test] + fn test_try_from_value_map_integer_no_min_no_max_defaults_to_i64() { + // sized, no min/max, no enum => I64 + let type_val = Value::Text("integer".to_string()); + let mut map = BTreeMap::new(); + map.insert("type".to_string(), &type_val); + let options = DocumentPropertyTypeParsingOptions { + sized_integer_types: true, + }; + let result = DocumentPropertyType::try_from_value_map(&map, &options).unwrap(); + assert_eq!(result, DocumentPropertyType::I64); + } + + #[test] + fn test_try_from_value_map_integer_with_enum_min_max() { + // sized, enum values drive the integer selection + let type_val = Value::Text("integer".to_string()); + let enum_val = Value::Array(vec![Value::I64(0), Value::I64(1), Value::I64(255)]); + let mut map = BTreeMap::new(); + map.insert("type".to_string(), &type_val); + map.insert("enum".to_string(), &enum_val); + let options = DocumentPropertyTypeParsingOptions { + sized_integer_types: true, + }; + let result = DocumentPropertyType::try_from_value_map(&map, &options).unwrap(); + // min=0, max=255 => U8 + assert_eq!(result, DocumentPropertyType::U8); + } + + #[test] + fn test_try_from_value_map_integer_with_enum_single_value() { + // A single-element enum picks the unsigned type for that max + let type_val = Value::Text("integer".to_string()); + let enum_val = Value::Array(vec![Value::I64(300)]); + let mut map = BTreeMap::new(); + map.insert("type".to_string(), &type_val); + map.insert("enum".to_string(), &enum_val); + let options = DocumentPropertyTypeParsingOptions { + sized_integer_types: true, + }; + let result = DocumentPropertyType::try_from_value_map(&map, &options).unwrap(); + // 300 => U16 + assert_eq!(result, DocumentPropertyType::U16); + } + + #[test] + fn test_try_from_value_map_array_byte_array_non_identifier_media_type() { + // Non-identifier content-media-type => falls through to ByteArray + let type_val = Value::Text("array".to_string()); + let byte_array_val = Value::Bool(true); + let media_type_val = Value::Text("application/octet-stream".to_string()); + let mut map = BTreeMap::new(); + map.insert("type".to_string(), &type_val); + map.insert("byteArray".to_string(), &byte_array_val); + map.insert("contentMediaType".to_string(), &media_type_val); + let options = DocumentPropertyTypeParsingOptions::default(); + let result = DocumentPropertyType::try_from_value_map(&map, &options).unwrap(); + assert!(matches!(result, DocumentPropertyType::ByteArray(_))); + } + + // ----------------------------------------------------------------------- + // find_integer_type_for_min_and_max_values() - negative boundaries + // ----------------------------------------------------------------------- + + #[test] + fn test_find_integer_type_negative_small_range_is_i8() { + assert_eq!( + find_integer_type_for_min_and_max_values(-50, 50), + DocumentPropertyType::I8 + ); + } + + #[test] + fn test_find_integer_type_negative_medium_range_is_i16() { + assert_eq!( + find_integer_type_for_min_and_max_values(-1000, 1000), + DocumentPropertyType::I16 + ); + } + + #[test] + fn test_find_integer_type_negative_large_range_is_i32() { + assert_eq!( + find_integer_type_for_min_and_max_values(-100_000, 100_000), + DocumentPropertyType::I32 + ); + } + + #[test] + fn test_find_integer_type_very_large_negative_is_i64() { + assert_eq!( + find_integer_type_for_min_and_max_values(i64::MIN, 0), + DocumentPropertyType::I64 + ); + } + + // ----------------------------------------------------------------------- + // sanitize_value_mut() - additional branches + // ----------------------------------------------------------------------- + + #[test] + fn test_sanitize_value_mut_byte_array_from_base64_fallback() { + // The hex decode should fail (contains +/= padding); base64 path should win + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: None, + max_size: None, + }); + // "hello" in base64 is "aGVsbG8=" + let mut val = Value::Text("aGVsbG8=".to_string()); + prop.sanitize_value_mut(&mut val); + assert_eq!(val, Value::Bytes(b"hello".to_vec())); + } + + #[test] + fn test_sanitize_value_mut_byte_array_size_constraint_rejects() { + // hex of 10 bytes, but min_size is 100 => value is left unchanged + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(100), + max_size: None, + }); + let original = Value::Text("aabbccddee".to_string()); // 5 bytes + let mut val = original.clone(); + prop.sanitize_value_mut(&mut val); + assert_eq!(val, original, "out-of-bounds byte array must remain text"); + } + + #[test] + fn test_sanitize_value_mut_byte_array_fixed_size_32() { + // Fixed 32-byte hex string => Value::Bytes32 specialization + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(32), + max_size: Some(32), + }); + // 64 hex chars = 32 bytes + let hex_str = "00".repeat(32); + let mut val = Value::Text(hex_str); + prop.sanitize_value_mut(&mut val); + assert!(matches!(val, Value::Bytes32(_))); + } + + #[test] + fn test_sanitize_value_mut_byte_array_fixed_size_20() { + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(20), + max_size: Some(20), + }); + let hex_str = "ab".repeat(20); + let mut val = Value::Text(hex_str); + prop.sanitize_value_mut(&mut val); + assert!(matches!(val, Value::Bytes20(_))); + } + + #[test] + fn test_sanitize_value_mut_byte_array_fixed_size_36() { + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(36), + max_size: Some(36), + }); + let hex_str = "cd".repeat(36); + let mut val = Value::Text(hex_str); + prop.sanitize_value_mut(&mut val); + assert!(matches!(val, Value::Bytes36(_))); + } + + #[test] + fn test_sanitize_value_mut_byte_array_undecodable_leaves_unchanged() { + // Neither valid hex (odd chars) nor valid base64 (has !@# chars) + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: None, + max_size: None, + }); + let original = Value::Text("!@#not valid!".to_string()); + let mut val = original.clone(); + prop.sanitize_value_mut(&mut val); + assert_eq!(val, original); + } + + #[test] + fn test_sanitize_value_mut_object_nested() { + // Object sanitization should recurse into nested fields + let mut sub_fields = IndexMap::new(); + sub_fields.insert( + "small".to_string(), + DocumentProperty { + property_type: DocumentPropertyType::U8, + required: true, + transient: false, + }, + ); + let prop = DocumentPropertyType::Object(sub_fields); + + let mut val = Value::Map(vec![( + Value::Text("small".to_string()), + Value::U32(200), // will be sanitized to U8 + )]); + prop.sanitize_value_mut(&mut val); + if let Value::Map(entries) = val { + assert_eq!(entries[0].1, Value::U8(200)); + } else { + panic!("expected Map"); + } + } + + #[test] + fn test_sanitize_value_mut_array_elements() { + let prop = DocumentPropertyType::Array(ArrayItemType::Integer); + // Provide an array of Values + let original_vals = vec![Value::I64(1), Value::I64(2)]; + let mut val = Value::Array(original_vals.clone()); + prop.sanitize_value_mut(&mut val); + // Array path iterates every element; item_type.sanitize_value_mut is + // defined in array.rs and shouldn't panic on well-formed input. + if let Value::Array(items) = val { + assert_eq!(items.len(), 2); + } else { + panic!("expected Array"); + } + } + + #[test] + fn test_sanitize_value_mut_variable_type_array_elements() { + let prop = DocumentPropertyType::VariableTypeArray(vec![ + ArrayItemType::Integer, + ArrayItemType::Integer, + ]); + let mut val = Value::Array(vec![Value::I64(10), Value::I64(20)]); + prop.sanitize_value_mut(&mut val); + if let Value::Array(items) = val { + assert_eq!(items.len(), 2); + } else { + panic!("expected Array"); + } + } + + #[test] + fn test_sanitize_value_mut_u128_already_correct_unchanged() { + let prop = DocumentPropertyType::U128; + let mut val = Value::U128(42); + prop.sanitize_value_mut(&mut val); + assert_eq!(val, Value::U128(42)); + } + + #[test] + fn test_sanitize_value_mut_u8_out_of_range_unchanged() { + let prop = DocumentPropertyType::U8; + let original = Value::U16(300); // > u8::MAX + let mut val = original.clone(); + prop.sanitize_value_mut(&mut val); + // Guard clause `n <= u8::MAX as u16` fails, so no conversion + assert_eq!(val, original); + } + + #[test] + fn test_sanitize_value_mut_i8_out_of_range_unchanged() { + let prop = DocumentPropertyType::I8; + let original = Value::I16(500); + let mut val = original.clone(); + prop.sanitize_value_mut(&mut val); + assert_eq!(val, original); + } + + // ----------------------------------------------------------------------- + // Additional numeric encode/decode roundtrips at boundaries through + // value_from_string() + // ----------------------------------------------------------------------- + + #[test] + fn test_value_from_string_i64_min_max() { + let prop = DocumentPropertyType::I64; + let min_str = i64::MIN.to_string(); + let max_str = i64::MAX.to_string(); + assert_eq!( + prop.value_from_string(&min_str).unwrap(), + Value::I64(i64::MIN) + ); + assert_eq!( + prop.value_from_string(&max_str).unwrap(), + Value::I64(i64::MAX) + ); + } + + #[test] + fn test_value_from_string_u128_overflow_errors() { + let prop = DocumentPropertyType::U128; + // One larger than u128::MAX + let out_of_range = "340282366920938463463374607431768211456"; + assert!(prop.value_from_string(out_of_range).is_err()); + } + + #[test] + fn test_value_from_string_i128_overflow_errors() { + let prop = DocumentPropertyType::I128; + // Way too small + let out_of_range = "-170141183460469231731687303715884105729"; + assert!(prop.value_from_string(out_of_range).is_err()); + } + + #[test] + fn test_value_from_string_u8_negative_errors() { + let prop = DocumentPropertyType::U8; + assert!(prop.value_from_string("-1").is_err()); + } + + #[test] + fn test_value_from_string_f64_invalid_errors() { + let prop = DocumentPropertyType::F64; + assert!(prop.value_from_string("not_a_float").is_err()); + } + + #[test] + fn test_value_from_string_boolean_invalid_empty() { + let prop = DocumentPropertyType::Boolean; + assert!(prop.value_from_string("").is_err()); + } + + #[test] + fn test_value_from_string_string_at_exact_max_len_ok() { + let prop = DocumentPropertyType::String(StringPropertySizes { + min_length: Some(3), + max_length: Some(5), + }); + // Boundary: exactly min and exactly max + assert!(prop.value_from_string("abc").is_ok()); + assert!(prop.value_from_string("abcde").is_ok()); + } + + #[test] + fn test_value_from_string_byte_array_exact_boundaries() { + let prop = DocumentPropertyType::ByteArray(ByteArrayPropertySizes { + min_size: Some(2), + max_size: Some(4), + }); + // 2 hex chars = 1 byte -> too small + assert!(prop.value_from_string("ab").is_err()); + // 4 hex chars = 2 bytes -> ok + assert!(prop.value_from_string("abcd").is_ok()); + // 8 hex chars = 4 bytes -> ok + assert!(prop.value_from_string("aabbccdd").is_ok()); + // 10 hex chars = 5 bytes -> too big + assert!(prop.value_from_string("aabbccddee").is_err()); + } + + // ----------------------------------------------------------------------- + // DocumentPropertyTypeParsingOptions::From<&DataContractConfig> test + // ----------------------------------------------------------------------- + + #[test] + fn test_parsing_options_from_data_contract_config() { + let config = DataContractConfig::default_for_version(PlatformVersion::latest()) + .expect("should create default config"); + let opts: DocumentPropertyTypeParsingOptions = (&config).into(); + // Just verify that the conversion yields the same sized_integer_types + assert_eq!(opts.sized_integer_types, config.sized_integer_types()); + } + + // ----------------------------------------------------------------------- + // get_field_type_matching_error() - producer of ValueWrongType + // ----------------------------------------------------------------------- + + #[test] + fn test_get_field_type_matching_error_is_value_wrong_type() { + let err = get_field_type_matching_error(&Value::U64(1)); + match err { + DataContractError::ValueWrongType(msg) => { + assert!(msg.contains("document field type")); + } + other => panic!("expected ValueWrongType, got {:?}", other), + } + } + + // ----------------------------------------------------------------------- + // Integer encode/decode roundtrips for u128 boundary values + // ----------------------------------------------------------------------- + + #[test] + fn test_decode_i128_of_zero_roundtrip() { + let enc = DocumentPropertyType::encode_i128(0); + assert_eq!(DocumentPropertyType::decode_i128(&enc).unwrap(), 0); + } + + #[test] + fn test_encode_i128_preserves_sort_order() { + let values: Vec = vec![i128::MIN, -100, -1, 0, 1, 100, i128::MAX]; + let encoded: Vec> = values + .iter() + .map(|v| DocumentPropertyType::encode_i128(*v)) + .collect(); + for window in encoded.windows(2) { + assert!(window[0] < window[1], "sort order not preserved for i128"); + } + } + + #[test] + fn test_encode_u128_preserves_sort_order() { + // sort order holds in the lower half of the u128 range + let values: Vec = vec![0, 1, 100, 1_000_000, i128::MAX as u128]; + let encoded: Vec> = values + .iter() + .map(|v| DocumentPropertyType::encode_u128(*v)) + .collect(); + for window in encoded.windows(2) { + assert!(window[0] < window[1], "sort order not preserved for u128"); + } + } + + #[test] + fn test_encode_u32_preserves_sort_order() { + let values: Vec = vec![0, 1, 100, 1_000, i32::MAX as u32]; + let encoded: Vec> = values + .iter() + .map(|v| DocumentPropertyType::encode_u32(*v)) + .collect(); + for window in encoded.windows(2) { + assert!(window[0] < window[1], "sort order not preserved for u32"); + } + } + + #[test] + fn test_encode_i16_preserves_sort_order() { + let values: Vec = vec![i16::MIN, -1000, -1, 0, 1, 1000, i16::MAX]; + let encoded: Vec> = values + .iter() + .map(|v| DocumentPropertyType::encode_i16(*v)) + .collect(); + for window in encoded.windows(2) { + assert!(window[0] < window[1]); + } + } + + #[test] + fn test_encode_i8_preserves_sort_order() { + let values: Vec = vec![i8::MIN, -100, -1, 0, 1, 100, i8::MAX]; + let encoded: Vec> = values + .iter() + .map(|v| DocumentPropertyType::encode_i8(*v)) + .collect(); + for window in encoded.windows(2) { + assert!(window[0] < window[1]); + } + } } diff --git a/packages/rs-dpp/src/data_contract/document_type/v1/mod.rs b/packages/rs-dpp/src/data_contract/document_type/v1/mod.rs index ce7a0dd4f3f..97586c95973 100644 --- a/packages/rs-dpp/src/data_contract/document_type/v1/mod.rs +++ b/packages/rs-dpp/src/data_contract/document_type/v1/mod.rs @@ -131,3 +131,186 @@ impl From for DocumentTypeV1 { } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::data_contract::config::DataContractConfig; + use crate::data_contract::document_type::accessors::DocumentTypeV0Getters; + use crate::data_contract::document_type::token_costs::accessors::TokenCostGettersV0; + use crate::tokens::gas_fees_paid_by::GasFeesPaidBy; + use crate::tokens::token_amount_on_contract_token::{ + DocumentActionTokenCost, DocumentActionTokenEffect, + }; + use platform_value::platform_value; + use platform_version::version::PlatformVersion; + + /// Build a `DocumentTypeV0` via `DocumentType::try_from_schema` against an older + /// PlatformVersion so the `try_from_schema` dispatcher returns V0. + fn build_v0(schema: Value, transferable: bool, mutable: bool) -> DocumentTypeV0 { + // Use PlatformVersion::first() which routes to DocumentTypeV0::try_from_schema + let platform_version = PlatformVersion::first(); + let config = + DataContractConfig::default_for_version(platform_version).expect("default config"); + + // Augment schema with flags controlling test + let schema = match schema { + Value::Map(mut map) => { + map.push(( + Value::Text("documentsMutable".to_string()), + Value::Bool(mutable), + )); + if transferable { + map.push((Value::Text("transferable".to_string()), Value::U64(1))); + } + Value::Map(map) + } + other => other, + }; + + let dt = crate::data_contract::document_type::DocumentType::try_from_schema( + Identifier::new([33; 32]), + 0, + config.version(), + "legacy", + schema, + None, + &BTreeMap::new(), + &config, + false, + &mut vec![], + platform_version, + ) + .expect("v0 doc type should build"); + + match dt { + crate::data_contract::document_type::DocumentType::V0(v0) => v0, + crate::data_contract::document_type::DocumentType::V1(_) => { + panic!("expected V0 from first() version routing") + } + } + } + + #[test] + fn from_v0_preserves_all_fields_and_zeroes_token_costs() { + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 10_u32}, + }, + "additionalProperties": false, + }); + let v0 = build_v0(schema, true, true); + + // Capture pre-conversion values + let name = v0.name.clone(); + let data_contract_id = v0.data_contract_id; + let is_mutable = v0.documents_mutable; + let transferable = v0.documents_transferable; + + let v1: DocumentTypeV1 = v0.into(); + + // Core fields preserved + assert_eq!(v1.name(), &name); + assert_eq!(v1.data_contract_id(), data_contract_id); + assert_eq!(v1.documents_mutable(), is_mutable); + assert_eq!(v1.documents_transferable(), transferable); + + // Token costs default to all None + assert!(v1.token_costs.document_creation_token_cost().is_none()); + assert!(v1.token_costs.document_replacement_token_cost().is_none()); + assert!(v1.token_costs.document_deletion_token_cost().is_none()); + assert!(v1.token_costs.document_transfer_token_cost().is_none()); + assert!(v1.token_costs.document_price_update_token_cost().is_none()); + assert!(v1.token_costs.document_purchase_token_cost().is_none()); + } + + #[test] + fn from_v0_preserves_properties_and_indices() { + let schema = platform_value!({ + "type": "object", + "properties": { + "first": {"type": "string", "position": 0, "maxLength": 60_u32}, + "second": {"type": "string", "position": 1, "maxLength": 60_u32}, + }, + "indices": [ + {"name": "by_first", "properties": [{"first": "asc"}]}, + ], + "additionalProperties": false, + }); + let v0 = build_v0(schema, false, false); + let original_properties = v0.properties.clone(); + let original_indices = v0.indices.clone(); + + let v1: DocumentTypeV1 = v0.into(); + + assert_eq!(v1.properties().len(), original_properties.len()); + assert!(v1.properties().contains_key("first")); + assert!(v1.properties().contains_key("second")); + + assert_eq!(v1.indexes().len(), original_indices.len()); + assert!(v1.indexes().contains_key("by_first")); + } + + // ---------------------------------------------------------------- + // DocumentTypeV1Setters — cover each setter by calling through the + // trait on the resulting DocumentTypeV1 value. + // ---------------------------------------------------------------- + #[test] + fn setters_update_token_costs_in_place() { + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 10_u32}, + }, + "additionalProperties": false, + }); + let mut v1: DocumentTypeV1 = build_v0(schema, false, false).into(); + + let cost = DocumentActionTokenCost { + contract_id: None, + token_contract_position: 0, + token_amount: 7, + effect: DocumentActionTokenEffect::TransferTokenToContractOwner, + gas_fees_paid_by: GasFeesPaidBy::DocumentOwner, + }; + + v1.set_document_creation_token_cost(Some(cost)); + v1.set_document_replacement_token_cost(Some(cost)); + v1.set_document_deletion_token_cost(Some(cost)); + v1.set_document_transfer_token_cost(Some(cost)); + v1.set_document_price_update_token_cost(Some(cost)); + v1.set_document_purchase_token_cost(Some(cost)); + + assert!(v1.token_costs.document_creation_token_cost().is_some()); + assert!(v1.token_costs.document_replacement_token_cost().is_some()); + assert!(v1.token_costs.document_deletion_token_cost().is_some()); + assert!(v1.token_costs.document_transfer_token_cost().is_some()); + assert!(v1.token_costs.document_price_update_token_cost().is_some()); + assert!(v1.token_costs.document_purchase_token_cost().is_some()); + + // Clearing a cost should result in None + v1.set_document_creation_token_cost(None); + assert!(v1.token_costs.document_creation_token_cost().is_none()); + } + + #[test] + fn traits_are_implemented_for_document_type_v1() { + // Sanity-check that DocumentTypeV1 implements DocumentTypeBasicMethods + // and DocumentTypeV0Methods via the trait system. + let schema = platform_value!({ + "type": "object", + "properties": { + "a": {"type": "string", "position": 0, "maxLength": 10_u32}, + }, + "additionalProperties": false, + }); + let v0 = build_v0(schema, false, true); + let v1: DocumentTypeV1 = v0.into(); + + // Methods from DocumentTypeBasicMethods should be accessible + assert!(v1.requires_revision()); // mutable + // initial_revision method is present + assert!(v1.initial_revision().is_some()); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/check_tx_verification/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/check_tx_verification/v0/mod.rs index 112457fd596..afbb5a48310 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/check_tx_verification/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/check_tx_verification/v0/mod.rs @@ -670,6 +670,83 @@ mod tests { ); } + /// A transition whose owner identity does not exist in state must fail + /// with an IdentityNotFoundError surfaced via the identity-signed + /// validation branch (not an Err). + #[test] + fn should_return_invalid_when_owner_identity_not_in_state() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + // Build a transition, then drop the identity from the setup by + // using setup_identity_without_adding_it so the signer exists but + // the state does not contain the identity. + use crate::execution::validation::state_transition::state_transitions::tests::setup_identity_without_adding_it; + let (identity, signer, key) = + setup_identity_without_adding_it(333, dash_to_credits!(2.0)); + + let mut data_contract = json_document_to_contract_with_ids( + "tests/supporting_files/contract/dpns/dpns-contract-contested-unique-index.json", + None, + None, + false, + platform_version, + ) + .expect("expected to get contract"); + + data_contract + .set_config(DataContractConfig::default_for_version(platform_version).unwrap()); + + let data_contract_create_transition = + DataContractCreateTransition::new_from_data_contract( + data_contract, + 1, + &identity.into_partial_identity_info(), + key.id(), + &signer, + platform_version, + None, + ) + .expect("expected to create transition"); + + let state_transition: StateTransition = data_contract_create_transition.into(); + + let platform_state = platform.state.load(); + let platform_ref = PlatformRef { + drive: &platform.drive, + state: &platform_state, + config: &platform.config, + core_rpc: &platform.core_rpc, + }; + + let result = state_transition_to_execution_event_for_check_tx_v0( + &platform_ref, + state_transition, + CheckTxLevel::FirstTimeCheck, + platform_version, + ); + + assert!(result.is_ok(), "should not return an Err"); + let validation_result = result.unwrap(); + assert!( + !validation_result.is_valid(), + "validation should fail: identity missing" + ); + use dpp::consensus::signature::SignatureError; + assert!( + validation_result.errors.iter().any(|e| matches!( + e, + ConsensusError::SignatureError(SignatureError::IdentityNotFoundError(_)) + )), + "expected IdentityNotFoundError, got: {:?}", + validation_result.errors + ); + // Silence unused: platform is mutable only to match existing setup function style. + let _ = &mut platform; + } + #[test] fn should_return_invalid_result_for_replayed_nonce() { let platform_version = PlatformVersion::latest(); diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/proof/verify_is_not_spent/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/proof/verify_is_not_spent/v0/mod.rs index 1ca28e3ff83..7da5fac9f37 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/proof/verify_is_not_spent/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/proof/verify_is_not_spent/v0/mod.rs @@ -86,3 +86,278 @@ pub(super) fn verify_asset_lock_is_not_spent_and_has_enough_balance_v0( StoredAssetLockInfo::NotPresent => Ok(ConsensusValidationResult::new()), } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::platform_types::platform::PlatformRef; + use crate::test::helpers::setup::TestPlatformBuilder; + use dpp::block::block_info::BlockInfo; + use dpp::consensus::basic::BasicError; + use dpp::consensus::ConsensusError; + use dpp::dashcore::hashes::Hash; + use dpp::dashcore::Txid; + use drive::util::batch::DriveOperation::SystemOperation; + use drive::util::batch::SystemOperationType; + + fn build_out_point(seed: u8) -> OutPoint { + let txid = Txid::from_raw_hash(dpp::dashcore::hashes::sha256d::Hash::from_byte_array( + [seed; 32], + )); + OutPoint { txid, vout: 0 } + } + + fn store_asset_lock_value( + platform: &crate::test::helpers::setup::TempPlatform, + out_point: OutPoint, + value: AssetLockValue, + platform_version: &PlatformVersion, + ) { + let op = SystemOperation(SystemOperationType::AddUsedAssetLock { + asset_lock_outpoint: Bytes36::new(out_point.into()), + asset_lock_value: value, + }); + platform + .drive + .apply_drive_operations( + vec![op], + true, + &BlockInfo::default(), + None, + platform_version, + None, + ) + .expect("expected to apply drive operations"); + } + + #[test] + fn should_return_valid_empty_result_when_not_present() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let platform_state = platform.state.load(); + let platform_ref = PlatformRef { + drive: &platform.drive, + state: &platform_state, + config: &platform.config, + core_rpc: &platform.core_rpc, + }; + + let out_point = build_out_point(0xAA); + let mut hasher = SignableBytesHasher::Bytes(b"not-yet-tried".to_vec()); + + let result = verify_asset_lock_is_not_spent_and_has_enough_balance_v0( + &platform_ref, + &mut hasher, + out_point, + 1_000, + None, + platform_version, + ) + .expect("should not error"); + + // NotPresent -> returns an empty, still-valid ConsensusValidationResult with no data set + assert!(result.is_valid(), "not present should yield valid result"); + assert!( + result.data.is_none(), + "no AssetLockValue should be returned for NotPresent" + ); + } + + #[test] + fn should_return_already_consumed_error_when_fully_consumed() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let out_point = build_out_point(0xBB); + + // A 0-remaining-credit value is stored with an empty item, representing FullyConsumed. + let zero_remaining = + AssetLockValue::new(10_000, vec![0x76, 0xa9, 0x14], 0, vec![], platform_version) + .expect("should build asset lock value"); + + store_asset_lock_value(&platform, out_point, zero_remaining, platform_version); + + let platform_state = platform.state.load(); + let platform_ref = PlatformRef { + drive: &platform.drive, + state: &platform_state, + config: &platform.config, + core_rpc: &platform.core_rpc, + }; + + let mut hasher = SignableBytesHasher::Bytes(b"sig-bytes".to_vec()); + + let result = verify_asset_lock_is_not_spent_and_has_enough_balance_v0( + &platform_ref, + &mut hasher, + out_point, + 1_000, + None, + platform_version, + ) + .expect("should not error"); + + assert!(!result.is_valid(), "fully consumed should be invalid"); + assert!( + result.errors.iter().any(|e| matches!( + e, + ConsensusError::BasicError( + BasicError::IdentityAssetLockTransactionOutPointAlreadyConsumedError(_) + ) + )), + "expected AlreadyConsumed error, got: {:?}", + result.errors + ); + } + + #[test] + fn should_return_not_enough_balance_when_remaining_below_required() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let out_point = build_out_point(0xCC); + + // Partially consumed: remaining = 500 < required 1000 + let partial = AssetLockValue::new(10_000, vec![0x76, 0xa9], 500, vec![], platform_version) + .expect("should build asset lock value"); + + store_asset_lock_value(&platform, out_point, partial, platform_version); + + let platform_state = platform.state.load(); + let platform_ref = PlatformRef { + drive: &platform.drive, + state: &platform_state, + config: &platform.config, + core_rpc: &platform.core_rpc, + }; + + let mut hasher = SignableBytesHasher::Bytes(b"sig-not-yet-used".to_vec()); + + let result = verify_asset_lock_is_not_spent_and_has_enough_balance_v0( + &platform_ref, + &mut hasher, + out_point, + 1_000, + None, + platform_version, + ) + .expect("should not error"); + + assert!(!result.is_valid(), "should be invalid"); + assert!( + result.errors.iter().any(|e| matches!( + e, + ConsensusError::BasicError( + BasicError::IdentityAssetLockTransactionOutPointNotEnoughBalanceError(_) + ) + )), + "expected NotEnoughBalance error, got: {:?}", + result.errors + ); + } + + #[test] + fn should_return_replay_error_when_signable_bytes_already_used() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let out_point = build_out_point(0xDD); + + // Pre-hash the signable bytes to compute the tag that will be matched + let mut pre_hasher = SignableBytesHasher::Bytes(b"prev-tried-bytes".to_vec()); + let used_tag = pre_hasher.to_hashed_bytes(); + + let partial = AssetLockValue::new( + 10_000, + vec![0x76, 0xa9], + 5_000, + vec![used_tag], + platform_version, + ) + .expect("should build asset lock value"); + + store_asset_lock_value(&platform, out_point, partial, platform_version); + + let platform_state = platform.state.load(); + let platform_ref = PlatformRef { + drive: &platform.drive, + state: &platform_state, + config: &platform.config, + core_rpc: &platform.core_rpc, + }; + + // A fresh hasher with the same bytes — the stored used_tag should match. + let mut hasher = SignableBytesHasher::Bytes(b"prev-tried-bytes".to_vec()); + + let result = verify_asset_lock_is_not_spent_and_has_enough_balance_v0( + &platform_ref, + &mut hasher, + out_point, + 1_000, + None, + platform_version, + ) + .expect("should not error"); + + assert!(!result.is_valid(), "should be invalid"); + assert!( + result.errors.iter().any(|e| matches!( + e, + ConsensusError::BasicError( + BasicError::IdentityAssetLockStateTransitionReplayError(_) + ) + )), + "expected ReplayError, got: {:?}", + result.errors + ); + } + + #[test] + fn should_return_value_when_enough_balance_and_not_replayed() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let out_point = build_out_point(0xEE); + + let partial = + AssetLockValue::new(10_000, vec![0x76, 0xa9], 5_000, vec![], platform_version) + .expect("should build asset lock value"); + + store_asset_lock_value(&platform, out_point, partial, platform_version); + + let platform_state = platform.state.load(); + let platform_ref = PlatformRef { + drive: &platform.drive, + state: &platform_state, + config: &platform.config, + core_rpc: &platform.core_rpc, + }; + + let mut hasher = SignableBytesHasher::Bytes(b"brand-new-signable".to_vec()); + + let result = verify_asset_lock_is_not_spent_and_has_enough_balance_v0( + &platform_ref, + &mut hasher, + out_point, + 1_000, + None, + platform_version, + ) + .expect("should not error"); + + assert!(result.is_valid(), "should be valid: {:?}", result.errors); + let value = result.data.expect("should have asset lock value"); + assert_eq!(value.remaining_credit_value(), 5_000); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/transaction/fetch_asset_lock_transaction_output_sync/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/transaction/fetch_asset_lock_transaction_output_sync/v0/mod.rs index b48a79bf8ef..ae16085efc9 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/transaction/fetch_asset_lock_transaction_output_sync/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/transaction/fetch_asset_lock_transaction_output_sync/v0/mod.rs @@ -106,3 +106,113 @@ pub fn fetch_asset_lock_transaction_output_sync_v0( } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::rpc::core::MockCoreRPCLike; + use dpp::consensus::basic::BasicError; + use dpp::consensus::ConsensusError; + use dpp::dashcore::hashes::Hash; + use dpp::dashcore::transaction::special_transaction::asset_lock::AssetLockPayload; + use dpp::dashcore::transaction::special_transaction::TransactionPayload; + use dpp::dashcore::{Transaction, TxIn, Txid}; + use dpp::identity::state_transition::asset_lock_proof::chain::ChainAssetLockProof; + use dpp::identity::state_transition::asset_lock_proof::InstantAssetLockProof; + + fn dummy_txid(seed: u8) -> Txid { + Txid::from_raw_hash(dpp::dashcore::hashes::sha256d::Hash::from_byte_array( + [seed; 32], + )) + } + + #[test] + fn instant_proof_with_missing_output_returns_output_not_found() { + let platform_version = PlatformVersion::latest(); + + // InstantAssetLockProof with a vout that doesn't exist in the transaction's tx outs. + let tx_without_output = Transaction { + version: 3, + lock_time: 0, + input: vec![TxIn::default()], + output: vec![], // no outputs at all + special_transaction_payload: Some(TransactionPayload::AssetLockPayloadType( + AssetLockPayload { + version: 1, + credit_outputs: vec![], + }, + )), + }; + + let proof = InstantAssetLockProof::new( + dpp::dashcore::InstantLock { + version: 1, + inputs: vec![], + txid: tx_without_output.txid(), + cyclehash: [0u8; 32].into(), + signature: [0u8; 96].into(), + }, + tx_without_output, + 5, // output_index out of range + ); + + let asset_lock_proof = AssetLockProof::Instant(proof); + + let core_rpc = MockCoreRPCLike::new(); + let result = fetch_asset_lock_transaction_output_sync_v0( + &core_rpc, + &asset_lock_proof, + platform_version, + ) + .expect("should not return Err"); + + assert!(!result.is_valid(), "should be invalid"); + assert!( + result.errors.iter().any(|e| matches!( + e, + ConsensusError::BasicError( + BasicError::IdentityAssetLockTransactionOutputNotFoundError(_) + ) + )), + "expected IdentityAssetLockTransactionOutputNotFoundError, got: {:?}", + result.errors + ); + } + + #[test] + fn chain_proof_tx_not_found_returns_tx_not_found_error() { + let platform_version = PlatformVersion::latest(); + + let txid = dummy_txid(0x11); + let mut out_point_bytes = [0u8; 36]; + out_point_bytes[..32].copy_from_slice(txid.as_raw_hash().as_byte_array()); + out_point_bytes[32..36].copy_from_slice(&0u32.to_le_bytes()); + + let chain_proof = ChainAssetLockProof::new(42, out_point_bytes); + let asset_lock_proof = AssetLockProof::Chain(chain_proof); + + let mut core_rpc = MockCoreRPCLike::new(); + core_rpc + .expect_get_optional_transaction_extended_info() + .returning(|_txid| Ok(None)); + + let result = fetch_asset_lock_transaction_output_sync_v0( + &core_rpc, + &asset_lock_proof, + platform_version, + ) + .expect("should not return Err"); + + assert!(!result.is_valid(), "should be invalid"); + assert!( + result.errors.iter().any(|e| matches!( + e, + ConsensusError::BasicError( + BasicError::IdentityAssetLockTransactionIsNotFoundError(_) + ) + )), + "expected IdentityAssetLockTransactionIsNotFoundError, got: {:?}", + result.errors + ); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_exists/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_exists/v0/mod.rs index a29d2b8821b..e9ae59c3251 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_exists/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_exists/v0/mod.rs @@ -24,3 +24,94 @@ pub(super) fn validate_identity_exists_v0( Ok(maybe_revision.is_some()) } + +#[cfg(test)] +mod tests { + use super::*; + use crate::execution::types::execution_operation::ValidationOperation; + use crate::test::helpers::setup::TestPlatformBuilder; + use dpp::block::block_info::BlockInfo; + use dpp::identity::accessors::IdentityGettersV0; + use dpp::identity::Identity; + use dpp::version::DefaultForPlatformVersion; + use rand::SeedableRng; + + #[test] + fn should_return_false_when_identity_not_in_state() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let missing_identity_id = Identifier::random(); + + let mut execution_context = + StateTransitionExecutionContext::default_for_platform_version(platform_version) + .expect("should create execution context"); + + let exists = validate_identity_exists_v0( + &platform.drive, + &missing_identity_id, + &mut execution_context, + None, + platform_version, + ) + .expect("should not error"); + + assert!(!exists, "identity should not be found"); + + // Confirm the fetch operation was recorded for fee accounting + let has_retrieve_op = execution_context + .operations_slice() + .iter() + .any(|op| matches!(op, ValidationOperation::RetrieveIdentity(_))); + assert!( + has_retrieve_op, + "should record a RetrieveIdentity validation operation" + ); + } + + #[test] + fn should_return_true_when_identity_is_in_state() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let (identity, _keys): (Identity, Vec<(dpp::identity::IdentityPublicKey, [u8; 32])>) = + Identity::random_identity_with_main_keys_with_private_key( + 2, + &mut rand::rngs::StdRng::seed_from_u64(17), + platform_version, + ) + .expect("got identity"); + + let identity_id = identity.id(); + platform + .drive + .add_new_identity( + identity, + false, + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("should add identity"); + + let mut execution_context = + StateTransitionExecutionContext::default_for_platform_version(platform_version) + .expect("should create execution context"); + + let exists = validate_identity_exists_v0( + &platform.drive, + &identity_id, + &mut execution_context, + None, + platform_version, + ) + .expect("should not error"); + + assert!(exists, "identity should be found after being added"); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_dont_exist_in_state/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_dont_exist_in_state/v0/mod.rs index a9feed80d56..5e2e0d2c5f6 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_dont_exist_in_state/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_dont_exist_in_state/v0/mod.rs @@ -53,3 +53,158 @@ pub(super) fn validate_identity_public_key_ids_dont_exist_in_state_v0( Ok(SimpleConsensusValidationResult::default()) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::test::helpers::setup::TestPlatformBuilder; + use dpp::block::block_info::BlockInfo; + use dpp::consensus::ConsensusError; + use dpp::identity::accessors::IdentityGettersV0; + use dpp::identity::identity_public_key::accessors::v0::IdentityPublicKeyGettersV0; + use dpp::identity::{Identity, IdentityPublicKey, KeyType, Purpose, SecurityLevel}; + use dpp::platform_value::BinaryData; + use dpp::state_transition::public_key_in_creation::v0::IdentityPublicKeyInCreationV0; + use dpp::version::DefaultForPlatformVersion; + use rand::SeedableRng; + + #[test] + fn should_pass_when_identity_has_no_existing_keys_with_those_ids() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + // Use an identity id that doesn't exist in state so no key ids will be found. + let identity_id = Identifier::random(); + + let key: IdentityPublicKeyInCreation = IdentityPublicKeyInCreationV0 { + id: 9999, + key_type: KeyType::ECDSA_HASH160, + purpose: Purpose::AUTHENTICATION, + security_level: SecurityLevel::HIGH, + contract_bounds: None, + read_only: false, + data: BinaryData::new(vec![1u8; 20]), + signature: BinaryData::default(), + } + .into(); + + let mut execution_context = + StateTransitionExecutionContext::default_for_platform_version(platform_version) + .expect("should create execution context"); + + let result = validate_identity_public_key_ids_dont_exist_in_state_v0( + identity_id, + &[key], + &platform.drive, + None, + &mut execution_context, + platform_version, + ) + .expect("should succeed"); + + assert!(result.is_valid(), "should be valid with fresh key ids"); + } + + #[test] + fn should_fail_when_key_id_already_exists_for_identity() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let (identity, keys_with_private): (Identity, Vec<(IdentityPublicKey, [u8; 32])>) = + Identity::random_identity_with_main_keys_with_private_key( + 2, + &mut rand::rngs::StdRng::seed_from_u64(55), + platform_version, + ) + .expect("got an identity"); + + let identity_id = identity.id(); + + // Pick an existing key id from this identity. + let existing_key_id = keys_with_private[0].0.id(); + + platform + .drive + .add_new_identity( + identity, + false, + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("should add identity"); + + // Build a creation-key with the same id — this should be flagged as duplicate. + let key: IdentityPublicKeyInCreation = IdentityPublicKeyInCreationV0 { + id: existing_key_id, + key_type: KeyType::ECDSA_HASH160, + purpose: Purpose::AUTHENTICATION, + security_level: SecurityLevel::HIGH, + contract_bounds: None, + read_only: false, + data: BinaryData::new(vec![9u8; 20]), + signature: BinaryData::default(), + } + .into(); + + let mut execution_context = + StateTransitionExecutionContext::default_for_platform_version(platform_version) + .expect("should create execution context"); + + let result = validate_identity_public_key_ids_dont_exist_in_state_v0( + identity_id, + &[key], + &platform.drive, + None, + &mut execution_context, + platform_version, + ) + .expect("should succeed"); + + assert!( + !result.is_valid(), + "should be invalid when key id already exists for the identity" + ); + assert_eq!(result.errors.len(), 1); + match &result.errors[0] { + ConsensusError::BasicError(BasicError::DuplicatedIdentityPublicKeyIdBasicError(e)) => { + assert!(e.duplicated_ids().contains(&existing_key_id)); + } + other => panic!( + "expected DuplicatedIdentityPublicKeyIdBasicError, got {:?}", + other + ), + } + } + + #[test] + fn should_pass_when_empty_key_list() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let identity_id = Identifier::random(); + + let mut execution_context = + StateTransitionExecutionContext::default_for_platform_version(platform_version) + .expect("should create execution context"); + + let result = validate_identity_public_key_ids_dont_exist_in_state_v0( + identity_id, + &[], + &platform.drive, + None, + &mut execution_context, + platform_version, + ) + .expect("should succeed"); + + assert!(result.is_valid(), "empty list should be trivially valid"); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_exist_in_state/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_exist_in_state/v0/mod.rs index 6422faf196e..d226fd356d3 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_exist_in_state/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_exist_in_state/v0/mod.rs @@ -48,3 +48,173 @@ pub(super) fn validate_identity_public_key_ids_exist_in_state_v0( Ok(ConsensusValidationResult::new_with_data(values)) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::test::helpers::setup::TestPlatformBuilder; + use dpp::block::block_info::BlockInfo; + use dpp::consensus::state::state_error::StateError; + use dpp::consensus::ConsensusError; + use dpp::identity::accessors::IdentityGettersV0; + use dpp::identity::identity_public_key::accessors::v0::IdentityPublicKeyGettersV0; + use dpp::identity::Identity; + use dpp::version::DefaultForPlatformVersion; + use rand::SeedableRng; + + #[test] + fn should_fail_when_identity_has_no_keys_at_requested_ids() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let identity_id = Identifier::random(); + let missing_ids: Vec = vec![17, 42, 99]; + + let mut execution_context = + StateTransitionExecutionContext::default_for_platform_version(platform_version) + .expect("should create execution context"); + + let result = validate_identity_public_key_ids_exist_in_state_v0( + identity_id, + &missing_ids, + &platform.drive, + &mut execution_context, + None, + platform_version, + ) + .expect("should succeed"); + + assert!(!result.is_valid(), "requested keys should be missing"); + assert_eq!(result.errors.len(), 1); + match &result.errors[0] { + ConsensusError::StateError(StateError::MissingIdentityPublicKeyIdsError(e)) => { + let missing = e.ids(); + // All three should be reported as missing + assert_eq!(missing.len(), 3); + for id in &missing_ids { + assert!(missing.contains(id), "missing should contain {}", id); + } + } + other => panic!("expected MissingIdentityPublicKeyIdsError, got {:?}", other), + } + } + + #[test] + fn should_succeed_when_all_requested_keys_exist() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let (identity, keys_with_private): ( + Identity, + Vec<(dpp::identity::IdentityPublicKey, [u8; 32])>, + ) = Identity::random_identity_with_main_keys_with_private_key( + 2, + &mut rand::rngs::StdRng::seed_from_u64(77), + platform_version, + ) + .expect("got identity"); + + let identity_id = identity.id(); + let existing_ids: Vec = keys_with_private.iter().map(|(k, _)| k.id()).collect(); + + platform + .drive + .add_new_identity( + identity, + false, + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("should add identity"); + + let mut execution_context = + StateTransitionExecutionContext::default_for_platform_version(platform_version) + .expect("should create execution context"); + + let result = validate_identity_public_key_ids_exist_in_state_v0( + identity_id, + &existing_ids, + &platform.drive, + &mut execution_context, + None, + platform_version, + ) + .expect("should succeed"); + + assert!(result.is_valid(), "existing keys should validate"); + let fetched = result.data.expect("should have data"); + assert_eq!(fetched.len(), existing_ids.len()); + } + + #[test] + fn should_report_only_missing_ids_when_partial_overlap() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let (identity, keys_with_private): ( + Identity, + Vec<(dpp::identity::IdentityPublicKey, [u8; 32])>, + ) = Identity::random_identity_with_main_keys_with_private_key( + 2, + &mut rand::rngs::StdRng::seed_from_u64(88), + platform_version, + ) + .expect("got identity"); + + let identity_id = identity.id(); + let existing_id = keys_with_private[0].0.id(); + + platform + .drive + .add_new_identity( + identity, + false, + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("should add identity"); + + // Request one existing + one definitely-missing id. + let missing_id: KeyID = 12345; + let requested = vec![existing_id, missing_id]; + + let mut execution_context = + StateTransitionExecutionContext::default_for_platform_version(platform_version) + .expect("should create execution context"); + + let result = validate_identity_public_key_ids_exist_in_state_v0( + identity_id, + &requested, + &platform.drive, + &mut execution_context, + None, + platform_version, + ) + .expect("should succeed"); + + assert!( + !result.is_valid(), + "should be invalid when a requested id is missing" + ); + match &result.errors[0] { + ConsensusError::StateError(StateError::MissingIdentityPublicKeyIdsError(e)) => { + let missing = e.ids(); + // Only the missing id should be reported; existing id should be filtered out. + assert_eq!(missing.len(), 1); + assert!(missing.contains(&missing_id)); + assert!(!missing.contains(&existing_id)); + } + other => panic!("expected MissingIdentityPublicKeyIdsError, got {:?}", other), + } + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_non_masternode_identity_exists/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_non_masternode_identity_exists/v0/mod.rs index 77c9734e1f6..50e76840b2f 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_non_masternode_identity_exists/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_non_masternode_identity_exists/v0/mod.rs @@ -36,3 +36,91 @@ pub(super) fn validate_non_masternode_identity_exists_v0( Ok(maybe_key.is_some()) } + +#[cfg(test)] +mod tests { + use super::*; + use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContextMethodsV0; + use crate::test::helpers::setup::TestPlatformBuilder; + use dpp::block::block_info::BlockInfo; + use dpp::identity::accessors::IdentityGettersV0; + use dpp::identity::Identity; + use dpp::version::DefaultForPlatformVersion; + use rand::SeedableRng; + + #[test] + fn should_return_false_when_identity_not_present() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + let missing_identity_id = Identifier::random(); + + let mut execution_context = + StateTransitionExecutionContext::default_for_platform_version(platform_version) + .expect("should create execution context"); + + let exists = validate_non_masternode_identity_exists_v0( + &platform.drive, + &missing_identity_id, + &mut execution_context, + None, + platform_version, + ) + .expect("should not error"); + + assert!(!exists, "non-masternode identity should not be found"); + assert!( + !execution_context.operations_slice().is_empty(), + "should record the key-fetch operation" + ); + } + + #[test] + fn should_return_true_when_identity_with_master_key_present() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state(); + + // random_identity_with_main_keys_with_private_key generates identities with + // a master authentication key, so the LatestAuthenticationMasterKey request + // should find it. + let (identity, _keys): (Identity, Vec<(dpp::identity::IdentityPublicKey, [u8; 32])>) = + Identity::random_identity_with_main_keys_with_private_key( + 3, + &mut rand::rngs::StdRng::seed_from_u64(42), + platform_version, + ) + .expect("got identity"); + + let identity_id = identity.id(); + platform + .drive + .add_new_identity( + identity, + false, + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("should add identity"); + + let mut execution_context = + StateTransitionExecutionContext::default_for_platform_version(platform_version) + .expect("should create execution context"); + + let exists = validate_non_masternode_identity_exists_v0( + &platform.drive, + &identity_id, + &mut execution_context, + None, + platform_version, + ) + .expect("should not error"); + + assert!(exists, "identity should be found with its master key"); + } +} diff --git a/packages/rs-drive-abci/src/query/document_query/v0/mod.rs b/packages/rs-drive-abci/src/query/document_query/v0/mod.rs index 2e08a1ea303..45b9868d515 100644 --- a/packages/rs-drive-abci/src/query/document_query/v0/mod.rs +++ b/packages/rs-drive-abci/src/query/document_query/v0/mod.rs @@ -1580,4 +1580,308 @@ mod tests { .expect("expected to get 2nd document") ); } + + /// When `prove: true` is set but the contract cannot be found, we must still + /// return a query-error validation result (not an Err). This pins the early-return + /// validation-ordering: contract lookup happens before prove-vs-no-prove branching. + #[test] + fn test_data_contract_not_found_with_prove_true() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let data_contract_id = vec![7u8; 32]; + + let request = GetDocumentsRequestV0 { + data_contract_id: data_contract_id.clone(), + document_type: "niceDocument".to_string(), + r#where: vec![], + limit: 0, + order_by: vec![], + prove: true, // proof path + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + assert!(matches!( + result.errors.as_slice(), + [QueryError::Query(QuerySyntaxError::DataContractNotFound(_))] + )); + } + + /// Invalid identifier must error out before the prove branch is reached. + #[test] + fn test_invalid_document_id_with_prove_true() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let request = GetDocumentsRequestV0 { + data_contract_id: vec![0; 7], // wrong length + document_type: "niceDocument".to_string(), + r#where: vec![], + limit: 0, + order_by: vec![], + prove: true, + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + assert_invalid_identifier(result); + } + + /// Invalid where clause must short-circuit even when proof is requested. + /// This exercises the where-clause CBOR decoder error branch ahead of + /// any prove-mode logic. + #[test] + fn test_invalid_where_clause_with_prove_true() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let created_data_contract = get_data_contract_fixture(None, 0, version.protocol_version); + store_data_contract(&platform, created_data_contract.data_contract(), version); + + let request = GetDocumentsRequestV0 { + data_contract_id: created_data_contract.data_contract().id().to_vec(), + document_type: "niceDocument".to_string(), + r#where: vec![0x9F], // malformed CBOR + limit: 0, + order_by: vec![], + prove: true, + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + assert!(matches!( + result.errors.as_slice(), + [QueryError::Query(QuerySyntaxError::DeserializationError(msg))] + if msg == "unable to decode 'where' query from cbor" + )); + } + + /// When the limit is exactly u16::MAX + 1 (one past the bound) the InvalidLimit + /// error path fires. This pins the boundary. + #[test] + fn test_limit_just_over_bound_is_rejected() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let created_data_contract = get_data_contract_fixture(None, 0, version.protocol_version); + store_data_contract(&platform, created_data_contract.data_contract(), version); + + let limit = u16::MAX as u32 + 1; + + let request = GetDocumentsRequestV0 { + data_contract_id: created_data_contract.data_contract().id().to_vec(), + document_type: "niceDocument".to_string(), + r#where: vec![], + limit, + order_by: vec![], + prove: false, + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + assert!(matches!( + result.errors.as_slice(), + [QueryError::Query(QuerySyntaxError::InvalidLimit(msg))] + if msg == &format!("limit {} out of bounds", limit) + )); + } + + /// Returns documents (not proof) when prove is false and at least one document + /// exists; the raw-results execution branch is exercised with actual data. + #[test] + fn test_documents_returned_without_proof_when_present() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let platform_version = PlatformVersion::latest(); + let created_data_contract = get_data_contract_fixture(None, 0, version.protocol_version); + store_data_contract(&platform, created_data_contract.data_contract(), version); + + let data_contract_id = created_data_contract.data_contract().id(); + let document_type_name = "niceDocument"; + let document_type = created_data_contract + .data_contract() + .document_type_for_name(document_type_name) + .expect("expected document type"); + + let document = document_type + .random_document(Some(11), platform_version) + .expect("expected a random doc"); + + store_document( + &platform, + created_data_contract.data_contract(), + document_type, + &document, + platform_version, + ); + + let request = GetDocumentsRequestV0 { + data_contract_id: data_contract_id.to_vec(), + document_type: document_type_name.to_string(), + r#where: vec![], + limit: 10, + order_by: vec![], + prove: false, + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + let Some(GetDocumentsResponseV0 { + result: Some(get_documents_response_v0::Result::Documents(documents)), + metadata: Some(_), + }) = result.data + else { + panic!("expected documents, not a proof") + }; + assert_eq!(documents.documents.len(), 1); + } + + /// Absent document type should fail even when prove is true (error path is + /// reached before the proof branch). + #[test] + fn test_absent_document_type_with_prove_true() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let created_data_contract = get_data_contract_fixture(None, 0, version.protocol_version); + store_data_contract(&platform, created_data_contract.data_contract(), version); + + let request = GetDocumentsRequestV0 { + data_contract_id: created_data_contract.data_contract().id().to_vec(), + document_type: "noSuchTypeInContract".to_string(), + r#where: vec![], + limit: 0, + order_by: vec![], + prove: true, + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + assert!(matches!( + result.errors.as_slice(), + [QueryError::InvalidArgument(msg)] if msg.contains("document type noSuchTypeInContract not found for contract") + )); + } + + /// Invalid start_at (too short) with prove: true must still surface the + /// InvalidStartsWithClause error rather than short-circuiting to a proof. + #[test] + fn test_invalid_start_at_clause_with_prove_true() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let created_data_contract = get_data_contract_fixture(None, 0, version.protocol_version); + store_data_contract(&platform, created_data_contract.data_contract(), version); + + let request = GetDocumentsRequestV0 { + data_contract_id: created_data_contract.data_contract().id().to_vec(), + document_type: "niceDocument".to_string(), + r#where: vec![], + limit: 0, + order_by: vec![], + prove: true, + start: Some(Start::StartAt(vec![0; 4])), // wrong length + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + assert!(matches!( + result.errors.as_slice(), + [QueryError::Query(QuerySyntaxError::InvalidStartsWithClause(msg))] + if msg == &"start at should be a 32 byte identifier" + )); + } + + /// Malformed order_by CBOR must fail even when prove is true. + #[test] + fn test_invalid_order_by_with_prove_true() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let created_data_contract = get_data_contract_fixture(None, 0, version.protocol_version); + store_data_contract(&platform, created_data_contract.data_contract(), version); + + let request = GetDocumentsRequestV0 { + data_contract_id: created_data_contract.data_contract().id().to_vec(), + document_type: "niceDocument".to_string(), + r#where: vec![], + limit: 0, + order_by: vec![0x9F], // malformed CBOR + prove: true, + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + assert!(matches!( + result.errors.as_slice(), + [QueryError::Query(QuerySyntaxError::DeserializationError(msg))] + if msg == "unable to decode 'order_by' query from cbor" + )); + } + + /// A where clause that deserializes as valid CBOR but references a field that is + /// not on any index should be rejected by the drive document-query constructor. + /// This ensures the `DriveDocumentQuery::from_decomposed_values` error path is + /// surfaced as a validation error rather than an Err Result. + #[test] + fn test_where_clause_on_non_indexed_field_is_rejected() { + let (platform, state, version) = setup_platform(None, Network::Testnet, None); + + let created_data_contract = get_data_contract_fixture(None, 0, version.protocol_version); + store_data_contract(&platform, created_data_contract.data_contract(), version); + + let bogus_clause = drive::query::WhereClause { + field: "thisFieldIsNotIndexed".to_string(), + operator: drive::query::WhereOperator::Equal, + value: Value::Text("value".to_string()), + }; + + let where_cbor = + serialize_vec_to_cbor(vec![bogus_clause]).expect("should serialize clause to cbor"); + + let request = GetDocumentsRequestV0 { + data_contract_id: created_data_contract.data_contract().id().to_vec(), + document_type: "niceDocument".to_string(), + r#where: where_cbor, + limit: 0, + order_by: vec![], + prove: false, + start: None, + }; + + let result = platform + .query_documents_v0(request, &state, version) + .expect("expected query to succeed"); + + // Should produce a Query error (validation error, not Err). + assert!( + !result.errors.is_empty(), + "expected an error for invalid where clause" + ); + assert!( + result + .errors + .iter() + .any(|e| matches!(e, QueryError::Query(_))), + "expected a QueryError::Query variant, got: {:?}", + result.errors + ); + } } diff --git a/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_branch_query/v0/mod.rs b/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_branch_query/v0/mod.rs index d54e69eaa32..55084afdd18 100644 --- a/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_branch_query/v0/mod.rs +++ b/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_branch_query/v0/mod.rs @@ -61,3 +61,121 @@ impl Drive { ) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::error::drive::DriveError; + use crate::error::Error; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + + /// Depth below the allowed minimum must produce an InvalidInput error + /// quoting both the received depth and the allowed range. + #[test] + fn depth_below_min_returns_invalid_input() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let min_depth = platform_version + .drive + .methods + .address_funds + .address_funds_query_min_depth; + // min_depth could be 0 in some versions; use a depth we know is below. + if min_depth == 0 { + // If min is 0, there is no "below" branch to exercise here. + return; + } + let too_shallow = min_depth - 1; + + let err = drive + .prove_address_funds_branch_query_v0(vec![], too_shallow, 0, platform_version) + .expect_err("depth below min should be rejected"); + match err { + Error::Drive(DriveError::InvalidInput(msg)) => { + assert!(msg.contains(&format!("{}", too_shallow))); + } + other => panic!("expected InvalidInput, got {:?}", other), + } + } + + /// Depth above the allowed maximum must produce an InvalidInput error. + #[test] + fn depth_above_max_returns_invalid_input() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let max_depth = platform_version + .drive + .methods + .address_funds + .address_funds_query_max_depth; + let too_deep = max_depth.saturating_add(1); + if too_deep == max_depth { + // Saturated at u8::MAX — can't build a "too deep" case. + return; + } + + let err = drive + .prove_address_funds_branch_query_v0(vec![], too_deep, 0, platform_version) + .expect_err("depth above max should be rejected"); + assert!(matches!(err, Error::Drive(DriveError::InvalidInput(_)))); + } + + /// Using a checkpoint height that does not exist in the drive must + /// propagate a GroveDB CheckpointNotFound (or equivalent) error. + #[test] + fn unknown_checkpoint_height_errors() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let min_depth = platform_version + .drive + .methods + .address_funds + .address_funds_query_min_depth; + let max_depth = platform_version + .drive + .methods + .address_funds + .address_funds_query_max_depth; + let depth = min_depth.max(1).min(max_depth); + + // Pick a checkpoint height that definitely does not exist. + let unknown_height = 999_999_999u64; + let err = drive + .prove_address_funds_branch_query_v0(vec![], depth, unknown_height, platform_version) + .expect_err("unknown checkpoint should error"); + // Must propagate as some error; the exact variant depends on GroveDB. + let _ = err; + } + + /// prove_address_funds_branch_query_operations_v0 populates drive_operations + /// even when the underlying call errors, provided validation passed. + #[test] + fn branch_query_operations_invalid_input_does_not_populate_ops() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let min_depth = platform_version + .drive + .methods + .address_funds + .address_funds_query_min_depth; + if min_depth == 0 { + return; + } + + let mut ops = vec![]; + let result = drive.prove_address_funds_branch_query_operations_v0( + vec![], + min_depth - 1, + 0, + &mut ops, + platform_version, + ); + assert!(result.is_err()); + // Validation error short-circuits before any GroveDB work, so ops stays empty. + assert!(ops.is_empty()); + } +} diff --git a/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_trunk_query/v0/mod.rs b/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_trunk_query/v0/mod.rs index 7ef20bab617..f65c02d6246 100644 --- a/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_trunk_query/v0/mod.rs +++ b/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_trunk_query/v0/mod.rs @@ -44,3 +44,68 @@ impl Drive { ) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::util::batch::drive_op_batch::{AddressFundsOperationType, DriveOperation}; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use dpp::address_funds::PlatformAddress; + use dpp::block::block_info::BlockInfo; + + /// Trunk query operations must populate drive_operations on success. + #[test] + fn trunk_query_operations_populates_ops() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + // Populate at least one address so the tree isn't empty. + let ops_in = vec![DriveOperation::AddressFundsOperation( + AddressFundsOperationType::SetBalanceToAddress { + address: PlatformAddress::P2pkh([7; 20]), + nonce: 1, + balance: 42, + }, + )]; + drive + .apply_drive_operations( + ops_in, + true, + &BlockInfo::default(), + None, + platform_version, + None, + ) + .expect("apply"); + + let mut drive_operations = vec![]; + let result = drive + .prove_address_funds_trunk_query_operations_v0(&mut drive_operations, platform_version); + + match result { + Ok(proof) => { + assert!(!proof.is_empty()); + assert!(!drive_operations.is_empty()); + } + Err(_e) => { + // On some platform versions the trunk query may not yet be + // fully supported (checkpoints not initialized in this minimal + // setup). In that case drive_operations should still be + // observable — we just ensure no panic. + } + } + } + + /// Public dispatcher matches the v0 path (same result shape), whether it + /// succeeds or bubbles up an underlying error. + #[test] + fn top_level_trunk_query_returns_same_shape_as_v0() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let top = drive.prove_address_funds_trunk_query(platform_version); + let v0 = drive.prove_address_funds_trunk_query_v0(platform_version); + + assert_eq!(top.is_ok(), v0.is_ok()); + } +} diff --git a/packages/rs-drive/src/drive/address_funds/prove/prove_balance_and_nonce/v0/mod.rs b/packages/rs-drive/src/drive/address_funds/prove/prove_balance_and_nonce/v0/mod.rs index 01d2c647ffd..415c8437d58 100644 --- a/packages/rs-drive/src/drive/address_funds/prove/prove_balance_and_nonce/v0/mod.rs +++ b/packages/rs-drive/src/drive/address_funds/prove/prove_balance_and_nonce/v0/mod.rs @@ -36,3 +36,111 @@ impl Drive { ) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::util::batch::drive_op_batch::{AddressFundsOperationType, DriveOperation}; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use dpp::block::block_info::BlockInfo; + use dpp::fee::Credits; + use dpp::prelude::AddressNonce; + + const ADDR_A: PlatformAddress = PlatformAddress::P2pkh([42; 20]); + const ADDR_MISSING: PlatformAddress = PlatformAddress::P2pkh([201; 20]); + + fn insert_balance(drive: &Drive, addr: PlatformAddress, nonce: AddressNonce, balance: Credits) { + let platform_version = PlatformVersion::latest(); + let ops = vec![DriveOperation::AddressFundsOperation( + AddressFundsOperationType::SetBalanceToAddress { + address: addr, + nonce, + balance, + }, + )]; + drive + .apply_drive_operations( + ops, + true, + &BlockInfo::default(), + None, + platform_version, + None, + ) + .expect("apply"); + } + + /// Prove-and-verify for an existing address returns the exact balance/nonce. + #[test] + fn prove_single_existing_address_round_trips() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + insert_balance(&drive, ADDR_A, 11, 9_999_999); + + let proof = drive + .prove_balance_and_nonce_v0(&ADDR_A, None, platform_version) + .expect("prove"); + assert!(!proof.is_empty()); + + // Round-trip via the verifier: single-address proof exposes the row. + let (_, result) = + Drive::verify_address_info(proof.as_slice(), &ADDR_A, false, platform_version) + .expect("verify"); + let (nonce, balance) = result.expect("should have value"); + assert_eq!(nonce, 11); + assert_eq!(balance, 9_999_999); + } + + /// prove_balance_and_nonce_operations_v0 populates drive_operations with at + /// least one entry tracking the proof cost. + #[test] + fn prove_operations_populates_drive_operations() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + insert_balance(&drive, ADDR_A, 1, 100); + + let mut ops = vec![]; + let proof = drive + .prove_balance_and_nonce_operations_v0(&ADDR_A, None, &mut ops, platform_version) + .expect("prove ops"); + assert!(!proof.is_empty()); + assert!(!ops.is_empty(), "drive_operations should track the proof"); + } + + /// Proving a non-existent address still produces a valid proof bytes blob + /// (an absence proof). The call itself must not error. + #[test] + fn prove_nonexistent_address_returns_absence_proof() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + // Populate with a different address so the tree isn't empty. + insert_balance(&drive, ADDR_A, 2, 50); + + let proof = drive + .prove_balance_and_nonce_v0(&ADDR_MISSING, None, platform_version) + .expect("prove missing"); + assert!(!proof.is_empty()); + } + + /// Attempting to prove from within a transaction is explicitly not + /// supported by GroveDB's prove path. This pins that behavior so a future + /// change that silently ignores the transaction is caught. + #[test] + fn prove_within_transaction_returns_not_supported() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + insert_balance(&drive, ADDR_A, 1, 100); + + let transaction = drive.grove.start_transaction(); + let err = drive + .prove_balance_and_nonce_v0(&ADDR_A, Some(&transaction), platform_version) + .expect_err("prove in tx must error"); + // The exact error variant is GroveDB NotSupported; we just ensure it does + // not silently succeed (which would hide a serious correctness bug). + let _ = err; + } +} diff --git a/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/fetch_identity_contract_nonce/v0/mod.rs b/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/fetch_identity_contract_nonce/v0/mod.rs index bfbf1c2f4f9..c21fca7392e 100644 --- a/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/fetch_identity_contract_nonce/v0/mod.rs +++ b/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/fetch_identity_contract_nonce/v0/mod.rs @@ -117,3 +117,165 @@ impl Drive { Ok((value, fees)) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::util::test_helpers::setup::setup_drive; + use dpp::block::block_info::BlockInfo; + use dpp::identity::accessors::IdentityGettersV0; + use dpp::identity::Identity; + + fn new_drive_with_identity() -> (crate::drive::Drive, Identity) { + let drive = setup_drive(None); + let platform_version = PlatformVersion::first(); + drive + .create_initial_state_structure(None, platform_version) + .expect("init"); + let identity = Identity::random_identity(5, Some(42), platform_version).expect("rand id"); + drive + .add_new_identity( + identity.clone(), + false, + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("insert identity"); + (drive, identity) + } + + /// Fetching a contract nonce for an identity that has never interacted with + /// the contract returns None without erroring. + #[test] + fn fetch_nonce_for_identity_with_no_contract_returns_none() { + let (drive, identity) = new_drive_with_identity(); + let platform_version = PlatformVersion::first(); + + let result = drive + .fetch_identity_contract_nonce_v0( + identity.id().to_buffer(), + [9u8; 32], + true, + None, + platform_version, + ) + .expect("fetch should return Ok(None)"); + assert_eq!(result, None); + } + + /// Fetching a contract nonce for a non-existent identity returns None. + #[test] + fn fetch_nonce_for_nonexistent_identity_returns_none() { + let drive = setup_drive(None); + let platform_version = PlatformVersion::first(); + drive + .create_initial_state_structure(None, platform_version) + .expect("init"); + + let result = drive + .fetch_identity_contract_nonce_v0([0u8; 32], [0u8; 32], true, None, platform_version) + .expect("fetch for missing identity"); + assert_eq!(result, None); + } + + /// After merging a nonce, the fetch returns Some(nonce). Exercises the + /// success round-trip through fetch_identity_contract_nonce_v0. + #[test] + fn fetch_after_merge_returns_the_stored_nonce() { + let (drive, identity) = new_drive_with_identity(); + let platform_version = PlatformVersion::first(); + + let contract_id = [3u8; 32]; + drive + .merge_identity_contract_nonce_v0( + identity.id().to_buffer(), + contract_id, + 1, + &BlockInfo::default(), + true, + None, + &mut vec![], + platform_version, + ) + .expect("merge nonce"); + + let fetched = drive + .fetch_identity_contract_nonce_v0( + identity.id().to_buffer(), + contract_id, + true, + None, + platform_version, + ) + .expect("fetch after merge"); + assert!(fetched.is_some()); + // The stored nonce is 1 (the value_filter masks out missing revisions bits). + let raw = fetched.expect("some"); + assert_eq!( + raw & dpp::identity::identity_nonce::IDENTITY_NONCE_VALUE_FILTER, + 1 + ); + } + + /// fetch_identity_contract_nonce_with_fees_v0 returns a FeeResult with + /// non-zero costs after a real merge happened. + #[test] + fn fetch_with_fees_returns_fee_result() { + let (drive, identity) = new_drive_with_identity(); + let platform_version = PlatformVersion::first(); + + let contract_id = [4u8; 32]; + drive + .merge_identity_contract_nonce_v0( + identity.id().to_buffer(), + contract_id, + 1, + &BlockInfo::default(), + true, + None, + &mut vec![], + platform_version, + ) + .expect("merge"); + + let (value, fees) = drive + .fetch_identity_contract_nonce_with_fees_v0( + identity.id().to_buffer(), + contract_id, + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("fetch with fees"); + + assert!(value.is_some()); + assert!(fees.processing_fee > 0 || fees.storage_fee > 0); + } + + /// Stateless (apply=false) fetch for a non-existent identity returns None + /// — this path uses DirectQueryType::StatelessDirectQuery. + #[test] + fn stateless_fetch_missing_identity_returns_none() { + let drive = setup_drive(None); + let platform_version = PlatformVersion::first(); + drive + .create_initial_state_structure(None, platform_version) + .expect("init"); + + let mut ops = vec![]; + let result = drive + .fetch_identity_contract_nonce_operations_v0( + [0u8; 32], + [0u8; 32], + false, // stateless + None, + &mut ops, + platform_version, + ) + .expect("stateless fetch"); + assert_eq!(result, None); + } +} diff --git a/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/merge_identity_contract_nonce/v0/mod.rs b/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/merge_identity_contract_nonce/v0/mod.rs index ff0d49f8d9a..3b2aa21d40f 100644 --- a/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/merge_identity_contract_nonce/v0/mod.rs +++ b/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/merge_identity_contract_nonce/v0/mod.rs @@ -636,4 +636,70 @@ mod tests { assert_eq!(result.error_message(), Some("nonce is an invalid value")); } + + /// Estimation-only branch (apply=false) must not error and produces a + /// MergeIdentityNonceSuccess result with the requested nonce treated as + /// new. This exercises the `estimated_costs_only_with_layer_info.is_some()` + /// path which mints cost-estimation ops instead of reading real state. + #[test] + fn merge_estimation_mode_succeeds_without_real_state() { + let contract_id = [77; 32]; + let drive = setup_drive(None); + let platform_version = PlatformVersion::first(); + drive + .create_initial_state_structure(None, platform_version) + .expect("init"); + + // No identity is created — in estimation mode we never touch real state. + let mut drive_operations = vec![]; + let result = drive + .merge_identity_contract_nonce_v0( + [1u8; 32], + contract_id, + 3, + &BlockInfo::default(), + false, // estimation mode + None, + &mut drive_operations, + platform_version, + ) + .expect("estimation should succeed"); + // Error-free success result. + assert!(result.error_message().is_none()); + } + + /// Calling merge_identity_contract_nonce_operations_v0 directly with + /// estimated_costs_only_with_layer_info = Some(...) returns a + /// MergeIdentityNonceSuccess with drive_operations populated for cost + /// estimation (stateless layer info bucket). + #[test] + fn merge_operations_stateless_layer_info_returns_success() { + let drive = setup_drive(None); + let platform_version = PlatformVersion::first(); + drive + .create_initial_state_structure(None, platform_version) + .expect("init"); + + let mut estimated = Some(std::collections::HashMap::< + grovedb::batch::KeyInfoPath, + grovedb::EstimatedLayerInformation, + >::new()); + + let (result, ops) = drive + .merge_identity_contract_nonce_operations_v0( + [2u8; 32], + [2u8; 32], + 5, + &BlockInfo::default(), + &mut estimated, + None, + platform_version, + ) + .expect("estimation ops"); + assert!(result.error_message().is_none()); + // The estimated layer info map must have been populated during the call. + assert!(!estimated.as_ref().unwrap().is_empty()); + // And some drive ops must have been emitted (for eventual cost calculation). + assert!(!ops.is_empty()); + } } diff --git a/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/prove_identity_contract_nonce/v0/mod.rs b/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/prove_identity_contract_nonce/v0/mod.rs index b634d3fa4d4..c9f8ef30cae 100644 --- a/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/prove_identity_contract_nonce/v0/mod.rs +++ b/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/prove_identity_contract_nonce/v0/mod.rs @@ -21,3 +21,102 @@ impl Drive { ) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::util::test_helpers::setup::setup_drive; + use dpp::block::block_info::BlockInfo; + use dpp::identity::accessors::IdentityGettersV0; + use dpp::identity::Identity; + use platform_version::version::PlatformVersion; + + /// Producing a proof for an identity/contract pair that has never been + /// merged returns valid non-empty proof bytes (an absence proof). + #[test] + fn prove_absent_nonce_returns_proof_bytes() { + let drive = setup_drive(None); + let platform_version = PlatformVersion::first(); + drive + .create_initial_state_structure(None, platform_version) + .expect("init"); + let identity = Identity::random_identity(5, Some(42), platform_version).expect("rand id"); + drive + .add_new_identity( + identity.clone(), + false, + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("insert"); + + let proof = drive + .prove_identity_contract_nonce_v0( + identity.id().to_buffer(), + [0u8; 32], + None, + &platform_version.drive, + ) + .expect("prove absent"); + assert!(!proof.is_empty()); + } + + /// After merging a nonce, the proof must be non-empty and different from + /// the absence-proof case (ensures the prove path actually reads state). + #[test] + fn prove_present_nonce_differs_from_absent_proof() { + let drive = setup_drive(None); + let platform_version = PlatformVersion::first(); + drive + .create_initial_state_structure(None, platform_version) + .expect("init"); + let identity = Identity::random_identity(5, Some(42), platform_version).expect("rand id"); + drive + .add_new_identity( + identity.clone(), + false, + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("insert"); + let contract_id = [5u8; 32]; + + let proof_before = drive + .prove_identity_contract_nonce_v0( + identity.id().to_buffer(), + contract_id, + None, + &platform_version.drive, + ) + .expect("prove before merge"); + + drive + .merge_identity_contract_nonce_v0( + identity.id().to_buffer(), + contract_id, + 1, + &BlockInfo::default(), + true, + None, + &mut vec![], + platform_version, + ) + .expect("merge"); + + let proof_after = drive + .prove_identity_contract_nonce_v0( + identity.id().to_buffer(), + contract_id, + None, + &platform_version.drive, + ) + .expect("prove after merge"); + + // After state change the proof must differ. + assert_ne!(proof_before, proof_after); + } +} diff --git a/packages/rs-drive/src/drive/shielded/nullifiers/cleanup_expired_nullifier_compactions/v0/mod.rs b/packages/rs-drive/src/drive/shielded/nullifiers/cleanup_expired_nullifier_compactions/v0/mod.rs index 8cee05484d6..6bb29971c47 100644 --- a/packages/rs-drive/src/drive/shielded/nullifiers/cleanup_expired_nullifier_compactions/v0/mod.rs +++ b/packages/rs-drive/src/drive/shielded/nullifiers/cleanup_expired_nullifier_compactions/v0/mod.rs @@ -197,4 +197,110 @@ mod tests { assert_eq!(cleaned, num_entries as usize); } + + /// Cleanup on an empty expiration tree returns 0 with no error. + #[test] + fn cleanup_empty_tree_returns_zero() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let cleaned = drive + .cleanup_expired_nullifier_compactions_v0(u64::MAX, None, platform_version) + .expect("cleanup empty"); + assert_eq!(cleaned, 0); + } + + /// Cleanup must NOT remove entries whose expiration time is greater than + /// current_block_time_ms (strict boundary). + #[test] + fn cleanup_leaves_future_expirations_alone() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + // Force compaction to populate both compacted entries and expiration entries. + drive + .compact_nullifiers_with_current_block_v0( + &[[1u8; 32]], + 10, + 10_000, // expires at 10_000 + ONE_WEEK_IN_MS + None, + platform_version, + ) + .expect("compact"); + + // Cleanup at time < expiration: no entries removed. + let cleaned = drive + .cleanup_expired_nullifier_compactions_v0(5_000, None, platform_version) + .expect("cleanup"); + assert_eq!(cleaned, 0); + + // The compacted entry must still be present. + let compacted = drive + .fetch_compacted_nullifier_changes(0, None, None, platform_version) + .expect("fetch"); + assert_eq!(compacted.len(), 1); + } + + /// Cleanup at or past the expiration boundary deletes the compacted entry + /// and the corresponding expiration entry. + #[test] + fn cleanup_removes_expired_entry_and_its_compacted_counterpart() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let block_time = 10_000u64; + drive + .compact_nullifiers_with_current_block_v0( + &[[1u8; 32]], + 10, + block_time, + None, + platform_version, + ) + .expect("compact"); + + // Cleanup at a time past the expiration (block_time + 1 week + 1). + let current = + block_time + crate::drive::shielded::nullifiers::compact_nullifiers::ONE_WEEK_IN_MS + 1; + let cleaned = drive + .cleanup_expired_nullifier_compactions_v0(current, None, platform_version) + .expect("cleanup"); + assert_eq!(cleaned, 1); + + let compacted = drive + .fetch_compacted_nullifier_changes(0, None, None, platform_version) + .expect("fetch after cleanup"); + assert!( + compacted.is_empty(), + "compacted entry should have been cleaned up" + ); + } + + /// Corrupting an expiration entry payload (garbage item data) triggers the + /// CorruptedSerialization branch in NullifierExpirationRanges::decode. + #[test] + fn cleanup_rejects_undecodable_expiration_payload() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let expiration_path = shielded_nullifiers_expiration_time_path(); + let key = 1_000u64.to_be_bytes().to_vec(); + drive + .grove + .insert( + expiration_path.as_ref(), + &key, + Element::new_item(vec![0xFFu8; 3]), + None, + None, + &platform_version.drive.grove_version, + ) + .unwrap() + .expect("insert"); + + let err = drive + .cleanup_expired_nullifier_compactions_v0(u64::MAX, None, platform_version) + .expect_err("cleanup must reject undecodable payload"); + assert!(matches!(err, Error::Protocol(_))); + } } diff --git a/packages/rs-drive/src/drive/shielded/nullifiers/compact_nullifiers/v0/mod.rs b/packages/rs-drive/src/drive/shielded/nullifiers/compact_nullifiers/v0/mod.rs index 4c67b00df6c..0de26b91546 100644 --- a/packages/rs-drive/src/drive/shielded/nullifiers/compact_nullifiers/v0/mod.rs +++ b/packages/rs-drive/src/drive/shielded/nullifiers/compact_nullifiers/v0/mod.rs @@ -165,3 +165,113 @@ impl Drive { Ok((start_block, end_block)) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + + /// Compacting with no existing recent entries (empty pool) produces a single + /// compacted entry whose range is (current_block, current_block). + #[test] + fn compact_with_empty_pool_uses_current_block_as_range() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let nullifiers = vec![[7u8; 32], [8u8; 32]]; + + let (start, end) = drive + .compact_nullifiers_with_current_block_v0( + &nullifiers, + 42, + 5_000, + None, + platform_version, + ) + .expect("compact should succeed"); + + assert_eq!(start, 42); + assert_eq!(end, 42); + + // Read back via fetch_compacted. + let compacted = drive + .fetch_compacted_nullifier_changes(0, None, None, platform_version) + .expect("fetch compacted"); + assert_eq!(compacted.len(), 1); + assert_eq!(compacted[0].start_block, 42); + assert_eq!(compacted[0].end_block, 42); + assert_eq!(compacted[0].nullifiers.as_slice(), nullifiers.as_slice()); + } + + /// Compacting twice with the same current_block_time_ms must merge the two + /// ranges under the same expiration key (exercising the + /// "existing_ranges is Some" branch). + #[test] + fn second_compaction_with_same_time_appends_range_to_expiration() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + drive + .compact_nullifiers_with_current_block_v0( + &[[1u8; 32]], + 10, + 1_000, + None, + platform_version, + ) + .expect("first compact"); + + drive + .compact_nullifiers_with_current_block_v0( + &[[2u8; 32]], + 20, + 1_000, // same block time → same expiration key + None, + platform_version, + ) + .expect("second compact"); + + let compacted = drive + .fetch_compacted_nullifier_changes(0, None, None, platform_version) + .expect("fetch"); + assert_eq!(compacted.len(), 2, "both compacted ranges must be present"); + } + + /// Compacting drains entries stored via store_nullifiers_for_block_v0 and the + /// final combined list is the concatenation of stored + current in block order. + #[test] + fn compact_drains_recent_and_concats_in_order() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + drive + .store_nullifiers_for_block_v0(&[[1u8; 32]], 1, 100, None, platform_version) + .expect("store 1"); + drive + .store_nullifiers_for_block_v0(&[[2u8; 32]], 2, 200, None, platform_version) + .expect("store 2"); + + let (start, end) = drive + .compact_nullifiers_with_current_block_v0(&[[3u8; 32]], 3, 300, None, platform_version) + .expect("compact"); + + assert_eq!(start, 1); + assert_eq!(end, 3); + + // After compaction, recent pool should be drained. + let recent = drive + .fetch_recent_nullifier_changes(0, None, None, platform_version) + .expect("fetch recent"); + assert_eq!(recent.len(), 0, "recent should be drained after compact"); + + let compacted = drive + .fetch_compacted_nullifier_changes(0, None, None, platform_version) + .expect("fetch compacted"); + assert_eq!(compacted.len(), 1); + // Concatenation in ascending block order: [1][2][3] + assert_eq!( + compacted[0].nullifiers.as_slice(), + &[[1u8; 32], [2u8; 32], [3u8; 32]] + ); + } +} diff --git a/packages/rs-drive/src/drive/shielded/nullifiers/fetch_compacted_nullifiers/v0/mod.rs b/packages/rs-drive/src/drive/shielded/nullifiers/fetch_compacted_nullifiers/v0/mod.rs index c3a40612afd..ab349717a8b 100644 --- a/packages/rs-drive/src/drive/shielded/nullifiers/fetch_compacted_nullifiers/v0/mod.rs +++ b/packages/rs-drive/src/drive/shielded/nullifiers/fetch_compacted_nullifiers/v0/mod.rs @@ -155,3 +155,142 @@ impl Drive { Ok(compacted_changes) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::drive::shielded::nullifiers::queries::shielded_compacted_nullifiers_path; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use dpp::ProtocolError; + use grovedb::Element; + + /// Limit=Some(0) must short-circuit before any GroveDB work and return an empty vec. + #[test] + fn limit_zero_short_circuits() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let result = drive + .fetch_compacted_nullifier_changes_v0(0, Some(0), None, platform_version) + .expect("limit 0 should return Ok([])"); + assert_eq!(result.len(), 0); + } + + /// Empty compacted tree should return empty vec. + #[test] + fn fetch_empty_compacted_tree_returns_empty() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let result = drive + .fetch_compacted_nullifier_changes_v0(0, None, None, platform_version) + .expect("empty tree fetch"); + assert!(result.is_empty()); + } + + /// Querying from a block_height past the only compacted range should miss that + /// range entirely (because end_block < start_block_height). + #[test] + fn query_past_the_only_range_returns_none() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + // Force-insert one compacted entry spanning blocks 10..=20. + let compacted_path = shielded_compacted_nullifiers_path(); + let mut key = Vec::with_capacity(16); + key.extend_from_slice(&10u64.to_be_bytes()); + key.extend_from_slice(&20u64.to_be_bytes()); + let serialized = CompactedNullifiers::new(vec![[1u8; 32]]).encode().unwrap(); + drive + .grove + .insert( + compacted_path.as_ref(), + &key, + Element::new_item(serialized), + None, + None, + &platform_version.drive.grove_version, + ) + .unwrap() + .expect("insert compacted"); + + // Query from block 1000 — past the 10..=20 range. + let result = drive + .fetch_compacted_nullifier_changes_v0(1000, None, None, platform_version) + .expect("fetch"); + assert_eq!(result.len(), 0, "range [10,20] does not cover 1000"); + } + + /// Querying from a block_height inside an existing range must return that range + /// (exercising the descending-query branch that covers `start_block_height`). + #[test] + fn query_inside_range_returns_it() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + // Force-insert a compacted entry spanning blocks 400..=600. + let compacted_path = shielded_compacted_nullifiers_path(); + let mut key = Vec::with_capacity(16); + key.extend_from_slice(&400u64.to_be_bytes()); + key.extend_from_slice(&600u64.to_be_bytes()); + let nullifiers = vec![[0xAAu8; 32], [0xBBu8; 32]]; + let serialized = CompactedNullifiers::new(nullifiers.clone()) + .encode() + .unwrap(); + drive + .grove + .insert( + compacted_path.as_ref(), + &key, + Element::new_item(serialized), + None, + None, + &platform_version.drive.grove_version, + ) + .unwrap() + .expect("insert"); + + let result = drive + .fetch_compacted_nullifier_changes_v0(505, None, None, platform_version) + .expect("fetch from block inside range"); + assert_eq!(result.len(), 1); + assert_eq!(result[0].start_block, 400); + assert_eq!(result[0].end_block, 600); + assert_eq!(result[0].nullifiers.as_slice(), nullifiers.as_slice()); + } + + /// Inserting a garbage item payload under the compacted path triggers the + /// CorruptedSerialization branch in CompactedNullifiers::decode. + #[test] + fn fetch_rejects_undecodable_payload_in_compacted_tree() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let compacted_path = shielded_compacted_nullifiers_path(); + let mut key = Vec::with_capacity(16); + key.extend_from_slice(&0u64.to_be_bytes()); + key.extend_from_slice(&10u64.to_be_bytes()); + // Garbage bytes that bincode cannot decode into Vec<[u8;32]>. + drive + .grove + .insert( + compacted_path.as_ref(), + &key, + Element::new_item(vec![0xFFu8; 3]), + None, + None, + &platform_version.drive.grove_version, + ) + .unwrap() + .expect("force-insert garbage item"); + + match drive.fetch_compacted_nullifier_changes_v0(0, None, None, platform_version) { + Err(Error::Protocol(b)) => match b.as_ref() { + ProtocolError::CorruptedSerialization(_) => {} + other => panic!("expected CorruptedSerialization, got: {:?}", other), + }, + Err(other) => panic!("expected Error::Protocol, got: {:?}", other), + Ok(_) => panic!("should reject undecodable payload"), + } + } +} diff --git a/packages/rs-drive/src/drive/shielded/nullifiers/fetch_nullifiers/v0/mod.rs b/packages/rs-drive/src/drive/shielded/nullifiers/fetch_nullifiers/v0/mod.rs index d96d797e0f1..f14ae334770 100644 --- a/packages/rs-drive/src/drive/shielded/nullifiers/fetch_nullifiers/v0/mod.rs +++ b/packages/rs-drive/src/drive/shielded/nullifiers/fetch_nullifiers/v0/mod.rs @@ -67,3 +67,110 @@ impl Drive { Ok(nullifier_changes) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::drive::shielded::nullifiers::queries::shielded_recent_nullifiers_path_vec; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use dpp::ProtocolError; + + /// Fetching from an empty nullifier pool returns an empty vec (not an error). + #[test] + fn fetch_empty_pool_returns_empty_vec() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let result = drive + .fetch_recent_nullifier_changes_v0(0, None, None, platform_version) + .expect("fetch should succeed"); + assert!(result.is_empty()); + } + + /// Fetch honors a non-zero start_height: blocks before start_height are skipped. + #[test] + fn fetch_skips_blocks_before_start_height() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + // Store 3 blocks. + for height in [10u64, 20, 30] { + drive + .store_nullifiers_for_block_v0( + &[[height as u8; 32]], + height, + 1_000 * height, + None, + platform_version, + ) + .expect("store"); + } + + let from_25 = drive + .fetch_recent_nullifier_changes_v0(25, None, None, platform_version) + .expect("fetch from 25"); + assert_eq!(from_25.len(), 1, "only block 30 should be returned"); + assert_eq!(from_25[0].block_height, 30); + } + + /// Fetch honors the limit parameter. With limit=2 and 3 stored blocks, we get 2. + #[test] + fn fetch_honors_limit() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + for height in [1u64, 2, 3] { + drive + .store_nullifiers_for_block_v0( + &[[height as u8; 32]], + height, + 1_000, + None, + platform_version, + ) + .expect("store"); + } + + let limited = drive + .fetch_recent_nullifier_changes_v0(0, Some(2), None, platform_version) + .expect("fetch with limit"); + assert_eq!(limited.len(), 2); + assert_eq!(limited[0].block_height, 1); + assert_eq!(limited[1].block_height, 2); + } + + /// Directly inserting a malformed (wrong-type) element into the nullifiers tree + /// triggers the CorruptedSerialization branch in the fetch loop. + #[test] + fn fetch_rejects_non_item_with_sum_item_element() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + // Insert a plain Item element directly (bypassing the store API). fetch expects + // ItemWithSumItem and must error on plain Item. + let path = shielded_recent_nullifiers_path_vec(); + let path_refs: Vec<&[u8]> = path.iter().map(|v| v.as_slice()).collect(); + let key = 1u64.to_be_bytes(); + drive + .grove + .insert( + path_refs.as_slice(), + &key, + grovedb::Element::new_item(vec![0u8; 4]), + None, + None, + &platform_version.drive.grove_version, + ) + .unwrap() + .expect("force-insert plain item"); + + match drive.fetch_recent_nullifier_changes_v0(0, None, None, platform_version) { + Err(Error::Protocol(b)) => match b.as_ref() { + ProtocolError::CorruptedSerialization(_) => {} + other => panic!("expected CorruptedSerialization, got: {:?}", other), + }, + Err(other) => panic!("expected Error::Protocol, got: {:?}", other), + Ok(_) => panic!("fetch should detect the wrong element kind"), + } + } +} diff --git a/packages/rs-drive/src/drive/shielded/nullifiers/store_nullifiers/v0/mod.rs b/packages/rs-drive/src/drive/shielded/nullifiers/store_nullifiers/v0/mod.rs index 3b9192d17e0..dfd698b6d10 100644 --- a/packages/rs-drive/src/drive/shielded/nullifiers/store_nullifiers/v0/mod.rs +++ b/packages/rs-drive/src/drive/shielded/nullifiers/store_nullifiers/v0/mod.rs @@ -150,3 +150,154 @@ impl Drive { Ok(false) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::drive::shielded::nullifiers::queries::{ + shielded_compacted_nullifiers_path_vec, shielded_recent_nullifiers_path_vec, + }; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use grovedb::{PathQuery, Query, SizedQuery}; + + /// Storing an empty nullifier list must be a no-op: nothing is written, + /// compaction isn't triggered, and no error is returned. + #[test] + fn store_empty_nullifiers_is_noop() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + drive + .store_nullifiers_for_block_v0(&[], 10, 1_000, None, platform_version) + .expect("empty store should succeed"); + + // Nothing should be stored in the recent-nullifiers tree. + let path = shielded_recent_nullifiers_path_vec(); + let mut query = Query::new(); + query.insert_all(); + let pq = PathQuery::new(path, SizedQuery::new(query, None, None)); + let (results, _) = drive + .grove_get_path_query( + &pq, + None, + grovedb::query_result_type::QueryResultType::QueryKeyElementPairResultType, + &mut vec![], + &platform_version.drive, + ) + .expect("query should succeed"); + assert_eq!(results.to_key_elements().len(), 0); + } + + /// Storing a single batch of nullifiers for a block must round-trip via + /// fetch_recent_nullifier_changes without triggering compaction. + #[test] + fn store_single_block_round_trips_via_fetch() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let nullifiers = vec![[1u8; 32], [2u8; 32], [3u8; 32]]; + + drive + .store_nullifiers_for_block_v0(&nullifiers, 42, 1_000, None, platform_version) + .expect("store should succeed"); + + let changes = drive + .fetch_recent_nullifier_changes(0, None, None, platform_version) + .expect("fetch should succeed"); + + assert_eq!(changes.len(), 1); + assert_eq!(changes[0].block_height, 42); + assert_eq!(changes[0].nullifiers.as_slice(), nullifiers.as_slice()); + + // Nothing should have been compacted (we only stored one block). + let compacted = drive + .fetch_compacted_nullifier_changes(0, None, None, platform_version) + .expect("fetch compacted should succeed"); + assert_eq!(compacted.len(), 0); + } + + /// Exceeding the max_nullifiers_before_compaction threshold should trigger + /// compaction: recent-nullifiers tree is drained, compacted tree has one entry. + #[test] + fn compaction_triggers_on_nullifier_threshold() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + // Max nullifiers before compaction = 2048. Store 2048 at once. + let nullifiers: Vec<[u8; 32]> = (0..2048u32).map(|i| [(i & 0xff) as u8; 32]).collect(); + + drive + .store_nullifiers_for_block_v0(&nullifiers, 1, 1_000, None, platform_version) + .expect("first store should succeed"); + + // First store doesn't trigger because count=0 before, sum=0 before. + // new_sum = 0 + 2048 = 2048 >= 2048, so compaction IS triggered on the first block. + // That means recent is empty and compacted has one entry spanning [1,1]. + let recent = drive + .fetch_recent_nullifier_changes(0, None, None, platform_version) + .expect("fetch recent"); + assert_eq!( + recent.len(), + 0, + "recent should be empty after threshold-triggered compaction" + ); + + let compacted = drive + .fetch_compacted_nullifier_changes(0, None, None, platform_version) + .expect("fetch compacted"); + assert_eq!(compacted.len(), 1); + assert_eq!(compacted[0].start_block, 1); + assert_eq!(compacted[0].end_block, 1); + + // The compacted entry key must exist in GroveDB. + let path = shielded_compacted_nullifiers_path_vec(); + let mut query = Query::new(); + query.insert_all(); + let pq = PathQuery::new(path, SizedQuery::new(query, None, None)); + let (results, _) = drive + .grove_get_path_query( + &pq, + None, + grovedb::query_result_type::QueryResultType::QueryKeyElementPairResultType, + &mut vec![], + &platform_version.drive, + ) + .expect("query compacted"); + assert_eq!(results.to_key_elements().len(), 1); + } + + /// Storing under a transaction (Some(&tx)) should commit the nullifiers + /// only after the transaction commits. + #[test] + fn store_within_transaction_commits_correctly() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + let transaction = drive.grove.start_transaction(); + + drive + .store_nullifiers_for_block_v0( + &[[9u8; 32]], + 5, + 1_000, + Some(&transaction), + platform_version, + ) + .expect("store in tx"); + + // Without committing, the non-transactional reader should see nothing. + let changes_no_tx = drive + .fetch_recent_nullifier_changes(0, None, None, platform_version) + .expect("fetch without tx"); + assert_eq!(changes_no_tx.len(), 0); + + drive + .commit_transaction(transaction, &platform_version.drive) + .expect("commit"); + + let changes = drive + .fetch_recent_nullifier_changes(0, None, None, platform_version) + .expect("fetch after commit"); + assert_eq!(changes.len(), 1); + assert_eq!(changes[0].block_height, 5); + } +} diff --git a/packages/rs-drive/src/drive/tokens/status/fetch_token_status/mod.rs b/packages/rs-drive/src/drive/tokens/status/fetch_token_status/mod.rs index 6d9803dd81f..68751026c04 100644 --- a/packages/rs-drive/src/drive/tokens/status/fetch_token_status/mod.rs +++ b/packages/rs-drive/src/drive/tokens/status/fetch_token_status/mod.rs @@ -106,3 +106,64 @@ impl Drive { } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use dpp::tokens::status::v0::TokenStatusV0; + + /// fetch_token_status_with_costs returns (value, FeeResult) and the FeeResult + /// must be non-trivial when a real stateful fetch runs against existing data. + #[test] + fn with_costs_returns_fee_result_for_existing_token() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let token_id = [0x33u8; 32]; + let paused = TokenStatus::V0(TokenStatusV0 { paused: true }); + drive + .token_apply_status( + token_id, + paused.clone(), + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("apply status"); + + let (value, fees) = drive + .fetch_token_status_with_costs( + token_id, + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("fetch with costs"); + + assert_eq!(value, Some(paused)); + // A real stateful fetch should have accumulated storage or processing fees. + assert!(fees.processing_fee > 0 || fees.storage_fee > 0); + } + + /// apply=false (stateless) returns a FeeResult with only estimated costs + /// (no real IO), and the value is None when nothing exists. + #[test] + fn with_costs_stateless_missing_token_returns_none() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let (value, _fees) = drive + .fetch_token_status_with_costs( + [0xFFu8; 32], + &BlockInfo::default(), + false, // stateless → stateless direct query + no IO + None, + platform_version, + ) + .expect("stateless fetch"); + assert_eq!(value, None); + } +} diff --git a/packages/rs-drive/src/drive/tokens/status/fetch_token_status/v0/mod.rs b/packages/rs-drive/src/drive/tokens/status/fetch_token_status/v0/mod.rs index 7a8de7f45f1..3b65f7bc0dc 100644 --- a/packages/rs-drive/src/drive/tokens/status/fetch_token_status/v0/mod.rs +++ b/packages/rs-drive/src/drive/tokens/status/fetch_token_status/v0/mod.rs @@ -71,3 +71,161 @@ impl Drive { } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::drive::tokens::paths::token_statuses_root_path; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use dpp::block::block_info::BlockInfo; + use dpp::tokens::status::v0::TokenStatusV0; + use grovedb::Element; + + /// Fetching a token status for a token that was never inserted returns None + /// (exercises the PathKeyNotFound-swallowed branch). + #[test] + fn fetch_nonexistent_token_status_returns_none() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let missing_id = [0xCCu8; 32]; + let result = drive + .fetch_token_status_v0(missing_id, None, platform_version) + .expect("fetch missing token should return Ok(None)"); + assert_eq!(result, None); + } + + /// Round-trip: apply a paused status, fetch it back, then apply an unpaused + /// status and verify the state transition replaces the previous value. + #[test] + fn paused_to_active_transition_round_trips() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let token_id = [0x11u8; 32]; + + // Step 1: set paused = true. + let paused = TokenStatus::V0(TokenStatusV0 { paused: true }); + drive + .token_apply_status( + token_id, + paused.clone(), + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("apply paused"); + + let fetched = drive + .fetch_token_status_v0(token_id, None, platform_version) + .expect("fetch after paused"); + assert_eq!(fetched, Some(paused)); + + // Step 2: flip back to paused = false (active). + let active = TokenStatus::V0(TokenStatusV0 { paused: false }); + drive + .token_apply_status( + token_id, + active.clone(), + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("apply active"); + + let fetched = drive + .fetch_token_status_v0(token_id, None, platform_version) + .expect("fetch after active"); + assert_eq!(fetched, Some(active)); + } + + /// Stateless (apply=false) fetch must not require the element to exist — it + /// just reports estimated costs and returns None (exercising the stateless + /// DirectQuery branch in fetch_token_status_operations_v0). + #[test] + fn stateless_fetch_returns_none_for_missing_token_without_error() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let missing_id = [0xCDu8; 32]; + let mut drive_operations = vec![]; + let result = drive + .fetch_token_status_operations_v0( + missing_id, + false, // stateless + None, + &mut drive_operations, + platform_version, + ) + .expect("stateless fetch should succeed"); + // Stateless mode returns None; the DriveOperations vec captures cost info. + assert_eq!(result, None); + } + + /// Storing a non-Item element under a token_id key triggers the + /// "CorruptedElementType" branch when fetch expects Item. + #[test] + fn fetch_rejects_non_item_element() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let token_id = [0x77u8; 32]; + let path = token_statuses_root_path(); + + // Force-insert an empty subtree instead of an Item. + drive + .grove + .insert( + path.as_ref(), + &token_id, + Element::empty_tree(), + None, + None, + &platform_version.drive.grove_version, + ) + .unwrap() + .expect("insert wrong element type"); + + let err = drive + .fetch_token_status_v0(token_id, None, platform_version) + .expect_err("fetch should reject non-Item element"); + match err { + Error::Drive(DriveError::CorruptedElementType(_)) => {} + other => panic!("expected CorruptedElementType, got {:?}", other), + } + } + + /// Storing a garbage item payload triggers TokenStatus::deserialize_from_bytes + /// to error out. This exercises the deserialization-error propagation path. + #[test] + fn fetch_rejects_undecodable_token_status_item() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let token_id = [0x88u8; 32]; + let path = token_statuses_root_path(); + + // Insert a junk item — not a valid serialized TokenStatus. + drive + .grove + .insert( + path.as_ref(), + &token_id, + Element::new_item(vec![0xFFu8; 2]), + None, + None, + &platform_version.drive.grove_version, + ) + .unwrap() + .expect("insert garbage item"); + + let err = drive + .fetch_token_status_v0(token_id, None, platform_version) + .expect_err("fetch should fail on garbage item"); + // Allow any deserialization error variant; the important behavior is + // that fetch does not silently succeed. + let _ = err; + } +} diff --git a/packages/rs-drive/src/drive/tokens/status/fetch_token_statuses/v0/mod.rs b/packages/rs-drive/src/drive/tokens/status/fetch_token_statuses/v0/mod.rs index 8a9e1f23c3b..bffc5362071 100644 --- a/packages/rs-drive/src/drive/tokens/status/fetch_token_statuses/v0/mod.rs +++ b/packages/rs-drive/src/drive/tokens/status/fetch_token_statuses/v0/mod.rs @@ -60,3 +60,89 @@ impl Drive { .collect() } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::drive::tokens::paths::token_statuses_root_path; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use dpp::block::block_info::BlockInfo; + use dpp::tokens::status::v0::TokenStatusV0; + use grovedb::Element; + + /// Passing an empty token id list yields an empty BTreeMap without error. + #[test] + fn empty_token_id_list_returns_empty_map() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let result = drive + .fetch_token_statuses_v0(&[], None, platform_version) + .expect("fetch with no ids"); + assert!(result.is_empty()); + } + + /// Mixed existing + non-existing tokens populate the map with Some / None + /// entries in a single call. + #[test] + fn fetch_mixed_existing_and_missing_tokens() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let token_1 = [0xA1u8; 32]; + let missing = [0xA2u8; 32]; + + let paused = TokenStatus::V0(TokenStatusV0 { paused: true }); + drive + .token_apply_status( + token_1, + paused.clone(), + &BlockInfo::default(), + true, + None, + platform_version, + ) + .expect("apply status 1"); + + let result = drive + .fetch_token_statuses_v0(&[token_1, missing], None, platform_version) + .expect("fetch both"); + assert_eq!(result.len(), 2); + assert_eq!(result.get(&token_1).cloned(), Some(Some(paused))); + assert_eq!(result.get(&missing).cloned(), Some(None)); + } + + /// Storing a non-Item element under a token_id key triggers the + /// "token tree for statuses should contain only items" branch. + #[test] + fn fetch_statuses_rejects_non_item_element() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let token_id = [0xBBu8; 32]; + let path = token_statuses_root_path(); + + drive + .grove + .insert( + path.as_ref(), + &token_id, + Element::empty_tree(), + None, + None, + &platform_version.drive.grove_version, + ) + .unwrap() + .expect("insert wrong element"); + + let err = drive + .fetch_token_statuses_v0(&[token_id], None, platform_version) + .expect_err("fetch should reject non-item in the statuses tree"); + match err { + Error::Drive(DriveError::CorruptedDriveState(msg)) => { + assert!(msg.contains("items")); + } + other => panic!("expected CorruptedDriveState, got {:?}", other), + } + } +} diff --git a/packages/rs-drive/src/drive/tokens/status/prove_token_statuses/mod.rs b/packages/rs-drive/src/drive/tokens/status/prove_token_statuses/mod.rs index 29e31ef5940..8c74e0e4725 100644 --- a/packages/rs-drive/src/drive/tokens/status/prove_token_statuses/mod.rs +++ b/packages/rs-drive/src/drive/tokens/status/prove_token_statuses/mod.rs @@ -122,3 +122,44 @@ impl Drive { } } } + +#[cfg(test)] +mod dispatcher_tests { + use super::*; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + + /// An empty token id list produces a PathQuery with limit=0, which GroveDB + /// rejects as "proved path queries can not be for limit 0". This test + /// pins that rejection so the downstream error-propagation branch is covered. + #[test] + fn prove_empty_token_list_errors_from_grovedb() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let err = drive + .prove_token_statuses(&[], None, platform_version) + .expect_err("empty list should bubble up a GroveDB InvalidQuery"); + // The exact variant is GroveDB; we just verify the error propagated and + // did not silently return a proof of nothing. + let _ = err; + } + + /// prove_token_statuses_with_costs returns both proof bytes and a FeeResult + /// against a real stateful query. + #[test] + fn prove_with_costs_returns_fee_result() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let token_id = [0x99u8; 32]; + let (proof, _fees) = drive + .prove_token_statuses_with_costs( + &[token_id], + &BlockInfo::default(), + None, + platform_version, + ) + .expect("prove with costs"); + assert!(!proof.is_empty()); + } +} diff --git a/packages/rs-drive/src/drive/votes/insert/contested_resource/individual_vote/register_contested_resource_identity_vote/v0/mod.rs b/packages/rs-drive/src/drive/votes/insert/contested_resource/individual_vote/register_contested_resource_identity_vote/v0/mod.rs index 233631716d4..9337c44fbfd 100644 --- a/packages/rs-drive/src/drive/votes/insert/contested_resource/individual_vote/register_contested_resource_identity_vote/v0/mod.rs +++ b/packages/rs-drive/src/drive/votes/insert/contested_resource/individual_vote/register_contested_resource_identity_vote/v0/mod.rs @@ -170,3 +170,85 @@ impl Drive { Ok(drive_operations) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::drive::votes::resolved::vote_polls::contested_document_resource_vote_poll::ContestedDocumentResourceVotePollWithContractInfo; + use crate::error::drive::DriveError; + use crate::error::Error; + use crate::util::object_size_info::DataContractOwnedResolvedInfo; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use dpp::tests::fixtures::get_dpns_data_contract_fixture; + use dpp::voting::vote_choices::resource_vote_choice::ResourceVoteChoice; + use platform_version::version::PlatformVersion; + + fn dpns_vote_poll( + document_type_name: &str, + index_name: &str, + ) -> ContestedDocumentResourceVotePollWithContractInfo { + let pv = PlatformVersion::latest(); + let data_contract = + get_dpns_data_contract_fixture(None, 0, pv.protocol_version).data_contract_owned(); + ContestedDocumentResourceVotePollWithContractInfo { + contract: DataContractOwnedResolvedInfo::OwnedDataContract(data_contract), + document_type_name: document_type_name.to_string(), + index_name: index_name.to_string(), + index_values: vec![], + } + } + + /// When vote_poll references an index that doesn't exist, the operations + /// generation path surfaces the ContestedIndexNotFound error and does NOT + /// apply anything to GroveDB. + #[test] + fn register_operations_rejects_missing_index() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let poll = dpns_vote_poll("domain", "bogus_index"); + + let err = drive + .register_contested_resource_identity_vote_operations_v0( + [7u8; 32], + 1, + poll, + ResourceVoteChoice::Abstain, + None, + None, + platform_version, + ) + .expect_err("missing index must propagate"); + match err { + Error::Drive(DriveError::ContestedIndexNotFound(_)) => {} + other => panic!("expected ContestedIndexNotFound, got {:?}", other), + } + } + + /// When the document type does not exist on the contract, operations + /// generation surfaces a DataContractError via Error::Protocol — guarding + /// against silent mis-routing of votes. + #[test] + fn register_operations_rejects_missing_document_type() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let poll = dpns_vote_poll("bogus_doc_type", "parentNameAndLabel"); + + let err = drive + .register_contested_resource_identity_vote_operations_v0( + [7u8; 32], + 4, + poll, + ResourceVoteChoice::Abstain, + None, + None, + platform_version, + ) + .expect_err("missing doc type must propagate"); + match err { + Error::Drive(_) | Error::Protocol(_) => {} + other => panic!("unexpected: {:?}", other), + } + } +} diff --git a/packages/rs-drive/src/drive/votes/insert/contested_resource/insert_stored_info_for_contested_resource_vote_poll/v0/mod.rs b/packages/rs-drive/src/drive/votes/insert/contested_resource/insert_stored_info_for_contested_resource_vote_poll/v0/mod.rs index 96e2e26a41d..ea7e1d1330b 100644 --- a/packages/rs-drive/src/drive/votes/insert/contested_resource/insert_stored_info_for_contested_resource_vote_poll/v0/mod.rs +++ b/packages/rs-drive/src/drive/votes/insert/contested_resource/insert_stored_info_for_contested_resource_vote_poll/v0/mod.rs @@ -61,3 +61,83 @@ impl Drive { Ok(drive_operations) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::error::drive::DriveError; + use crate::error::Error; + use crate::util::object_size_info::DataContractOwnedResolvedInfo; + use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; + use dpp::block::block_info::BlockInfo; + use dpp::tests::fixtures::get_dpns_data_contract_fixture; + use dpp::voting::vote_info_storage::contested_document_vote_poll_stored_info::ContestedDocumentVotePollStoredInfo; + + fn dpns_vote_poll( + document_type_name: &str, + index_name: &str, + ) -> ContestedDocumentResourceVotePollWithContractInfo { + let pv = PlatformVersion::latest(); + let data_contract = + get_dpns_data_contract_fixture(None, 0, pv.protocol_version).data_contract_owned(); + ContestedDocumentResourceVotePollWithContractInfo { + contract: DataContractOwnedResolvedInfo::OwnedDataContract(data_contract), + document_type_name: document_type_name.to_string(), + index_name: index_name.to_string(), + index_values: vec![], + } + } + + fn stored_info() -> ContestedDocumentVotePollStoredInfo { + let pv = PlatformVersion::latest(); + ContestedDocumentVotePollStoredInfo::new(BlockInfo::default(), pv) + .expect("construct default stored info") + } + + /// insert_stored_info_operations propagates a ContestedIndexNotFound when the + /// vote poll references an index name that does not exist on the document type. + #[test] + fn operations_rejects_missing_index_name() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let poll = dpns_vote_poll("domain", "no_such_index"); + let stored = stored_info(); + + let err = drive + .insert_stored_info_for_contested_resource_vote_poll_operations_v0( + &poll, + stored, + platform_version, + ) + .expect_err("missing index must fail"); + match err { + Error::Drive(DriveError::ContestedIndexNotFound(_)) => {} + other => panic!("expected ContestedIndexNotFound, got {:?}", other), + } + } + + /// When the document type on the poll does not exist on the contract, the + /// operations path surfaces a DataContractError — not a panic. + #[test] + fn operations_rejects_missing_document_type() { + let drive = setup_drive_with_initial_state_structure(None); + let platform_version = PlatformVersion::latest(); + + let poll = dpns_vote_poll("no_such_doc_type", "parentNameAndLabel"); + let stored = stored_info(); + + let err = drive + .insert_stored_info_for_contested_resource_vote_poll_operations_v0( + &poll, + stored, + platform_version, + ) + .expect_err("missing document type must fail"); + // Either Error::Drive or Error::Protocol, but NEVER a panic. + match err { + Error::Drive(_) | Error::Protocol(_) => {} + other => panic!("unexpected error variant: {:?}", other), + } + } +} From 5db83ffba2ab826cf7bbd9c54593ab063822d266 Mon Sep 17 00:00:00 2001 From: Quantum Explorer Date: Thu, 23 Apr 2026 16:24:51 +0800 Subject: [PATCH 2/2] test: address CodeRabbit review comments on PR #3525 Co-Authored-By: Claude Opus 4.7 (1M context) --- .../document_type/property/mod.rs | 29 ++++++++++--- .../proof/verify_is_not_spent/v0/mod.rs | 11 +++++ .../v0/mod.rs | 12 ++++++ .../v0/mod.rs | 5 ++- .../prove_address_funds_trunk_query/v0/mod.rs | 42 +++++++++++++++---- .../prove/prove_balance_and_nonce/v0/mod.rs | 17 +++++++- .../fetch_identity_contract_nonce/v0/mod.rs | 1 + .../prove_identity_contract_nonce/v0/mod.rs | 11 +++++ .../v0/mod.rs | 28 ++++++++++++- .../nullifiers/compact_nullifiers/v0/mod.rs | 34 +++++++++++++-- .../nullifiers/store_nullifiers/v0/mod.rs | 19 +++++++-- 11 files changed, 182 insertions(+), 27 deletions(-) diff --git a/packages/rs-dpp/src/data_contract/document_type/property/mod.rs b/packages/rs-dpp/src/data_contract/document_type/property/mod.rs index cd20adc950c..4375d54a766 100644 --- a/packages/rs-dpp/src/data_contract/document_type/property/mod.rs +++ b/packages/rs-dpp/src/data_contract/document_type/property/mod.rs @@ -6297,8 +6297,12 @@ mod tests { #[test] fn test_read_optionally_from_object_required_field_after_finished_buffer() { - // If the inner buffer ends before a required field is read, we should get - // a CorruptedSerialization error. + // Exercises the explicit "required field after finished buffer in object" + // branch: the optional first field exhausts the inner buffer by reading + // an absence marker from a zero-length buffer (which flips + // `finished_buffer` to true), then the iterator sees a required field + // with the buffer already finished and must produce a + // CorruptedSerialization error. use integer_encoding::VarInt; let mut inner_fields = IndexMap::new(); // First field is optional @@ -6321,13 +6325,26 @@ mod tests { ); let prop = DocumentPropertyType::Object(inner_fields); - // Build inner object bytes: a is optional, marker 0 => absent. Buffer ends. - // Required field "b" then has no data to read => error. - let inner_bytes = vec![0u8]; + // Empty inner buffer: the optional-field read observes EOF on the + // absence-marker byte, returns (None, finished = true). On the next + // iteration, "b" is required and the buffer is finished => the + // targeted error branch fires. + let inner_bytes: Vec = vec![]; let mut data = inner_bytes.len().encode_var_vec(); data.extend_from_slice(&inner_bytes); let mut reader = BufReader::new(data.as_slice()); - assert!(prop.read_optionally_from(&mut reader, true).is_err()); + let err = prop + .read_optionally_from(&mut reader, true) + .expect_err("required field with finished buffer must error"); + match err { + DataContractError::CorruptedSerialization(msg) => { + assert!( + msg.contains("required field after finished buffer in object"), + "expected the finished-buffer branch, got: {msg}" + ); + } + other => panic!("expected CorruptedSerialization, got {other:?}"), + } } // ----------------------------------------------------------------------- diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/proof/verify_is_not_spent/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/proof/verify_is_not_spent/v0/mod.rs index 7da5fac9f37..8ea672330ea 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/proof/verify_is_not_spent/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/common/asset_lock/proof/verify_is_not_spent/v0/mod.rs @@ -89,6 +89,17 @@ pub(super) fn verify_asset_lock_is_not_spent_and_has_enough_balance_v0( #[cfg(test)] mod tests { + // NOTE on `PlatformVersion::latest()` vs `::first()`: + // `TestPlatformBuilder::build_with_mock_rpc()` initializes the underlying + // drive / state structure at `PlatformVersion::latest()` unless an explicit + // `initial_protocol_version` is supplied. Because the function under test + // delegates to `drive.fetch_asset_lock_outpoint_info(&..., &platform_version.drive)`, + // the version passed here must match the drive version the platform was + // built with — otherwise a DriveError::UnknownVersionMismatch fires on + // every call. These tests therefore intentionally use `latest()` to stay + // aligned with the builder. If v0-specific logic is ever asserted here + // (beyond the _v0 function dispatch, which is already direct), rebuild the + // platform via `.with_initial_protocol_version(PlatformVersion::first().protocol_version)`. use super::*; use crate::platform_types::platform::PlatformRef; use crate::test::helpers::setup::TestPlatformBuilder; diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_dont_exist_in_state/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_dont_exist_in_state/v0/mod.rs index 5e2e0d2c5f6..5fc0292d25c 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_dont_exist_in_state/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/common/validate_identity_public_key_ids_dont_exist_in_state/v0/mod.rs @@ -182,6 +182,13 @@ mod tests { } } + /// Empty input `&[]` → `key_ids = vec![]`, `limit = Some(0)`, and the + /// built request is `SpecificKeys(vec![])`. The production path issues + /// `drive.fetch_identity_keys` which must return an empty `KeyIDVec`, so + /// the validator's branch `if !keys.is_empty()` is skipped and the + /// function yields a valid (empty) `SimpleConsensusValidationResult`. + /// This pins the "no keys to check" short-circuit behaviour: callers that + /// pass an empty slice must not trigger a duplicated-id error. #[test] fn should_pass_when_empty_key_list() { let platform_version = PlatformVersion::latest(); @@ -206,5 +213,10 @@ mod tests { .expect("should succeed"); assert!(result.is_valid(), "empty list should be trivially valid"); + assert!( + result.errors.is_empty(), + "no consensus errors expected for an empty key list, got {:?}", + result.errors + ); } } diff --git a/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_branch_query/v0/mod.rs b/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_branch_query/v0/mod.rs index 55084afdd18..7ff547d41a2 100644 --- a/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_branch_query/v0/mod.rs +++ b/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_branch_query/v0/mod.rs @@ -150,8 +150,9 @@ mod tests { let _ = err; } - /// prove_address_funds_branch_query_operations_v0 populates drive_operations - /// even when the underlying call errors, provided validation passed. + /// prove_address_funds_branch_query_operations_v0 short-circuits on a + /// depth-below-min validation failure: the error is returned before any + /// GroveDB work happens, so `drive_operations` must stay empty. #[test] fn branch_query_operations_invalid_input_does_not_populate_ops() { let drive = setup_drive_with_initial_state_structure(None); diff --git a/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_trunk_query/v0/mod.rs b/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_trunk_query/v0/mod.rs index f65c02d6246..0786cfc6e6d 100644 --- a/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_trunk_query/v0/mod.rs +++ b/packages/rs-drive/src/drive/address_funds/prove/prove_address_funds_trunk_query/v0/mod.rs @@ -48,12 +48,18 @@ impl Drive { #[cfg(test)] mod tests { use super::*; + use crate::error::drive::DriveError; use crate::util::batch::drive_op_batch::{AddressFundsOperationType, DriveOperation}; use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; use dpp::address_funds::PlatformAddress; use dpp::block::block_info::BlockInfo; - /// Trunk query operations must populate drive_operations on success. + /// Trunk query operations must populate drive_operations on success. In the + /// minimal test setup no checkpoint exists yet, so the only acceptable + /// error is `DriveError::NoCheckpointsAvailable` (raised by + /// `grove_get_proved_trunk_chunk_query_v0` when + /// `GroveDBToUse::LatestCheckpoint` is selected against an empty + /// checkpoint map). Any other error means the prove-path regressed. #[test] fn trunk_query_operations_populates_ops() { let drive = setup_drive_with_initial_state_structure(None); @@ -87,17 +93,20 @@ mod tests { assert!(!proof.is_empty()); assert!(!drive_operations.is_empty()); } - Err(_e) => { - // On some platform versions the trunk query may not yet be - // fully supported (checkpoints not initialized in this minimal - // setup). In that case drive_operations should still be - // observable — we just ensure no panic. + Err(Error::Drive(DriveError::NoCheckpointsAvailable)) => { + // Expected when the test setup did not create a checkpoint. } + Err(other) => panic!( + "unexpected error from prove_address_funds_trunk_query_operations_v0: {:?}", + other + ), } } - /// Public dispatcher matches the v0 path (same result shape), whether it - /// succeeds or bubbles up an underlying error. + /// Public dispatcher matches the v0 path. When both paths succeed the + /// proof bytes must be byte-equal; when both fail that's also acceptable + /// (they must fail together). A divergence in the success/fail outcome + /// means the dispatcher disagrees with the v0 implementation. #[test] fn top_level_trunk_query_returns_same_shape_as_v0() { let drive = setup_drive_with_initial_state_structure(None); @@ -106,6 +115,21 @@ mod tests { let top = drive.prove_address_funds_trunk_query(platform_version); let v0 = drive.prove_address_funds_trunk_query_v0(platform_version); - assert_eq!(top.is_ok(), v0.is_ok()); + match (top, v0) { + (Ok(a), Ok(b)) => assert_eq!( + a, b, + "top-level dispatcher must return identical proof bytes to v0" + ), + (Err(_), Err(_)) => { + // Both paths hit the same underlying error (e.g. no checkpoint + // available in a minimal test setup). That's consistent + // behaviour — nothing further to assert. + } + (top, v0) => panic!( + "dispatcher/v0 divergence: top={:?} v0={:?}", + top.is_ok(), + v0.is_ok() + ), + } } } diff --git a/packages/rs-drive/src/drive/address_funds/prove/prove_balance_and_nonce/v0/mod.rs b/packages/rs-drive/src/drive/address_funds/prove/prove_balance_and_nonce/v0/mod.rs index 415c8437d58..784e404f31d 100644 --- a/packages/rs-drive/src/drive/address_funds/prove/prove_balance_and_nonce/v0/mod.rs +++ b/packages/rs-drive/src/drive/address_funds/prove/prove_balance_and_nonce/v0/mod.rs @@ -109,8 +109,9 @@ mod tests { assert!(!ops.is_empty(), "drive_operations should track the proof"); } - /// Proving a non-existent address still produces a valid proof bytes blob - /// (an absence proof). The call itself must not error. + /// Proving a non-existent address produces an absence proof that verifies + /// cleanly: the proof parses via `Drive::verify_address_info`, the root + /// hash is not empty, and the returned balance/nonce is `None`. #[test] fn prove_nonexistent_address_returns_absence_proof() { let drive = setup_drive_with_initial_state_structure(None); @@ -123,6 +124,18 @@ mod tests { .prove_balance_and_nonce_v0(&ADDR_MISSING, None, platform_version) .expect("prove missing"); assert!(!proof.is_empty()); + + // Verify the absence proof round-trips: known-absent address must + // decode as `None` and must produce a non-empty root hash. + let (root_hash, result) = + Drive::verify_address_info(proof.as_slice(), &ADDR_MISSING, false, platform_version) + .expect("absence proof should verify"); + assert!(!root_hash.is_empty(), "root hash should not be empty"); + assert!( + result.is_none(), + "absent address must decode as None, got {:?}", + result + ); } /// Attempting to prove from within a transaction is explicitly not diff --git a/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/fetch_identity_contract_nonce/v0/mod.rs b/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/fetch_identity_contract_nonce/v0/mod.rs index c21fca7392e..265ee0b2428 100644 --- a/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/fetch_identity_contract_nonce/v0/mod.rs +++ b/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/fetch_identity_contract_nonce/v0/mod.rs @@ -118,6 +118,7 @@ impl Drive { } } +#[cfg(feature = "server")] #[cfg(test)] mod tests { use super::*; diff --git a/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/prove_identity_contract_nonce/v0/mod.rs b/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/prove_identity_contract_nonce/v0/mod.rs index c9f8ef30cae..8754f1ecb45 100644 --- a/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/prove_identity_contract_nonce/v0/mod.rs +++ b/packages/rs-drive/src/drive/identity/contract_info/identity_contract_nonce/prove_identity_contract_nonce/v0/mod.rs @@ -22,8 +22,19 @@ impl Drive { } } +#[cfg(feature = "server")] #[cfg(test)] mod tests { + // NOTE: no proof-verifier roundtrip here. Unlike + // `Drive::verify_address_info`, there is no public verifier helper for + // "identity-contract-nonce" proofs — callers decode via the generic + // `GroveDb::verify_*` API on the path_query used to produce the proof, + // which requires reconstructing the query shape outside this module. The + // present tests therefore exercise prove-path correctness by comparing + // absent-vs-present proof bytes (they must differ) and asserting a + // non-empty blob; a verifier roundtrip would be more meaningful but + // belongs in a higher-level integration test that already owns the + // verifier plumbing. use super::*; use crate::util::test_helpers::setup::setup_drive; use dpp::block::block_info::BlockInfo; diff --git a/packages/rs-drive/src/drive/shielded/nullifiers/cleanup_expired_nullifier_compactions/v0/mod.rs b/packages/rs-drive/src/drive/shielded/nullifiers/cleanup_expired_nullifier_compactions/v0/mod.rs index 6bb29971c47..755d218e6f7 100644 --- a/packages/rs-drive/src/drive/shielded/nullifiers/cleanup_expired_nullifier_compactions/v0/mod.rs +++ b/packages/rs-drive/src/drive/shielded/nullifiers/cleanup_expired_nullifier_compactions/v0/mod.rs @@ -274,6 +274,19 @@ mod tests { compacted.is_empty(), "compacted entry should have been cleaned up" ); + + // A second cleanup must be a no-op: if the first call truly deleted + // the expiration-index entry (not just the compacted row), there is + // nothing left to iterate, so the returned count is 0. If the index + // row had been left behind the second cleanup would try to chase a + // dangling reference and either re-count 1 or error. + let cleaned_again = drive + .cleanup_expired_nullifier_compactions_v0(current, None, platform_version) + .expect("second cleanup must be a no-op"); + assert_eq!( + cleaned_again, 0, + "expiration index entry must also have been deleted" + ); } /// Corrupting an expiration entry payload (garbage item data) triggers the @@ -301,6 +314,19 @@ mod tests { let err = drive .cleanup_expired_nullifier_compactions_v0(u64::MAX, None, platform_version) .expect_err("cleanup must reject undecodable payload"); - assert!(matches!(err, Error::Protocol(_))); + // Must be the specific CorruptedSerialization variant surfaced by + // `NullifierExpirationRanges::decode` — not any other ProtocolError. + match err { + Error::Protocol(boxed) => match *boxed { + ProtocolError::CorruptedSerialization(msg) => { + assert!( + msg.contains("cannot decode nullifier expiration ranges"), + "expected the NullifierExpirationRanges decode failure, got: {msg}" + ); + } + other => panic!("expected CorruptedSerialization, got ProtocolError::{other:?}"), + }, + other => panic!("expected Error::Protocol, got {other:?}"), + } } } diff --git a/packages/rs-drive/src/drive/shielded/nullifiers/compact_nullifiers/v0/mod.rs b/packages/rs-drive/src/drive/shielded/nullifiers/compact_nullifiers/v0/mod.rs index 0de26b91546..7d8e1b8f7e3 100644 --- a/packages/rs-drive/src/drive/shielded/nullifiers/compact_nullifiers/v0/mod.rs +++ b/packages/rs-drive/src/drive/shielded/nullifiers/compact_nullifiers/v0/mod.rs @@ -205,17 +205,22 @@ mod tests { /// Compacting twice with the same current_block_time_ms must merge the two /// ranges under the same expiration key (exercising the - /// "existing_ranges is Some" branch). + /// "existing_ranges is Some" branch). We additionally drive a cleanup past + /// the shared expiration time: if the second compaction had *overwritten* + /// the expiration index entry instead of appending, cleanup would only + /// chase one range and leave the other compacted row dangling. #[test] fn second_compaction_with_same_time_appends_range_to_expiration() { let drive = setup_drive_with_initial_state_structure(None); let platform_version = PlatformVersion::latest(); + let block_time = 1_000u64; + drive .compact_nullifiers_with_current_block_v0( &[[1u8; 32]], 10, - 1_000, + block_time, None, platform_version, ) @@ -225,7 +230,7 @@ mod tests { .compact_nullifiers_with_current_block_v0( &[[2u8; 32]], 20, - 1_000, // same block time → same expiration key + block_time, // same block time → same expiration key None, platform_version, ) @@ -235,6 +240,29 @@ mod tests { .fetch_compacted_nullifier_changes(0, None, None, platform_version) .expect("fetch"); assert_eq!(compacted.len(), 2, "both compacted ranges must be present"); + + // Cleanup past the shared expiration must remove BOTH ranges. This is + // what proves the second compaction appended its range to the + // existing expiration-index entry rather than overwriting it — a + // single range under that key would leave one compacted row behind + // and the count would be 1, not 2. + let current = + block_time + crate::drive::shielded::nullifiers::compact_nullifiers::ONE_WEEK_IN_MS + 1; + let cleaned = drive + .cleanup_expired_nullifier_compactions_v0(current, None, platform_version) + .expect("cleanup"); + assert_eq!( + cleaned, 2, + "cleanup must delete both ranges stored under the shared expiration key" + ); + + let compacted_after = drive + .fetch_compacted_nullifier_changes(0, None, None, platform_version) + .expect("fetch after cleanup"); + assert!( + compacted_after.is_empty(), + "no compacted rows should remain after cleanup" + ); } /// Compacting drains entries stored via store_nullifiers_for_block_v0 and the diff --git a/packages/rs-drive/src/drive/shielded/nullifiers/store_nullifiers/v0/mod.rs b/packages/rs-drive/src/drive/shielded/nullifiers/store_nullifiers/v0/mod.rs index dfd698b6d10..add2c1d9dc1 100644 --- a/packages/rs-drive/src/drive/shielded/nullifiers/store_nullifiers/v0/mod.rs +++ b/packages/rs-drive/src/drive/shielded/nullifiers/store_nullifiers/v0/mod.rs @@ -223,16 +223,27 @@ mod tests { let drive = setup_drive_with_initial_state_structure(None); let platform_version = PlatformVersion::latest(); - // Max nullifiers before compaction = 2048. Store 2048 at once. - let nullifiers: Vec<[u8; 32]> = (0..2048u32).map(|i| [(i & 0xff) as u8; 32]).collect(); + // Derive the compaction threshold from the active platform version so + // this test stays correct if the constant is bumped in a future + // drive version. + let max_nullifiers = platform_version + .drive + .methods + .saved_block_transactions + .max_nullifiers_before_compaction; + + let nullifiers: Vec<[u8; 32]> = (0..max_nullifiers) + .map(|i| [(i & 0xff) as u8; 32]) + .collect(); drive .store_nullifiers_for_block_v0(&nullifiers, 1, 1_000, None, platform_version) .expect("first store should succeed"); // First store doesn't trigger because count=0 before, sum=0 before. - // new_sum = 0 + 2048 = 2048 >= 2048, so compaction IS triggered on the first block. - // That means recent is empty and compacted has one entry spanning [1,1]. + // new_sum = 0 + max_nullifiers >= max_nullifiers, so compaction IS + // triggered on the first block. That means recent is empty and + // compacted has one entry spanning [1,1]. let recent = drive .fetch_recent_nullifier_changes(0, None, None, platform_version) .expect("fetch recent");