From cbc147b7e31d3c3d2e993308696fdb0bcecd6ee8 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Sun, 25 Jan 2026 16:44:00 -0800 Subject: [PATCH 01/77] chore: improve endpoint ordering in RPC api proto file --- proto/proto/rpc.proto | 60 ++++++++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index f521fc1c5..b120963f2 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -17,6 +17,13 @@ service Api { // Returns the status info of the node. rpc Status(google.protobuf.Empty) returns (RpcStatus) {} + // Returns the query parameter limits configured for RPC methods. + // + // These define the maximum number of each parameter a method will accept. + // Exceeding the limit will result in the request being rejected and you should instead send + // multiple smaller requests. + rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} + // Returns a Sparse Merkle Tree opening proof for each requested nullifier // // Each proof demonstrates either: @@ -46,6 +53,9 @@ service Api { // Returns the script for a note by its root. rpc GetNoteScriptByRoot(note.NoteRoot) returns (MaybeNoteScript) {} + // TRANSACTION SUBMISSION ENDPOINTS + // -------------------------------------------------------------------------------------------- + // Submits proven transaction to the Miden network. Returns the node's current block height. rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} @@ -63,6 +73,25 @@ service Api { // Returns the node's current block height. rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} + // STATE SYNCHRONIZATION ENDPOINTS + // -------------------------------------------------------------------------------------------- + + // Returns transactions records for specific accounts within a block range. 
+ rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} + + // Returns info which can be used by the client to sync up to the tip of chain for the notes + // they are interested in. + // + // Client specifies the `note_tags` they are interested in, and the block height from which to + // search for new for matching notes for. The request will then return the next block containing + // any note matching the provided tags. + // + // The response includes each note's metadata and inclusion proof. + // + // A basic note sync can be implemented by repeatedly requesting the previous response's block + // until reaching the tip of the chain. + rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} + // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. // // Note that only 16-bit prefixes are supported at this time. @@ -71,16 +100,8 @@ service Api { // Returns account vault updates for specified account within a block range. rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} - // Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. - // - // Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - // matching notes for. The request will then return the next block containing any note matching the provided tags. - // - // The response includes each note's metadata and inclusion proof. - // - // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - // tip of the chain. - rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} + // Returns storage map updates for specified account and storage slots within a block range. 
+ rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} // Returns info which can be used by the client to sync up to the latest state of the chain // for the objects (accounts and notes) the client is interested in. @@ -90,27 +111,14 @@ service Api { // in a loop until `response.block_header.block_num == response.chain_tip`, at which point // the client is fully synchronized with the chain. // - // Each update response also contains info about new notes, accounts etc. created. It also returns - // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - // MMR peaks and chain MMR nodes. + // Each update response also contains info about new notes, accounts etc. created. It also + // returns Chain MMR delta that can be used to update the state of Chain MMR. This includes + // both chain MMR peaks and chain MMR nodes. // // For preserving some degree of privacy, note tags contain only high // part of hashes. Thus, returned data contains excessive notes, client can make // additional filtering of that data on its side. rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} - - // Returns storage map updates for specified account and storage slots within a block range. - rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} - - // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} - - // Returns the query parameter limits configured for RPC methods. - // - // These define the maximum number of each parameter a method will accept. - // Exceeding the limit will result in the request being rejected and you should instead send - // multiple smaller requests. 
- rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} } // RPC STATUS From bf1c81f20f10abddcbd3ee112c5e8f496d291198 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Sun, 25 Jan 2026 16:59:31 -0800 Subject: [PATCH 02/77] chore: update generated proto files --- crates/proto/src/generated/rpc.rs | 402 +++++++++++++++--------------- 1 file changed, 203 insertions(+), 199 deletions(-) diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index 798a1d18e..0f436386a 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -713,6 +713,29 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "Status")); self.inner.unary(req, path, codec).await } + /// Returns the query parameter limits configured for RPC methods. + /// + /// These define the maximum number of each parameter a method will accept. + /// Exceeding the limit will result in the request being rejected and you should instead send + /// multiple smaller requests. + pub async fn get_limits( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetLimits"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetLimits")); + self.inner.unary(req, path, codec).await + } /// Returns a Sparse Merkle Tree opening proof for each requested nullifier /// /// Each proof demonstrates either: @@ -928,14 +951,12 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SubmitProvenBatch")); self.inner.unary(req, path, codec).await } - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. 
- /// - /// Note that only 16-bit prefixes are supported at this time. - pub async fn sync_nullifiers( + /// Returns transactions records for specific accounts within a block range. + pub async fn sync_transactions( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -947,17 +968,27 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNullifiers"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncTransactions"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNullifiers")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncTransactions")); self.inner.unary(req, path, codec).await } - /// Returns account vault updates for specified account within a block range. - pub async fn sync_account_vault( + /// Returns info which can be used by the client to sync up to the tip of chain for the notes + /// they are interested in. + /// + /// Client specifies the `note_tags` they are interested in, and the block height from which to + /// search for new for matching notes for. The request will then return the next block containing + /// any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block + /// until reaching the tip of the chain. 
+ pub async fn sync_notes( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -969,25 +1000,19 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncAccountVault"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNotes"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncAccountVault")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNotes")); self.inner.unary(req, path, codec).await } - /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. - /// - /// Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. + /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - pub async fn sync_notes( + /// Note that only 16-bit prefixes are supported at this time. 
+ pub async fn sync_nullifiers( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -999,31 +1024,17 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNotes"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNullifiers"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNotes")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNullifiers")); self.inner.unary(req, path, codec).await } - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects (accounts and notes) the client is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. Client is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the client is fully synchronized with the chain. - /// - /// Each update response also contains info about new notes, accounts etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags contain only high - /// part of hashes. Thus, returned data contains excessive notes, client can make - /// additional filtering of that data on its side. - pub async fn sync_state( + /// Returns account vault updates for specified account within a block range. 
+ pub async fn sync_account_vault( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -1035,9 +1046,9 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncState"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncAccountVault"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncState")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncAccountVault")); self.inner.unary(req, path, codec).await } /// Returns storage map updates for specified account and storage slots within a block range. @@ -1065,12 +1076,26 @@ pub mod api_client { .insert(GrpcMethod::new("rpc.Api", "SyncAccountStorageMaps")); self.inner.unary(req, path, codec).await } - /// Returns transactions records for specific accounts within a block range. - pub async fn sync_transactions( + /// Returns info which can be used by the client to sync up to the latest state of the chain + /// for the objects (accounts and notes) the client is interested in. + /// + /// This request returns the next block containing requested data. It also returns `chain_tip` + /// which is the latest block number in the chain. Client is expected to repeat these requests + /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point + /// the client is fully synchronized with the chain. + /// + /// Each update response also contains info about new notes, accounts etc. created. It also + /// returns Chain MMR delta that can be used to update the state of Chain MMR. This includes + /// both chain MMR peaks and chain MMR nodes. + /// + /// For preserving some degree of privacy, note tags contain only high + /// part of hashes. 
Thus, returned data contains excessive notes, client can make + /// additional filtering of that data on its side. + pub async fn sync_state( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -1082,32 +1107,9 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncTransactions"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncTransactions")); - self.inner.unary(req, path, codec).await - } - /// Returns the query parameter limits configured for RPC methods. - /// - /// These define the maximum number of each parameter a method will accept. - /// Exceeding the limit will result in the request being rejected and you should instead send - /// multiple smaller requests. - pub async fn get_limits( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetLimits"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncState"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetLimits")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncState")); self.inner.unary(req, path, codec).await } } @@ -1130,6 +1132,15 @@ pub mod api_server { &self, request: tonic::Request<()>, ) -> std::result::Result, tonic::Status>; + /// Returns the query parameter limits configured for RPC methods. + /// + /// These define the maximum number of each parameter a method will accept. 
+ /// Exceeding the limit will result in the request being rejected and you should instead send + /// multiple smaller requests. + async fn get_limits( + &self, + request: tonic::Request<()>, + ) -> std::result::Result, tonic::Status>; /// Returns a Sparse Merkle Tree opening proof for each requested nullifier /// /// Each proof demonstrates either: @@ -1212,6 +1223,32 @@ pub mod api_server { tonic::Response, tonic::Status, >; + /// Returns transactions records for specific accounts within a block range. + async fn sync_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns info which can be used by the client to sync up to the tip of chain for the notes + /// they are interested in. + /// + /// Client specifies the `note_tags` they are interested in, and the block height from which to + /// search for new for matching notes for. The request will then return the next block containing + /// any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block + /// until reaching the tip of the chain. + async fn sync_notes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. /// /// Note that only 16-bit prefixes are supported at this time. @@ -1230,20 +1267,12 @@ pub mod api_server { tonic::Response, tonic::Status, >; - /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. - /// - /// Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. 
- /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - async fn sync_notes( + /// Returns storage map updates for specified account and storage slots within a block range. + async fn sync_account_storage_maps( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns info which can be used by the client to sync up to the latest state of the chain @@ -1254,9 +1283,9 @@ pub mod api_server { /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point /// the client is fully synchronized with the chain. /// - /// Each update response also contains info about new notes, accounts etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. + /// Each update response also contains info about new notes, accounts etc. created. It also + /// returns Chain MMR delta that can be used to update the state of Chain MMR. This includes + /// both chain MMR peaks and chain MMR nodes. /// /// For preserving some degree of privacy, note tags contain only high /// part of hashes. Thus, returned data contains excessive notes, client can make @@ -1268,31 +1297,6 @@ pub mod api_server { tonic::Response, tonic::Status, >; - /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_account_storage_maps( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns transactions records for specific accounts within a block range. 
- async fn sync_transactions( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the query parameter limits configured for RPC methods. - /// - /// These define the maximum number of each parameter a method will accept. - /// Exceeding the limit will result in the request being rejected and you should instead send - /// multiple smaller requests. - async fn get_limits( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; } /// RPC API for the RPC component #[derive(Debug)] @@ -1410,6 +1414,45 @@ pub mod api_server { }; Box::pin(fut) } + "/rpc.Api/GetLimits" => { + #[allow(non_camel_case_types)] + struct GetLimitsSvc(pub Arc); + impl tonic::server::UnaryService<()> for GetLimitsSvc { + type Response = super::RpcLimits; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call(&mut self, request: tonic::Request<()>) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_limits(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetLimitsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/rpc.Api/CheckNullifiers" => { #[allow(non_camel_case_types)] struct CheckNullifiersSvc(pub Arc); @@ -1775,25 +1818,25 @@ pub mod api_server { }; Box::pin(fut) } - 
"/rpc.Api/SyncNullifiers" => { + "/rpc.Api/SyncTransactions" => { #[allow(non_camel_case_types)] - struct SyncNullifiersSvc(pub Arc); + struct SyncTransactionsSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService - for SyncNullifiersSvc { - type Response = super::SyncNullifiersResponse; + > tonic::server::UnaryService + for SyncTransactionsSvc { + type Response = super::SyncTransactionsResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_nullifiers(&inner, request).await + ::sync_transactions(&inner, request).await }; Box::pin(fut) } @@ -1804,7 +1847,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncNullifiersSvc(inner); + let method = SyncTransactionsSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1820,25 +1863,23 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncAccountVault" => { + "/rpc.Api/SyncNotes" => { #[allow(non_camel_case_types)] - struct SyncAccountVaultSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncAccountVaultSvc { - type Response = super::SyncAccountVaultResponse; + struct SyncNotesSvc(pub Arc); + impl tonic::server::UnaryService + for SyncNotesSvc { + type Response = super::SyncNotesResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_account_vault(&inner, request).await + ::sync_notes(&inner, request).await }; Box::pin(fut) } @@ -1849,7 +1890,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); 
let fut = async move { - let method = SyncAccountVaultSvc(inner); + let method = SyncNotesSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1865,23 +1906,25 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncNotes" => { + "/rpc.Api/SyncNullifiers" => { #[allow(non_camel_case_types)] - struct SyncNotesSvc(pub Arc); - impl tonic::server::UnaryService - for SyncNotesSvc { - type Response = super::SyncNotesResponse; + struct SyncNullifiersSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService + for SyncNullifiersSvc { + type Response = super::SyncNullifiersResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_notes(&inner, request).await + ::sync_nullifiers(&inner, request).await }; Box::pin(fut) } @@ -1892,7 +1935,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncNotesSvc(inner); + let method = SyncNullifiersSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1908,23 +1951,25 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncState" => { + "/rpc.Api/SyncAccountVault" => { #[allow(non_camel_case_types)] - struct SyncStateSvc(pub Arc); - impl tonic::server::UnaryService - for SyncStateSvc { - type Response = super::SyncStateResponse; + struct SyncAccountVaultSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService + for SyncAccountVaultSvc { + type Response = super::SyncAccountVaultResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = 
async move { - ::sync_state(&inner, request).await + ::sync_account_vault(&inner, request).await }; Box::pin(fut) } @@ -1935,7 +1980,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncStateSvc(inner); + let method = SyncAccountVaultSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1996,64 +2041,23 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncTransactions" => { + "/rpc.Api/SyncState" => { #[allow(non_camel_case_types)] - struct SyncTransactionsSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncTransactionsSvc { - type Response = super::SyncTransactionsResponse; + struct SyncStateSvc(pub Arc); + impl tonic::server::UnaryService + for SyncStateSvc { + type Response = super::SyncStateResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_transactions(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncTransactionsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetLimits" => { - 
#[allow(non_camel_case_types)] - struct GetLimitsSvc(pub Arc); - impl tonic::server::UnaryService<()> for GetLimitsSvc { - type Response = super::RpcLimits; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_limits(&inner, request).await + ::sync_state(&inner, request).await }; Box::pin(fut) } @@ -2064,7 +2068,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = GetLimitsSvc(inner); + let method = SyncStateSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( From 692d0d5250639648a3d713e29262f1b17781e694 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Tue, 27 Jan 2026 11:07:07 -0300 Subject: [PATCH 03/77] chore: update to miden-base latest (#1595) * chore: update to miden-base latest * chore: add PR number to changelog entry * chore: replace BlockNumber::from(0) with ::GENESIS --- CHANGELOG.md | 6 + Cargo.lock | 126 +++++++++--------- Cargo.toml | 12 +- bin/network-monitor/src/counter.rs | 4 +- bin/stress-test/src/seeding/mod.rs | 2 +- crates/block-producer/src/mempool/nodes.rs | 2 +- crates/block-producer/src/test_utils/batch.rs | 2 +- crates/ntx-builder/src/actor/note_state.rs | 20 +-- crates/ntx-builder/src/store.rs | 4 +- crates/rpc/src/server/api.rs | 4 +- .../db/migrations/2025062000000_setup/up.sql | 2 +- crates/store/src/db/models/queries/notes.rs | 32 ++--- crates/store/src/db/schema.rs | 2 +- crates/store/src/db/tests.rs | 10 +- crates/store/src/inner_forest/mod.rs | 6 +- 15 files changed, 119 insertions(+), 115 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3596dae5b..c99abf315 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # 
Changelog +## v0.14.0 (TBC) + +### Enhancements + +- [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/miden-node/pull/1595)). + ## v0.13.0 (2026-01-23) ### Enhancements diff --git a/Cargo.lock b/Cargo.lock index 3b9891f4e..69b544164 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,6 +27,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.17", + "once_cell", + "version_check", +] + [[package]] name = "ahash" version = "0.8.12" @@ -351,7 +362,7 @@ dependencies = [ "bitflags 2.10.0", "cexpr", "clang-sys", - "itertools 0.13.0", + "itertools 0.10.5", "proc-macro2", "quote", "regex", @@ -1323,7 +1334,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -1668,6 +1679,9 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] [[package]] name = "hashbrown" @@ -2098,7 +2112,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -2122,15 +2136,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - [[package]] name = 
"itertools" version = "0.14.0" @@ -2267,9 +2272,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "librocksdb-sys" @@ -2476,9 +2481,8 @@ dependencies = [ [[package]] name = "miden-agglayer" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccebe2f7aa9e173913a9da60bd21e8402936c784fdf1eba8c48956667def354e" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" dependencies = [ "fs-err", "miden-assembly", @@ -2545,9 +2549,8 @@ dependencies = [ [[package]] name = "miden-block-prover" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa9c89257b227d0668105b4a6e81ea33956795c89549cc1baa3f253d753e81e5" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" dependencies = [ "miden-protocol", "thiserror 2.0.18", @@ -3037,9 +3040,8 @@ dependencies = [ [[package]] name = "miden-protocol" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfed3ae85e2fabbf8a2e7416e388a40519e10cbf0cdceda222ef858c2f270b35" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" dependencies = [ "bech32", "fs-err", @@ -3068,9 +3070,8 @@ dependencies = [ [[package]] name = "miden-protocol-macros" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f41a93dd532baa3a4c821073baad5d700aab119b3831ef7fdf004e196c10157e" +version = "0.14.0" +source = 
"git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" dependencies = [ "proc-macro2", "quote", @@ -3159,9 +3160,8 @@ dependencies = [ [[package]] name = "miden-standards" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16144e41701794b45b7a361ec7d35407a90c4d1d129a43df0bc278d5f3327999" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" dependencies = [ "fs-err", "miden-assembly", @@ -3177,9 +3177,8 @@ dependencies = [ [[package]] name = "miden-testing" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bd0c6d0ceb4e6719a5afe76b9627b73e91506ebb66350d56ca9ed606127e4dc" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3194,14 +3193,14 @@ dependencies = [ "miden-tx-batch-prover", "rand 0.9.2", "rand_chacha 0.9.0", + "thiserror 2.0.18", "winterfell", ] [[package]] name = "miden-tx" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97f26c833633cea0d95ddb38bcd8bd7e8225b4e7746c15070cb9ab7b85e248c" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" dependencies = [ "miden-processor", "miden-protocol", @@ -3213,9 +3212,8 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0669ce9d9c7aacd49e4923edb88fe668e370c02a754d1564b10a97501e37310f" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" dependencies = [ "miden-protocol", "miden-tx", @@ -3506,9 +3504,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = 
"0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-derive" @@ -3855,7 +3853,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "blake2", "bytes", @@ -3891,7 +3889,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "brotli", "bytes", @@ -3981,7 +3979,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash", + "ahash 0.8.12", ] [[package]] @@ -4013,7 +4011,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.16.1", + "hashbrown 0.12.3", "parking_lot", "rand 0.8.5", ] @@ -4355,7 +4353,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ "heck 0.5.0", - "itertools 0.14.0", + "itertools 0.10.5", "log", "multimap", "once_cell", @@ -4377,7 +4375,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.10.5", "proc-macro2", "quote", "syn 2.0.114", @@ -4830,7 +4828,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ 
-4843,7 +4841,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -5475,7 +5473,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -5594,9 +5592,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.45" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" +checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5" dependencies = [ "deranged", "itoa", @@ -5609,15 +5607,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.25" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" +checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4" dependencies = [ "num-conv", "time-core", @@ -6255,9 +6253,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" dependencies = [ "getrandom 0.3.4", "js-sys", @@ -6448,7 +6446,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 
0.52.0", ] [[package]] @@ -6942,18 +6940,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" +checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" +checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d" dependencies = [ "proc-macro2", "quote", @@ -7022,9 +7020,9 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" +checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" [[package]] name = "zstd" diff --git a/Cargo.toml b/Cargo.toml index cf690b306..b2b2a269b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,12 +49,12 @@ miden-node-validator = { path = "crates/validator", version = "0.13" } miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.13" } # miden-base aka protocol dependencies. These should be updated in sync. 
-miden-block-prover = { version = "0.13" } -miden-protocol = { default-features = false, version = "0.13" } -miden-standards = { version = "0.13" } -miden-testing = { version = "0.13" } -miden-tx = { default-features = false, version = "0.13" } -miden-tx-batch-prover = { version = "0.13" } +miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base" } +miden-protocol = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base" } +miden-standards = { branch = "next", git = "https://github.com/0xMiden/miden-base" } +miden-testing = { branch = "next", git = "https://github.com/0xMiden/miden-base" } +miden-tx = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base" } +miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base" } # Other miden dependencies. These should align with those expected by miden-base. miden-air = { features = ["std", "testing"], version = "0.20" } diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index c04426733..0b5638e53 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -22,10 +22,10 @@ use miden_protocol::note::{ NoteAssets, NoteAttachment, NoteExecutionHint, - NoteInputs, NoteMetadata, NoteRecipient, NoteScript, + NoteStorage, NoteTag, NoteType, }; @@ -872,7 +872,7 @@ fn create_network_note( Felt::new(rng.random()), ]); - let recipient = NoteRecipient::new(serial_num, script, NoteInputs::new(vec![])?); + let recipient = NoteRecipient::new(serial_num, script, NoteStorage::new(vec![])?); let network_note = Note::new(NoteAssets::new(vec![])?, metadata, recipient.clone()); Ok((network_note, recipient)) diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index e0fe79338..e8cfd3395 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -366,7 +366,7 @@ fn create_batch(txs: 
&[ProvenTransaction], block_ref: &BlockHeader) -> ProvenBat account_updates, InputNotes::new(input_notes).unwrap(), output_notes, - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked(txs.iter().map(TransactionHeader::from).collect()), ) .unwrap() diff --git a/crates/block-producer/src/mempool/nodes.rs b/crates/block-producer/src/mempool/nodes.rs index 461a836c2..c41e305fa 100644 --- a/crates/block-producer/src/mempool/nodes.rs +++ b/crates/block-producer/src/mempool/nodes.rs @@ -416,7 +416,7 @@ mod tests { BTreeMap::from([(account_update.account_id(), account_update)]), InputNotes::default(), Vec::default(), - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked(vec![tx_header]), ) .unwrap(); diff --git a/crates/block-producer/src/test_utils/batch.rs b/crates/block-producer/src/test_utils/batch.rs index ecbd21586..ca705e241 100644 --- a/crates/block-producer/src/test_utils/batch.rs +++ b/crates/block-producer/src/test_utils/batch.rs @@ -66,7 +66,7 @@ impl TransactionBatchConstructor for ProvenBatch { account_updates, InputNotes::new_unchecked(input_notes), output_notes, - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked( txs.into_iter().map(TransactionHeader::from).collect(), ), diff --git a/crates/ntx-builder/src/actor/note_state.rs b/crates/ntx-builder/src/actor/note_state.rs index 87b91fc21..b7f5ef180 100644 --- a/crates/ntx-builder/src/actor/note_state.rs +++ b/crates/ntx-builder/src/actor/note_state.rs @@ -232,16 +232,16 @@ mod tests { #[rstest::rstest] #[test] - #[case::all_zero(Some(BlockNumber::from(0)), BlockNumber::from(0), 0, true)] - #[case::no_attempts(None, BlockNumber::from(0), 0, true)] - #[case::one_attempt(Some(BlockNumber::from(0)), BlockNumber::from(2), 1, true)] - #[case::three_attempts(Some(BlockNumber::from(0)), BlockNumber::from(3), 3, true)] - #[case::ten_attempts(Some(BlockNumber::from(0)), BlockNumber::from(13), 10, true)] 
- #[case::twenty_attempts(Some(BlockNumber::from(0)), BlockNumber::from(149), 20, true)] - #[case::one_attempt_false(Some(BlockNumber::from(0)), BlockNumber::from(1), 1, false)] - #[case::three_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(2), 3, false)] - #[case::ten_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(12), 10, false)] - #[case::twenty_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(148), 20, false)] + #[case::all_zero(Some(BlockNumber::GENESIS), BlockNumber::GENESIS, 0, true)] + #[case::no_attempts(None, BlockNumber::GENESIS, 0, true)] + #[case::one_attempt(Some(BlockNumber::GENESIS), BlockNumber::from(2), 1, true)] + #[case::three_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(3), 3, true)] + #[case::ten_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(13), 10, true)] + #[case::twenty_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(149), 20, true)] + #[case::one_attempt_false(Some(BlockNumber::GENESIS), BlockNumber::from(1), 1, false)] + #[case::three_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(2), 3, false)] + #[case::ten_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(12), 10, false)] + #[case::twenty_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(148), 20, false)] fn backoff_has_passed( #[case] last_attempt_block_num: Option, #[case] current_block_num: BlockNumber, diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index fa63c7b67..a784f8aa7 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -236,10 +236,10 @@ impl StoreClient { &self, sender: tokio::sync::mpsc::Sender, ) -> Result<(), StoreError> { - let mut block_range = BlockNumber::from(0)..=BlockNumber::from(u32::MAX); + let mut block_range = BlockNumber::GENESIS..=BlockNumber::MAX; while let Some(next_start) = self.load_accounts_page(block_range, &sender).await? 
{ - block_range = next_start..=BlockNumber::from(u32::MAX); + block_range = next_start..=BlockNumber::MAX; } Ok(()) diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 13d26962e..d29ee7166 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -294,7 +294,7 @@ impl api_server::Api for RpcService { Arc::make_mut(&mut mast).strip_decorators(); let script = NoteScript::from_parts(mast, note.script().entrypoint()); let recipient = - NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); + NoteRecipient::new(note.serial_num(), script, note.storage().clone()); let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); OutputNote::Full(new_note) }, @@ -356,7 +356,7 @@ impl api_server::Api for RpcService { Arc::make_mut(&mut mast).strip_decorators(); let script = NoteScript::from_parts(mast, note.script().entrypoint()); let recipient = - NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); + NoteRecipient::new(note.serial_num(), script, note.storage().clone()); let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); OutputNote::Full(new_note) diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 0858e71d1..f30a34a51 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -59,7 +59,7 @@ CREATE TABLE notes ( consumed_at INTEGER, -- Block number when the note was consumed nullifier BLOB, -- Only known for public notes, null for private notes assets BLOB, - inputs BLOB, + storage BLOB, script_root BLOB, serial_num BLOB, diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index a2ab7b1bb..ef93f0ffe 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -41,10 +41,10 @@ use 
miden_protocol::note::{ NoteDetails, NoteId, NoteInclusionProof, - NoteInputs, NoteMetadata, NoteRecipient, NoteScript, + NoteStorage, NoteTag, NoteType, Nullifier, @@ -203,7 +203,7 @@ pub(crate) fn select_notes_since_block_by_tag_and_sender( /// notes.tag, /// notes.attachment, /// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script @@ -283,7 +283,7 @@ pub(crate) fn select_existing_note_commitments( /// notes.tag, /// notes.attachment, /// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script @@ -427,7 +427,7 @@ pub(crate) fn select_note_script_by_root( /// notes.tag, /// notes.attachment, /// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script, @@ -575,7 +575,7 @@ impl TryInto for NoteSyncRecordRawRow { #[diesel(check_for_backend(Sqlite))] pub struct NoteDetailsRawRow { pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, } @@ -601,7 +601,7 @@ pub struct NoteRecordWithScriptRawJoined { // #[diesel(embed)] // pub metadata: NoteMetadataRaw, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, // #[diesel(embed)] @@ -623,7 +623,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined tag, attachment, assets, - inputs, + storage, serial_num, inclusion_path, } = note; @@ -638,7 +638,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined tag, attachment, assets, - inputs, + storage, serial_num, inclusion_path, script, @@ -666,7 +666,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { attachment, // metadata ^^^, assets, - inputs, + storage, serial_num, //details ^^^, inclusion_path, @@ -675,7 +675,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { } = raw; let index = BlockNoteIndexRawRow { batch_index, note_index }; let metadata 
= NoteMetadataRawRow { note_type, sender, tag, attachment }; - let details = NoteDetailsRawRow { assets, inputs, serial_num }; + let details = NoteDetailsRawRow { assets, storage, serial_num }; let metadata = metadata.try_into()?; let committed_at = BlockNumber::from_raw_sql(committed_at)?; @@ -684,16 +684,16 @@ impl TryInto for NoteRecordWithScriptRawJoined { let script = script.map(|script| NoteScript::read_from_bytes(&script[..])).transpose()?; let details = if let NoteDetailsRawRow { assets: Some(assets), - inputs: Some(inputs), + storage: Some(storage), serial_num: Some(serial_num), } = details { - let inputs = NoteInputs::read_from_bytes(&inputs[..])?; + let storage = NoteStorage::read_from_bytes(&storage[..])?; let serial_num = Word::read_from_bytes(&serial_num[..])?; let script = script.ok_or_else(|| { DatabaseError::conversiont_from_sql::(None) })?; - let recipient = NoteRecipient::new(serial_num, script, inputs); + let recipient = NoteRecipient::new(serial_num, script, storage); let assets = NoteAssets::read_from_bytes(&assets[..])?; Some(NoteDetails::new(assets, recipient)) } else { @@ -730,7 +730,7 @@ pub struct NoteRecordRawRow { pub attachment: Vec, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, pub inclusion_path: Vec, @@ -868,7 +868,7 @@ pub struct NoteInsertRow { pub consumed_at: Option, pub nullifier: Option>, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub script_root: Option>, pub serial_num: Option>, } @@ -902,7 +902,7 @@ impl From<(NoteRecord, Option)> for NoteInsertRow { consumed_at: None::, // New notes are always unconsumed. 
nullifier: nullifier.as_ref().map(Nullifier::to_bytes), assets: note.details.as_ref().map(|d| d.assets().to_bytes()), - inputs: note.details.as_ref().map(|d| d.inputs().to_bytes()), + storage: note.details.as_ref().map(|d| d.storage().to_bytes()), script_root: note.details.as_ref().map(|d| d.script().root().to_bytes()), serial_num: note.details.as_ref().map(|d| d.serial_num().to_bytes()), } diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 0ae4b8e1e..e14d510c1 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -74,7 +74,7 @@ diesel::table! { consumed_at -> Nullable, nullifier -> Nullable, assets -> Nullable, - inputs -> Nullable, + storage -> Nullable, script_root -> Nullable, serial_num -> Nullable, } diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 6bd26dda1..fbc929564 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -169,7 +169,7 @@ fn sql_select_transactions() { queries::select_transactions_by_accounts_and_block_range( conn, &[AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap()], - BlockNumber::from(0)..=BlockNumber::from(2), + BlockNumber::GENESIS..=BlockNumber::from(2), ) .unwrap() } @@ -824,7 +824,7 @@ fn db_account() { let res = queries::select_accounts_by_block_range( conn, &account_ids, - BlockNumber::from(0)..=u32::MAX.into(), + BlockNumber::GENESIS..=u32::MAX.into(), ) .unwrap(); assert!(res.is_empty()); @@ -850,7 +850,7 @@ fn db_account() { let res = queries::select_accounts_by_block_range( conn, &account_ids, - BlockNumber::from(0)..=u32::MAX.into(), + BlockNumber::GENESIS..=u32::MAX.into(), ) .unwrap(); assert_eq!( @@ -889,7 +889,7 @@ fn notes() { let block_num_1 = 1.into(); create_block(conn, block_num_1); - let block_range = BlockNumber::from(0)..=BlockNumber::from(1); + let block_range = BlockNumber::GENESIS..=BlockNumber::from(1); // test empty table let (res, last_included_block) = @@ -2021,7 +2021,7 @@ fn 
db_roundtrip_transactions() { let retrieved = queries::select_transactions_by_accounts_and_block_range( &mut conn, &[account_id], - BlockNumber::from(0)..=BlockNumber::from(2), + BlockNumber::GENESIS..=BlockNumber::from(2), ) .unwrap(); diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 29f71763b..2154cde70 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -290,7 +290,7 @@ impl InnerForest { /// account, returns an empty SMT root. fn get_latest_vault_root(&self, account_id: AccountId) -> Word { self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) + .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::MAX)) .next_back() .map_or_else(Self::empty_smt_root, |(_, root)| *root) } @@ -449,7 +449,7 @@ impl InnerForest { self.storage_map_roots .range( (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ..=(account_id, slot_name.clone(), BlockNumber::MAX), ) .next_back() .map_or_else(Self::empty_smt_root, |(_, root)| *root) @@ -465,7 +465,7 @@ impl InnerForest { self.storage_entries .range( (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ..(account_id, slot_name.clone(), BlockNumber::MAX), ) .next_back() .map(|(_, entries)| entries.clone()) From 9661ed95a200360bf710fa115f740c714b5a7fbf Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Tue, 27 Jan 2026 14:37:36 -0800 Subject: [PATCH 04/77] chore: increment crate versions to v0.14.0 --- CHANGELOG.md | 3 +- Cargo.lock | 177 +++++++++++++++++++++++++-------------------------- Cargo.toml | 22 +++---- 3 files changed, 99 insertions(+), 103 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab20c1710..f9703ae72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,11 @@ # Changelog -## v0.14.0 (TBC) +## v0.14.0 
(TBD) ### Enhancements - [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/miden-node/pull/1595)). + ## v0.13.2 (2026-01-27) ### Fixes diff --git a/Cargo.lock b/Cargo.lock index 2117ee722..c338dda5f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,17 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.17", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.12" @@ -362,7 +351,7 @@ dependencies = [ "bitflags 2.10.0", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.13.0", "proc-macro2", "quote", "regex", @@ -651,19 +640,19 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.54" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" +checksum = "3e34525d5bbbd55da2bb745d34b36121baac88d07619a9a09cfcf4a6c0832785" dependencies = [ "clap_builder", - "clap_derive 4.5.49", + "clap_derive 4.5.55", ] [[package]] name = "clap_builder" -version = "4.5.54" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" +checksum = "59a20016a20a3da95bef50ec7238dbd09baeef4311dcdd38ec15aba69812fb61" dependencies = [ "anstream", "anstyle", @@ -686,9 +675,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.49" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +checksum = 
"a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -791,7 +780,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.54", + "clap 4.5.55", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1334,7 +1323,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -1679,9 +1668,6 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.8", -] [[package]] name = "hashbrown" @@ -2112,7 +2098,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -2136,6 +2122,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.14.0" @@ -2482,7 +2477,7 @@ dependencies = [ [[package]] name = "miden-agglayer" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" +source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" dependencies = [ "fs-err", "miden-assembly", @@ -2497,9 +2492,9 @@ dependencies = [ [[package]] name = "miden-air" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d819876b9e9b630e63152400e6df2a201668a9bdfd33d54d6806b9d7b992ff8" +checksum = 
"ab2f1db9cdbd5da3eaf07fa0a8122d27b575f96b0699388c98f6c0e468cb9c1f" dependencies = [ "miden-core", "miden-utils-indexing", @@ -2510,9 +2505,9 @@ dependencies = [ [[package]] name = "miden-assembly" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c6a18e29c03141cf9044604390a00691c7342924ec865b4acfdd560ff41ede" +checksum = "cf4aba6bc5cfda2393ecc032b55caabde289fb980650560f8333803db4e48f09" dependencies = [ "env_logger", "log", @@ -2525,9 +2520,9 @@ dependencies = [ [[package]] name = "miden-assembly-syntax" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7458ff670f5a514bf972aa84d6e1851a4c4e9afa351f53b71bdc2218b99254b6" +checksum = "23eae66f2a55c2a0666f4ed896b61797845b528435ad2bae41fd9a221f94bad7" dependencies = [ "aho-corasick", "env_logger", @@ -2550,7 +2545,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" +source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" dependencies = [ "miden-protocol", "thiserror 2.0.18", @@ -2558,9 +2553,9 @@ dependencies = [ [[package]] name = "miden-core" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a5c9c8c3d42ae8381ed49e47ff9ad2d2e345c4726761be36b7d4000ebb40ae" +checksum = "2716bb01f07f0b19398e3d9785e23a724b89aef64d614a9073c1d44c6898a9a9" dependencies = [ "derive_more", "itertools 0.14.0", @@ -2580,9 +2575,9 @@ dependencies = [ [[package]] name = "miden-core-lib" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6556494ea5576803730fa15015bee6bd9d1a117450f22e7df0883421e7423674" +checksum = "9ac97f4fb334ee842663f99f33677beacc7bdf4b7d4eeff419c2cd98a5a68bfa" dependencies = [ 
"env_logger", "fs-err", @@ -2642,9 +2637,9 @@ dependencies = [ [[package]] name = "miden-debug-types" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19123e896f24b575e69921a79a39a0a4babeb98404a8601017feb13b75d653b3" +checksum = "b421786850ce05627355ee616c4a5fdc4a9ad1591859ede5e5564ab74aa4abd2" dependencies = [ "memchr", "miden-crypto", @@ -2669,9 +2664,9 @@ dependencies = [ [[package]] name = "miden-mast-package" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d6a322b91efa1bb71e224395ca1fb9ca00e2614f89427e35d8c42a903868a3" +checksum = "169025a61c2ca2e8a0f53f20a7bdcbdd1f8e34f528676137208bff64944652bb" dependencies = [ "derive_more", "miden-assembly-syntax", @@ -2723,11 +2718,11 @@ dependencies = [ [[package]] name = "miden-network-monitor" -version = "0.13.2" +version = "0.14.0" dependencies = [ "anyhow", "axum", - "clap 4.5.54", + "clap 4.5.55", "hex", "humantime", "miden-node-proto", @@ -2750,10 +2745,10 @@ dependencies = [ [[package]] name = "miden-node" -version = "0.13.2" +version = "0.14.0" dependencies = [ "anyhow", - "clap 4.5.54", + "clap 4.5.55", "figment", "fs-err", "hex", @@ -2771,7 +2766,7 @@ dependencies = [ [[package]] name = "miden-node-block-producer" -version = "0.13.2" +version = "0.14.0" dependencies = [ "anyhow", "assert_matches", @@ -2807,7 +2802,7 @@ dependencies = [ [[package]] name = "miden-node-grpc-error-macro" -version = "0.13.2" +version = "0.14.0" dependencies = [ "quote", "syn 2.0.114", @@ -2815,7 +2810,7 @@ dependencies = [ [[package]] name = "miden-node-ntx-builder" -version = "0.13.2" +version = "0.14.0" dependencies = [ "anyhow", "futures", @@ -2839,7 +2834,7 @@ dependencies = [ [[package]] name = "miden-node-proto" -version = "0.13.2" +version = "0.14.0" dependencies = [ "anyhow", "assert_matches", @@ -2863,7 +2858,7 @@ dependencies = [ [[package]] name = "miden-node-proto-build" -version = 
"0.13.2" +version = "0.14.0" dependencies = [ "fs-err", "miette", @@ -2873,7 +2868,7 @@ dependencies = [ [[package]] name = "miden-node-rpc" -version = "0.13.2" +version = "0.14.0" dependencies = [ "anyhow", "futures", @@ -2905,7 +2900,7 @@ dependencies = [ [[package]] name = "miden-node-store" -version = "0.13.2" +version = "0.14.0" dependencies = [ "anyhow", "assert_matches", @@ -2943,9 +2938,9 @@ dependencies = [ [[package]] name = "miden-node-stress-test" -version = "0.13.2" +version = "0.14.0" dependencies = [ - "clap 4.5.54", + "clap 4.5.55", "fs-err", "futures", "miden-air", @@ -2973,7 +2968,7 @@ dependencies = [ [[package]] name = "miden-node-utils" -version = "0.13.2" +version = "0.14.0" dependencies = [ "anyhow", "bytes", @@ -3001,7 +2996,7 @@ dependencies = [ [[package]] name = "miden-node-validator" -version = "0.13.2" +version = "0.14.0" dependencies = [ "anyhow", "miden-node-proto", @@ -3020,9 +3015,9 @@ dependencies = [ [[package]] name = "miden-processor" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a659fac55de14647e2695f03d96b83ff94fe65fd31e74d81c225ec52af25acf" +checksum = "a18a6a5eebe64e81a29be6321ee8f4478c6bfaf619b7689825884e8cd308c044" dependencies = [ "itertools 0.14.0", "miden-air", @@ -3041,7 +3036,7 @@ dependencies = [ [[package]] name = "miden-protocol" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" +source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" dependencies = [ "bech32", "fs-err", @@ -3071,7 +3066,7 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" +source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" dependencies = [ "proc-macro2", "quote", 
@@ -3080,9 +3075,9 @@ dependencies = [ [[package]] name = "miden-prover" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e5df61f50f27886f6f777d6e0cdf785f7db87dd881799a84a801e7330c189c8" +checksum = "83070f0ca1a08235362e990238b6487191f814054aaebcc40883a073fdcd18f9" dependencies = [ "miden-air", "miden-debug-types", @@ -3094,13 +3089,13 @@ dependencies = [ [[package]] name = "miden-remote-prover" -version = "0.13.2" +version = "0.14.0" dependencies = [ "anyhow", "async-trait", "axum", "bytes", - "clap 4.5.54", + "clap 4.5.55", "http", "humantime", "miden-block-prover", @@ -3140,7 +3135,7 @@ dependencies = [ [[package]] name = "miden-remote-prover-client" -version = "0.13.2" +version = "0.14.0" dependencies = [ "fs-err", "getrandom 0.3.4", @@ -3161,7 +3156,7 @@ dependencies = [ [[package]] name = "miden-standards" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" +source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" dependencies = [ "fs-err", "miden-assembly", @@ -3178,7 +3173,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" +source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3200,7 +3195,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" +source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" dependencies = [ "miden-processor", "miden-protocol", @@ -3213,7 +3208,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.14.0" -source = 
"git+https://github.com/0xMiden/miden-base?branch=next#fda68e1008f8b7b7633b8c3dfc6a0fe4e150dd32" +source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" dependencies = [ "miden-protocol", "miden-tx", @@ -3221,9 +3216,9 @@ dependencies = [ [[package]] name = "miden-utils-core-derive" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa207ffd8b26a79d9b5b246a352812f0015c0bb8f75492ec089c5c8e6d5f9e2b" +checksum = "c9fc6d350fb9ad44797e8d0a1feaacaa6ee4079ef752d9ababc101ffc40ec354" dependencies = [ "proc-macro2", "quote", @@ -3232,9 +3227,9 @@ dependencies = [ [[package]] name = "miden-utils-diagnostics" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b2f55477d410542a5d8990ca04856adf5bef91bfa3b54ca3c03a5ff14a6e25c" +checksum = "af2462fb2e750247a56264eddf40e2e1c8d96ff9379abe73acbcbe81e530e1d5" dependencies = [ "miden-crypto", "miden-debug-types", @@ -3245,18 +3240,18 @@ dependencies = [ [[package]] name = "miden-utils-indexing" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39efae17e14ec8f8a1266cffd29eb7a08ac837143cd09223b1af361bbb55730" +checksum = "57046b5c263b78e7fa5a6e328ca852e6319cf844faa26fbdcbb128ec555deb2a" dependencies = [ "thiserror 2.0.18", ] [[package]] name = "miden-utils-sync" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da7fa8f5fd27f122c83f55752f2a964bbfc2b713de419e9c152f7dcc05c194ec" +checksum = "e2d3e129b62099672a1ffc012ab2e26ee7f2b35e4ca18ca1f726b88c53546ddd" dependencies = [ "lock_api", "loom", @@ -3265,9 +3260,9 @@ dependencies = [ [[package]] name = "miden-verifier" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fbddac2e76486fb657929338323c68b9e7f40e33b8cfb593d0fb5bf637db046e" +checksum = "fe033af062937938ded511e5238db3bf8e0c1a30205850d62fb23271b3c96f85" dependencies = [ "miden-air", "miden-core", @@ -3853,7 +3848,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "blake2", "bytes", @@ -3889,7 +3884,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "brotli", "bytes", @@ -3979,7 +3974,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash 0.8.12", + "ahash", ] [[package]] @@ -4011,7 +4006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.12.3", + "hashbrown 0.16.1", "parking_lot", "rand 0.8.5", ] @@ -4353,7 +4348,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ "heck 0.5.0", - "itertools 0.10.5", + "itertools 0.14.0", "log", "multimap", "once_cell", @@ -4375,7 +4370,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.10.5", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.114", @@ -4828,7 +4823,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4841,7 +4836,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - 
"windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -5243,9 +5238,9 @@ checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "siphasher" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" @@ -5473,7 +5468,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -6446,7 +6441,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 985d954e9..839d354f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ license = "MIT" readme = "README.md" repository = "https://github.com/0xMiden/miden-node" rust-version = "1.90" -version = "0.13.2" +version = "0.14.0" # Optimize the cryptography for faster tests involving account creation. [profile.test.package.miden-crypto] @@ -36,17 +36,17 @@ opt-level = 2 [workspace.dependencies] # Workspace crates. 
-miden-node-block-producer = { path = "crates/block-producer", version = "0.13" } -miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.13" } -miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.13" } -miden-node-proto = { path = "crates/proto", version = "0.13" } -miden-node-proto-build = { path = "proto", version = "0.13" } -miden-node-rpc = { path = "crates/rpc", version = "0.13" } -miden-node-store = { path = "crates/store", version = "0.13" } +miden-node-block-producer = { path = "crates/block-producer", version = "0.14" } +miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.14" } +miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.14" } +miden-node-proto = { path = "crates/proto", version = "0.14" } +miden-node-proto-build = { path = "proto", version = "0.14" } +miden-node-rpc = { path = "crates/rpc", version = "0.14" } +miden-node-store = { path = "crates/store", version = "0.14" } miden-node-test-macro = { path = "crates/test-macro" } -miden-node-utils = { path = "crates/utils", version = "0.13" } -miden-node-validator = { path = "crates/validator", version = "0.13" } -miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.13" } +miden-node-utils = { path = "crates/utils", version = "0.14" } +miden-node-validator = { path = "crates/validator", version = "0.14" } +miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.14" } # miden-base aka protocol dependencies. These should be updated in sync. 
miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base" } From 2240ce6a99d38fea3f4ea2915deaa7c78c2ac9cf Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Wed, 28 Jan 2026 12:23:04 +1300 Subject: [PATCH 05/77] chore: move `apply_block()` in the store into separate file (#1604) --- crates/store/src/state/apply_block.rs | 302 +++++++++++++++++++++++++ crates/store/src/state/mod.rs | 306 +------------------------- 2 files changed, 310 insertions(+), 298 deletions(-) create mode 100644 crates/store/src/state/apply_block.rs diff --git a/crates/store/src/state/apply_block.rs b/crates/store/src/state/apply_block.rs new file mode 100644 index 000000000..9b0bf6237 --- /dev/null +++ b/crates/store/src/state/apply_block.rs @@ -0,0 +1,302 @@ +use std::sync::Arc; + +use miden_node_utils::ErrorReport; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::ProvenBlock; +use miden_protocol::note::NoteDetails; +use miden_protocol::transaction::OutputNote; +use miden_protocol::utils::Serializable; +use tokio::sync::oneshot; +use tracing::{Instrument, info, info_span, instrument}; + +use crate::db::NoteRecord; +use crate::errors::{ApplyBlockError, InvalidBlockError}; +use crate::state::State; +use crate::{COMPONENT, HistoricalError}; + +impl State { + /// Apply changes of a new block to the DB and in-memory data structures. + /// + /// ## Note on state consistency + /// + /// The server contains in-memory representations of the existing trees, the in-memory + /// representation must be kept consistent with the committed data, this is necessary so to + /// provide consistent results for all endpoints. In order to achieve consistency, the + /// following steps are used: + /// + /// - the request data is validated, prior to starting any modifications. + /// - block is being saved into the store in parallel with updating the DB, but before + /// committing. 
This block is considered as candidate and not yet available for reading + /// because the latest block pointer is not updated yet. + /// - a transaction is open in the DB and the writes are started. + /// - while the transaction is not committed, concurrent reads are allowed, both the DB and the + /// in-memory representations, which are consistent at this stage. + /// - prior to committing the changes to the DB, an exclusive lock to the in-memory data is + /// acquired, preventing concurrent reads to the in-memory data, since that will be + /// out-of-sync w.r.t. the DB. + /// - the DB transaction is committed, and requests that read only from the DB can proceed to + /// use the fresh data. + /// - the in-memory structures are updated, including the latest block pointer and the lock is + /// released. + // TODO: This span is logged in a root span, we should connect it to the parent span. + #[allow(clippy::too_many_lines)] + #[instrument(target = COMPONENT, skip_all, err)] + pub async fn apply_block(&self, block: ProvenBlock) -> Result<(), ApplyBlockError> { + let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?; + + let header = block.header(); + + let tx_commitment = block.body().transactions().commitment(); + + if header.tx_commitment() != tx_commitment { + return Err(InvalidBlockError::InvalidBlockTxCommitment { + expected: tx_commitment, + actual: header.tx_commitment(), + } + .into()); + } + + let block_num = header.block_num(); + let block_commitment = header.commitment(); + + // ensures the right block header is being processed + let prev_block = self + .db + .select_block_header_by_block_num(None) + .await? 
+ .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; + + let expected_block_num = prev_block.block_num().child(); + if block_num != expected_block_num { + return Err(InvalidBlockError::NewBlockInvalidBlockNum { + expected: expected_block_num, + submitted: block_num, + } + .into()); + } + if header.prev_block_commitment() != prev_block.commitment() { + return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); + } + + let block_data = block.to_bytes(); + + // Save the block to the block store. In a case of a rolled-back DB transaction, the + // in-memory state will be unchanged, but the block might still be written into the + // block store. Thus, such block should be considered as block candidates, but not + // finalized blocks. So we should check for the latest block when getting block from + // the store. + let store = Arc::clone(&self.block_store); + let block_save_task = tokio::spawn( + async move { store.save_block(block_num, &block_data).await }.in_current_span(), + ); + + // scope to read in-memory data, compute mutations required for updating account + // and nullifier trees, and validate the request + let ( + nullifier_tree_old_root, + nullifier_tree_update, + account_tree_old_root, + account_tree_update, + ) = { + let inner = self.inner.read().await; + + let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); + + // nullifiers can be produced only once + let duplicate_nullifiers: Vec<_> = block + .body() + .created_nullifiers() + .iter() + .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) + .copied() + .collect(); + if !duplicate_nullifiers.is_empty() { + return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); + } + + // compute updates for the in-memory data structures + + // new_block.chain_root must be equal to the chain MMR root prior to the update + let peaks = inner.blockchain.peaks(); + if peaks.hash_peaks() != header.chain_commitment() { + return 
Err(InvalidBlockError::NewBlockInvalidChainCommitment.into()); + } + + // compute update for nullifier tree + let nullifier_tree_update = inner + .nullifier_tree + .compute_mutations( + block + .body() + .created_nullifiers() + .iter() + .map(|nullifier| (*nullifier, block_num)), + ) + .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; + + if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { + // We do our best here to notify the serve routine, if it doesn't care (dropped the + // receiver) we can't do much. + let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidNullifierRoot, + )); + return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); + } + + // compute update for account tree + let account_tree_update = inner + .account_tree + .compute_mutations( + block + .body() + .updated_accounts() + .iter() + .map(|update| (update.account_id(), update.final_state_commitment())), + ) + .map_err(|e| match e { + HistoricalError::AccountTreeError(err) => { + InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) + }, + HistoricalError::MerkleError(_) => { + panic!("Unexpected MerkleError during account tree mutation computation") + }, + })?; + + if account_tree_update.as_mutation_set().root() != header.account_root() { + let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidAccountRoot, + )); + return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); + } + + ( + inner.nullifier_tree.root(), + nullifier_tree_update, + inner.account_tree.root_latest(), + account_tree_update, + ) + }; + + // build note tree + let note_tree = block.body().compute_block_note_tree(); + if note_tree.root() != header.note_root() { + return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); + } + + let notes = block + .body() + .output_notes() + .map(|(note_index, note)| { + let (details, nullifier) = match note { + 
OutputNote::Full(note) => { + (Some(NoteDetails::from(note)), Some(note.nullifier())) + }, + OutputNote::Header(_) => (None, None), + note @ OutputNote::Partial(_) => { + return Err(InvalidBlockError::InvalidOutputNoteType(Box::new( + note.clone(), + ))); + }, + }; + + let inclusion_path = note_tree.open(note_index); + + let note_record = NoteRecord { + block_num, + note_index, + note_id: note.id().as_word(), + note_commitment: note.commitment(), + metadata: note.metadata().clone(), + details, + inclusion_path, + }; + + Ok((note_record, nullifier)) + }) + .collect::, InvalidBlockError>>()?; + + // Signals the transaction is ready to be committed, and the write lock can be acquired + let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); + // Signals the write lock has been acquired, and the transaction can be committed + let (inform_acquire_done, acquire_done) = oneshot::channel::<()>(); + + // Extract public account updates with deltas before block is moved into async task. + // Private accounts are filtered out since they don't expose their state changes. + let account_deltas = + Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { + match update.details() { + AccountUpdateDetails::Delta(delta) => Some(delta.clone()), + AccountUpdateDetails::Private => None, + } + })); + + // The DB and in-memory state updates need to be synchronized and are partially + // overlapping. Namely, the DB transaction only proceeds after this task acquires the + // in-memory write lock. This requires the DB update to run concurrently, so a new task is + // spawned. 
+ let db = Arc::clone(&self.db); + let db_update_task = tokio::spawn( + async move { db.apply_block(allow_acquire, acquire_done, block, notes).await } + .in_current_span(), + ); + + // Wait for the message from the DB update task, that we ready to commit the DB transaction + acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; + + // Awaiting the block saving task to complete without errors + block_save_task.await??; + + // Scope to update the in-memory data + async move { + // We need to hold the write lock here to prevent inconsistency between the in-memory + // state and the DB state. Thus, we need to wait for the DB update task to complete + // successfully. + let mut inner = self.inner.write().await; + + // We need to check that neither the nullifier tree nor the account tree have changed + // while we were waiting for the DB preparation task to complete. If either of them + // did change, we do not proceed with in-memory and database updates, since it may + // lead to an inconsistent state. + if inner.nullifier_tree.root() != nullifier_tree_old_root + || inner.account_tree.root_latest() != account_tree_old_root + { + return Err(ApplyBlockError::ConcurrentWrite); + } + + // Notify the DB update task that the write lock has been acquired, so it can commit + // the DB transaction + inform_acquire_done + .send(()) + .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?; + + // TODO: shutdown #91 + // Await for successful commit of the DB transaction. If the commit fails, we mustn't + // change in-memory state, so we return a block applying error and don't proceed with + // in-memory updates. + db_update_task + .await? 
+ .map_err(|err| ApplyBlockError::DbUpdateTaskFailed(err.as_report()))?; + + // Update the in-memory data structures after successful commit of the DB transaction + inner + .nullifier_tree + .apply_mutations(nullifier_tree_update) + .expect("Unreachable: old nullifier tree root must be checked before this step"); + inner + .account_tree + .apply_mutations(account_tree_update) + .expect("Unreachable: old account tree root must be checked before this step"); + inner.blockchain.push(block_commitment); + + Ok(()) + } + .in_current_span() + .await?; + + self.forest.write().await.apply_block_updates(block_num, account_deltas)?; + + info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); + + Ok(()) + } +} diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index b584f37b4..9ef47e860 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -21,24 +21,21 @@ use miden_node_proto::domain::account::{ StorageMapRequest, }; use miden_node_proto::domain::batch::BatchInputs; -use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; use miden_protocol::Word; -use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{AccountId, StorageMapWitness, StorageSlotName}; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::account_tree::AccountWitness; use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain}; use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; use miden_protocol::crypto::merkle::smt::{LargeSmt, SmtProof, SmtStorage}; -use miden_protocol::note::{NoteDetails, NoteId, NoteScript, Nullifier}; -use miden_protocol::transaction::{OutputNote, PartialBlockchain}; -use 
miden_protocol::utils::Serializable; -use tokio::sync::{Mutex, RwLock, oneshot}; -use tracing::{Instrument, info, info_span, instrument}; +use miden_protocol::note::{NoteId, NoteScript, Nullifier}; +use miden_protocol::transaction::PartialBlockchain; +use tokio::sync::{Mutex, RwLock}; +use tracing::{info, instrument}; -use crate::accounts::{AccountTreeWithHistory, HistoricalError}; +use crate::accounts::AccountTreeWithHistory; use crate::blocks::BlockStore; use crate::db::models::Page; use crate::db::models::queries::StorageMapValuesPage; @@ -57,7 +54,6 @@ use crate::errors::{ GetBlockHeaderError, GetBlockInputsError, GetCurrentBlockchainDataError, - InvalidBlockError, NoteSyncError, StateInitializationError, StateSyncError, @@ -75,6 +71,8 @@ pub use loader::{ }; use loader::{load_mmr, load_smt_forest, verify_tree_consistency}; +mod apply_block; + // STRUCTURES // ================================================================================================ @@ -190,294 +188,6 @@ impl State { }) } - // STATE MUTATOR - // -------------------------------------------------------------------------------------------- - - /// Apply changes of a new block to the DB and in-memory data structures. - /// - /// ## Note on state consistency - /// - /// The server contains in-memory representations of the existing trees, the in-memory - /// representation must be kept consistent with the committed data, this is necessary so to - /// provide consistent results for all endpoints. In order to achieve consistency, the - /// following steps are used: - /// - /// - the request data is validated, prior to starting any modifications. - /// - block is being saved into the store in parallel with updating the DB, but before - /// committing. This block is considered as candidate and not yet available for reading - /// because the latest block pointer is not updated yet. - /// - a transaction is open in the DB and the writes are started. 
- /// - while the transaction is not committed, concurrent reads are allowed, both the DB and the - /// in-memory representations, which are consistent at this stage. - /// - prior to committing the changes to the DB, an exclusive lock to the in-memory data is - /// acquired, preventing concurrent reads to the in-memory data, since that will be - /// out-of-sync w.r.t. the DB. - /// - the DB transaction is committed, and requests that read only from the DB can proceed to - /// use the fresh data. - /// - the in-memory structures are updated, including the latest block pointer and the lock is - /// released. - // TODO: This span is logged in a root span, we should connect it to the parent span. - #[allow(clippy::too_many_lines)] - #[instrument(target = COMPONENT, skip_all, err)] - pub async fn apply_block(&self, block: ProvenBlock) -> Result<(), ApplyBlockError> { - let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?; - - let header = block.header(); - - let tx_commitment = block.body().transactions().commitment(); - - if header.tx_commitment() != tx_commitment { - return Err(InvalidBlockError::InvalidBlockTxCommitment { - expected: tx_commitment, - actual: header.tx_commitment(), - } - .into()); - } - - let block_num = header.block_num(); - let block_commitment = header.commitment(); - - // ensures the right block header is being processed - let prev_block = self - .db - .select_block_header_by_block_num(None) - .await? - .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; - - let expected_block_num = prev_block.block_num().child(); - if block_num != expected_block_num { - return Err(InvalidBlockError::NewBlockInvalidBlockNum { - expected: expected_block_num, - submitted: block_num, - } - .into()); - } - if header.prev_block_commitment() != prev_block.commitment() { - return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); - } - - let block_data = block.to_bytes(); - - // Save the block to the block store. 
In a case of a rolled-back DB transaction, the - // in-memory state will be unchanged, but the block might still be written into the - // block store. Thus, such block should be considered as block candidates, but not - // finalized blocks. So we should check for the latest block when getting block from - // the store. - let store = Arc::clone(&self.block_store); - let block_save_task = tokio::spawn( - async move { store.save_block(block_num, &block_data).await }.in_current_span(), - ); - - // scope to read in-memory data, compute mutations required for updating account - // and nullifier trees, and validate the request - let ( - nullifier_tree_old_root, - nullifier_tree_update, - account_tree_old_root, - account_tree_update, - ) = { - let inner = self.inner.read().await; - - let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); - - // nullifiers can be produced only once - let duplicate_nullifiers: Vec<_> = block - .body() - .created_nullifiers() - .iter() - .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) - .copied() - .collect(); - if !duplicate_nullifiers.is_empty() { - return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); - } - - // compute updates for the in-memory data structures - - // new_block.chain_root must be equal to the chain MMR root prior to the update - let peaks = inner.blockchain.peaks(); - if peaks.hash_peaks() != header.chain_commitment() { - return Err(InvalidBlockError::NewBlockInvalidChainCommitment.into()); - } - - // compute update for nullifier tree - let nullifier_tree_update = inner - .nullifier_tree - .compute_mutations( - block - .body() - .created_nullifiers() - .iter() - .map(|nullifier| (*nullifier, block_num)), - ) - .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; - - if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { - // We do our best here to notify the serve routine, if it doesn't care (dropped the - // 
receiver) we can't do much. - let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( - InvalidBlockError::NewBlockInvalidNullifierRoot, - )); - return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); - } - - // compute update for account tree - let account_tree_update = inner - .account_tree - .compute_mutations( - block - .body() - .updated_accounts() - .iter() - .map(|update| (update.account_id(), update.final_state_commitment())), - ) - .map_err(|e| match e { - HistoricalError::AccountTreeError(err) => { - InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) - }, - HistoricalError::MerkleError(_) => { - panic!("Unexpected MerkleError during account tree mutation computation") - }, - })?; - - if account_tree_update.as_mutation_set().root() != header.account_root() { - let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( - InvalidBlockError::NewBlockInvalidAccountRoot, - )); - return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); - } - - ( - inner.nullifier_tree.root(), - nullifier_tree_update, - inner.account_tree.root_latest(), - account_tree_update, - ) - }; - - // build note tree - let note_tree = block.body().compute_block_note_tree(); - if note_tree.root() != header.note_root() { - return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); - } - - let notes = block - .body() - .output_notes() - .map(|(note_index, note)| { - let (details, nullifier) = match note { - OutputNote::Full(note) => { - (Some(NoteDetails::from(note)), Some(note.nullifier())) - }, - OutputNote::Header(_) => (None, None), - note @ OutputNote::Partial(_) => { - return Err(InvalidBlockError::InvalidOutputNoteType(Box::new( - note.clone(), - ))); - }, - }; - - let inclusion_path = note_tree.open(note_index); - - let note_record = NoteRecord { - block_num, - note_index, - note_id: note.id().as_word(), - note_commitment: note.commitment(), - metadata: note.metadata().clone(), - details, - inclusion_path, - }; - - 
Ok((note_record, nullifier)) - }) - .collect::, InvalidBlockError>>()?; - - // Signals the transaction is ready to be committed, and the write lock can be acquired - let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); - // Signals the write lock has been acquired, and the transaction can be committed - let (inform_acquire_done, acquire_done) = oneshot::channel::<()>(); - - // Extract public account updates with deltas before block is moved into async task. - // Private accounts are filtered out since they don't expose their state changes. - let account_deltas = - Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { - match update.details() { - AccountUpdateDetails::Delta(delta) => Some(delta.clone()), - AccountUpdateDetails::Private => None, - } - })); - - // The DB and in-memory state updates need to be synchronized and are partially - // overlapping. Namely, the DB transaction only proceeds after this task acquires the - // in-memory write lock. This requires the DB update to run concurrently, so a new task is - // spawned. - let db = Arc::clone(&self.db); - let db_update_task = tokio::spawn( - async move { db.apply_block(allow_acquire, acquire_done, block, notes).await } - .in_current_span(), - ); - - // Wait for the message from the DB update task, that we ready to commit the DB transaction - acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; - - // Awaiting the block saving task to complete without errors - block_save_task.await??; - - // Scope to update the in-memory data - async move { - // We need to hold the write lock here to prevent inconsistency between the in-memory - // state and the DB state. Thus, we need to wait for the DB update task to complete - // successfully. - let mut inner = self.inner.write().await; - - // We need to check that neither the nullifier tree nor the account tree have changed - // while we were waiting for the DB preparation task to complete. 
If either of them - // did change, we do not proceed with in-memory and database updates, since it may - // lead to an inconsistent state. - if inner.nullifier_tree.root() != nullifier_tree_old_root - || inner.account_tree.root_latest() != account_tree_old_root - { - return Err(ApplyBlockError::ConcurrentWrite); - } - - // Notify the DB update task that the write lock has been acquired, so it can commit - // the DB transaction - inform_acquire_done - .send(()) - .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?; - - // TODO: shutdown #91 - // Await for successful commit of the DB transaction. If the commit fails, we mustn't - // change in-memory state, so we return a block applying error and don't proceed with - // in-memory updates. - db_update_task - .await? - .map_err(|err| ApplyBlockError::DbUpdateTaskFailed(err.as_report()))?; - - // Update the in-memory data structures after successful commit of the DB transaction - inner - .nullifier_tree - .apply_mutations(nullifier_tree_update) - .expect("Unreachable: old nullifier tree root must be checked before this step"); - inner - .account_tree - .apply_mutations(account_tree_update) - .expect("Unreachable: old account tree root must be checked before this step"); - inner.blockchain.push(block_commitment); - - Ok(()) - } - .in_current_span() - .await?; - - self.forest.write().await.apply_block_updates(block_num, account_deltas)?; - - info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); - - Ok(()) - } - // STATE ACCESSORS // -------------------------------------------------------------------------------------------- From 65bc296a3d70d79c89e8cf744e3d109b9e160403 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare <43513081+bobbinth@users.noreply.github.com> Date: Wed, 28 Jan 2026 08:31:44 -0800 Subject: [PATCH 06/77] refactor: move state-sync related methods to a separate file (#1606) --- crates/store/src/server/rpc_api.rs | 2 +- 
crates/store/src/state/mod.rs | 132 +------------------------ crates/store/src/state/sync_state.rs | 141 +++++++++++++++++++++++++++ 3 files changed, 145 insertions(+), 130 deletions(-) create mode 100644 crates/store/src/state/sync_state.rs diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 1f9f19aec..fb3924da6 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -327,7 +327,7 @@ impl rpc_server::Rpc for StoreApi { let storage_maps_page = self .state - .get_storage_map_sync_values(account_id, block_range) + .sync_account_storage_maps(account_id, block_range) .await .map_err(SyncAccountStorageMapsError::from)?; diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 9ef47e860..d14ef560c 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -28,7 +28,7 @@ use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::account_tree::AccountWitness; use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain}; -use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; +use miden_protocol::crypto::merkle::mmr::{MmrPeaks, MmrProof, PartialMmr}; use miden_protocol::crypto::merkle::smt::{LargeSmt, SmtProof, SmtStorage}; use miden_protocol::note::{NoteId, NoteScript, Nullifier}; use miden_protocol::transaction::PartialBlockchain; @@ -38,15 +38,7 @@ use tracing::{info, instrument}; use crate::accounts::AccountTreeWithHistory; use crate::blocks::BlockStore; use crate::db::models::Page; -use crate::db::models::queries::StorageMapValuesPage; -use crate::db::{ - AccountVaultValue, - Db, - NoteRecord, - NoteSyncUpdate, - NullifierInfo, - StateSyncUpdate, -}; +use crate::db::{Db, NoteRecord, NullifierInfo}; use crate::errors::{ ApplyBlockError, DatabaseError, @@ -54,9 +46,7 @@ use crate::errors::{ 
GetBlockHeaderError, GetBlockInputsError, GetCurrentBlockchainDataError, - NoteSyncError, StateInitializationError, - StateSyncError, }; use crate::inner_forest::{InnerForest, WitnessError}; use crate::{COMPONENT, DataDirectory}; @@ -72,6 +62,7 @@ pub use loader::{ use loader::{load_mmr, load_smt_forest, verify_tree_consistency}; mod apply_block; +mod sync_state; // STRUCTURES // ================================================================================================ @@ -216,17 +207,6 @@ impl State { } } - pub async fn sync_nullifiers( - &self, - prefix_len: u32, - nullifier_prefixes: Vec, - block_range: RangeInclusive, - ) -> Result<(Vec, BlockNumber), DatabaseError> { - self.db - .select_nullifiers_by_prefix(prefix_len, nullifier_prefixes, block_range) - .await - } - /// Generates membership proofs for each one of the `nullifiers` against the latest nullifier /// tree. /// @@ -399,85 +379,6 @@ impl State { }) } - /// Loads data to synchronize a client. - /// - /// The client's request contains a list of note tags, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filtered based on this - /// block range. - /// - /// # Arguments - /// - /// - `block_num`: The last block *known* by the client, updates start from the next block. - /// - `account_ids`: Include the account's commitment if their _last change_ was in the result's - /// block range. - /// - `note_tags`: The tags the client is interested in, result is restricted to the first block - /// with any matches tags. 
- #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn sync_state( - &self, - block_num: BlockNumber, - account_ids: Vec, - note_tags: Vec, - ) -> Result<(StateSyncUpdate, MmrDelta), StateSyncError> { - let inner = self.inner.read().await; - - let state_sync = self.db.get_state_sync(block_num, account_ids, note_tags).await?; - - let delta = if block_num == state_sync.block_header.block_num() { - // The client is in sync with the chain tip. - MmrDelta { - forest: Forest::new(block_num.as_usize()), - data: vec![], - } - } else { - // Important notes about the boundary conditions: - // - // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. The Mmr root - // contained in the block header always lag behind by one block, this is because the Mmr - // leaves are hashes of block headers, and we can't have self-referential hashes. These - // two points cancel out and don't require adjusting. - // - Mmr::get_delta is inclusive, whereas the sync_state request block_num is defined to - // be - // exclusive, so the from_forest has to be adjusted with a +1 - let from_forest = (block_num + 1).as_usize(); - let to_forest = state_sync.block_header.block_num().as_usize(); - inner - .blockchain - .as_mmr() - .get_delta(Forest::new(from_forest), Forest::new(to_forest)) - .map_err(StateSyncError::FailedToBuildMmrDelta)? - }; - - Ok((state_sync, delta)) - } - - /// Loads data to synchronize a client's notes. - /// - /// The client's request contains a list of tags, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filter based on this - /// block range. - /// - /// # Arguments - /// - /// - `note_tags`: The tags the client is interested in, resulting notes are restricted to the - /// first block containing a matching note. - /// - `block_range`: The range of blocks from which to synchronize notes. 
- #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn sync_notes( - &self, - note_tags: Vec, - block_range: RangeInclusive, - ) -> Result<(NoteSyncUpdate, MmrProof, BlockNumber), NoteSyncError> { - let inner = self.inner.read().await; - - let (note_sync, last_included_block) = - self.db.get_note_sync(block_range, note_tags).await?; - - let mmr_proof = inner.blockchain.open(note_sync.block_header.block_num())?; - - Ok((note_sync, mmr_proof, last_included_block)) - } - /// Returns data needed by the block producer to construct and prove the next block. pub async fn get_block_inputs( &self, @@ -853,15 +754,6 @@ impl State { }) } - /// Returns storage map values for syncing within a block range. - pub(crate) async fn get_storage_map_sync_values( - &self, - account_id: AccountId, - block_range: RangeInclusive, - ) -> Result { - self.db.select_storage_map_sync_values(account_id, block_range).await - } - /// Loads a block from the block store. Return `Ok(None)` if the block is not found. pub async fn load_block( &self, @@ -903,14 +795,6 @@ impl State { self.db.analyze_table_sizes().await } - /// Returns account vault updates for specified account within a block range. - pub async fn sync_account_vault( - &self, - account_id: AccountId, - block_range: RangeInclusive, - ) -> Result<(BlockNumber, Vec), DatabaseError> { - self.db.get_account_vault_sync(account_id, block_range).await - } /// Returns the network notes for an account that are unconsumed by a specified block number, /// along with the next pagination token. pub async fn get_unconsumed_network_notes_for_account( @@ -930,16 +814,6 @@ impl State { self.db.select_note_script_by_root(root).await } - /// Returns the complete transaction records for the specified accounts within the specified - /// block range, including state commitments and note IDs. 
- pub async fn sync_transactions( - &self, - account_ids: Vec, - block_range: RangeInclusive, - ) -> Result<(BlockNumber, Vec), DatabaseError> { - self.db.select_transactions_records(account_ids, block_range).await - } - /// Returns vault asset witnesses for the specified account and block number. pub async fn get_vault_asset_witnesses( &self, diff --git a/crates/store/src/state/sync_state.rs b/crates/store/src/state/sync_state.rs new file mode 100644 index 000000000..59d891ebd --- /dev/null +++ b/crates/store/src/state/sync_state.rs @@ -0,0 +1,141 @@ +use std::ops::RangeInclusive; + +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrProof}; +use tracing::instrument; + +use super::State; +use crate::COMPONENT; +use crate::db::models::queries::StorageMapValuesPage; +use crate::db::{AccountVaultValue, NoteSyncUpdate, NullifierInfo, StateSyncUpdate}; +use crate::errors::{DatabaseError, NoteSyncError, StateSyncError}; + +// STATE SYNCHRONIZATION ENDPOINTS +// ================================================================================================ + +impl State { + /// Returns the complete transaction records for the specified accounts within the specified + /// block range, including state commitments and note IDs. + pub async fn sync_transactions( + &self, + account_ids: Vec, + block_range: RangeInclusive, + ) -> Result<(BlockNumber, Vec), DatabaseError> { + self.db.select_transactions_records(account_ids, block_range).await + } + + /// Loads data to synchronize a client's notes. + /// + /// The client's request contains a list of tags, this method will return the first + /// block with a matching tag, or the chain tip. All the other values are filter based on this + /// block range. + /// + /// # Arguments + /// + /// - `note_tags`: The tags the client is interested in, resulting notes are restricted to the + /// first block containing a matching note. 
+ /// - `block_range`: The range of blocks from which to synchronize notes. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn sync_notes( + &self, + note_tags: Vec, + block_range: RangeInclusive, + ) -> Result<(NoteSyncUpdate, MmrProof, BlockNumber), NoteSyncError> { + let inner = self.inner.read().await; + + let (note_sync, last_included_block) = + self.db.get_note_sync(block_range, note_tags).await?; + + let mmr_proof = inner.blockchain.open(note_sync.block_header.block_num())?; + + Ok((note_sync, mmr_proof, last_included_block)) + } + + pub async fn sync_nullifiers( + &self, + prefix_len: u32, + nullifier_prefixes: Vec, + block_range: RangeInclusive, + ) -> Result<(Vec, BlockNumber), DatabaseError> { + self.db + .select_nullifiers_by_prefix(prefix_len, nullifier_prefixes, block_range) + .await + } + + // ACCOUNT STATE SYNCHRONIZATION + // -------------------------------------------------------------------------------------------- + + /// Returns account vault updates for specified account within a block range. + pub async fn sync_account_vault( + &self, + account_id: AccountId, + block_range: RangeInclusive, + ) -> Result<(BlockNumber, Vec), DatabaseError> { + self.db.get_account_vault_sync(account_id, block_range).await + } + + /// Returns storage map values for syncing within a block range. + pub async fn sync_account_storage_maps( + &self, + account_id: AccountId, + block_range: RangeInclusive, + ) -> Result { + self.db.select_storage_map_sync_values(account_id, block_range).await + } + + // FULL STATE SYNCHRONIZATION + // -------------------------------------------------------------------------------------------- + + /// Loads data to synchronize a client. + /// + /// The client's request contains a list of note tags, this method will return the first + /// block with a matching tag, or the chain tip. All the other values are filtered based on this + /// block range. 
+ /// + /// # Arguments + /// + /// - `block_num`: The last block *known* by the client, updates start from the next block. + /// - `account_ids`: Include the account's commitment if their _last change_ was in the result's + /// block range. + /// - `note_tags`: The tags the client is interested in, result is restricted to the first block + /// with any matches tags. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn sync_state( + &self, + block_num: BlockNumber, + account_ids: Vec, + note_tags: Vec, + ) -> Result<(StateSyncUpdate, MmrDelta), StateSyncError> { + let inner = self.inner.read().await; + + let state_sync = self.db.get_state_sync(block_num, account_ids, note_tags).await?; + + let delta = if block_num == state_sync.block_header.block_num() { + // The client is in sync with the chain tip. + MmrDelta { + forest: Forest::new(block_num.as_usize()), + data: vec![], + } + } else { + // Important notes about the boundary conditions: + // + // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. The Mmr root + // contained in the block header always lag behind by one block, this is because the Mmr + // leaves are hashes of block headers, and we can't have self-referential hashes. These + // two points cancel out and don't require adjusting. + // - Mmr::get_delta is inclusive, whereas the sync_state request block_num is defined to + // be + // exclusive, so the from_forest has to be adjusted with a +1 + let from_forest = (block_num + 1).as_usize(); + let to_forest = state_sync.block_header.block_num().as_usize(); + inner + .blockchain + .as_mmr() + .get_delta(Forest::new(from_forest), Forest::new(to_forest)) + .map_err(StateSyncError::FailedToBuildMmrDelta)? 
+ }; + + Ok((state_sync, delta)) + } +} From 8927fe2dc81e21e6ca5e0276944224d82992f4e2 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Thu, 29 Jan 2026 08:32:20 +0100 Subject: [PATCH 07/77] feat: rocksdb linkage of existing static rocksdb library is missing stdc++ (#1607) --- Cargo.lock | 9 ++++++ Cargo.toml | 4 +++ bin/network-monitor/Cargo.toml | 3 ++ bin/network-monitor/build.rs | 3 ++ bin/node/Cargo.toml | 3 ++ bin/node/build.rs | 3 ++ bin/remote-prover/Cargo.toml | 7 +++-- bin/remote-prover/build.rs | 3 +- bin/stress-test/Cargo.toml | 3 ++ bin/stress-test/build.rs | 3 ++ crates/rocksdb-cxx-linkage-fix/Cargo.toml | 19 ++++++++++++ crates/rocksdb-cxx-linkage-fix/src/lib.rs | 36 +++++++++++++++++++++++ crates/store/Cargo.toml | 33 +++++++++++---------- 13 files changed, 109 insertions(+), 20 deletions(-) create mode 100644 bin/network-monitor/build.rs create mode 100644 bin/node/build.rs create mode 100644 bin/stress-test/build.rs create mode 100644 crates/rocksdb-cxx-linkage-fix/Cargo.toml create mode 100644 crates/rocksdb-cxx-linkage-fix/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index c338dda5f..de2941f5a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2726,6 +2726,7 @@ dependencies = [ "hex", "humantime", "miden-node-proto", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", @@ -2755,6 +2756,7 @@ dependencies = [ "humantime", "miden-node-block-producer", "miden-node-ntx-builder", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-rpc", "miden-node-store", "miden-node-utils", @@ -2866,6 +2868,10 @@ dependencies = [ "tonic-prost-build", ] +[[package]] +name = "miden-node-rocksdb-cxx-linkage-fix" +version = "0.14.0" + [[package]] name = "miden-node-rpc" version = "0.14.0" @@ -2916,6 +2922,7 @@ dependencies = [ "miden-crypto", "miden-node-proto", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-test-macro", "miden-node-utils", "miden-protocol", @@ -2947,6 +2954,7 @@ dependencies = 
[ "miden-block-prover", "miden-node-block-producer", "miden-node-proto", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-store", "miden-node-utils", "miden-protocol", @@ -3101,6 +3109,7 @@ dependencies = [ "miden-block-prover", "miden-node-proto", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", diff --git a/Cargo.toml b/Cargo.toml index 839d354f1..caccabc5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "crates/ntx-builder", "crates/proto", "crates/remote-prover-client", + "crates/rocksdb-cxx-linkage-fix", "crates/rpc", "crates/store", "crates/test-macro", @@ -47,6 +48,9 @@ miden-node-test-macro = { path = "crates/test-macro" } miden-node-utils = { path = "crates/utils", version = "0.14" } miden-node-validator = { path = "crates/validator", version = "0.14" } miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.14" } +# Temporary workaround until +# is part of `rocksdb-rust` release +miden-node-rocksdb-cxx-linkage-fix = { path = "crates/rocksdb-cxx-linkage-fix", version = "0.14" } # miden-base aka protocol dependencies. These should be updated in sync. 
miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base" } diff --git a/bin/network-monitor/Cargo.toml b/bin/network-monitor/Cargo.toml index 64a1f19e1..0322675b3 100644 --- a/bin/network-monitor/Cargo.toml +++ b/bin/network-monitor/Cargo.toml @@ -36,3 +36,6 @@ tokio = { features = ["full"], workspace = true } tonic = { features = ["codegen", "tls-native-roots", "transport"], workspace = true } tracing = { workspace = true } url = { features = ["serde"], workspace = true } + +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/bin/network-monitor/build.rs b/bin/network-monitor/build.rs new file mode 100644 index 000000000..ed4038d06 --- /dev/null +++ b/bin/network-monitor/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index b6ade3b4d..2743f3e8d 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -33,6 +33,9 @@ miden-protocol = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } url = { workspace = true } +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } + [dev-dependencies] figment = { features = ["env", "test", "toml"], version = "0.10" } miden-node-utils = { features = ["tracing-forest"], workspace = true } diff --git a/bin/node/build.rs b/bin/node/build.rs new file mode 100644 index 000000000..ed4038d06 --- /dev/null +++ b/bin/node/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 85bc355f7..adb60f7a8 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -66,6 +66,7 @@ miden-testing = { workspace = true } miden-tx = { features = ["testing"], workspace = true } [build-dependencies] -miden-node-proto-build = { features = ["internal"], workspace = true } -miette = { 
features = ["fancy"], version = "7.5" } -tonic-prost-build = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miette = { features = ["fancy"], version = "7.5" } +tonic-prost-build = { workspace = true } diff --git a/bin/remote-prover/build.rs b/bin/remote-prover/build.rs index f9b2eaafb..262ab49af 100644 --- a/bin/remote-prover/build.rs +++ b/bin/remote-prover/build.rs @@ -12,7 +12,8 @@ const GENERATED_OUT_DIR: &str = "src/generated"; /// Generates Rust protobuf bindings. fn main() -> miette::Result<()> { - println!("cargo::rerun-if-env-changed=BUILD_PROTO"); + miden_node_rocksdb_cxx_linkage_fix::configure(); + println!("cargo:rerun-if-env-changed=BUILD_PROTO"); if !BUILD_GENERATED_FILES_IN_SRC { return Ok(()); } diff --git a/bin/stress-test/Cargo.toml b/bin/stress-test/Cargo.toml index b9df84d41..9b96e564a 100644 --- a/bin/stress-test/Cargo.toml +++ b/bin/stress-test/Cargo.toml @@ -33,3 +33,6 @@ rayon = { version = "1.10" } tokio = { workspace = true } tonic = { default-features = true, workspace = true } url = { workspace = true } + +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/bin/stress-test/build.rs b/bin/stress-test/build.rs new file mode 100644 index 000000000..ed4038d06 --- /dev/null +++ b/bin/stress-test/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/crates/rocksdb-cxx-linkage-fix/Cargo.toml b/crates/rocksdb-cxx-linkage-fix/Cargo.toml new file mode 100644 index 000000000..9e0eb23f7 --- /dev/null +++ b/crates/rocksdb-cxx-linkage-fix/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors.workspace = true +description = "Miden C++ stdlib link helper" +edition.workspace = true +homepage.workspace = true +license.workspace = true +name = "miden-node-rocksdb-cxx-linkage-fix" +readme.workspace = true +repository.workspace = true +rust-version.workspace = true 
+version.workspace = true + +[lib] +path = "src/lib.rs" + +[lints] +workspace = true + +[dependencies] diff --git a/crates/rocksdb-cxx-linkage-fix/src/lib.rs b/crates/rocksdb-cxx-linkage-fix/src/lib.rs new file mode 100644 index 000000000..eeaa456d0 --- /dev/null +++ b/crates/rocksdb-cxx-linkage-fix/src/lib.rs @@ -0,0 +1,36 @@ +//! A temporary solution to missing c++ std library linkage when using a precompile static library +//! +//! For more information see: + +use std::env; + +pub fn configure() { + println!("cargo:rerun-if-env-changed=ROCKSDB_COMPILE"); + println!("cargo:rerun-if-env-changed=ROCKSDB_STATIC"); + println!("cargo:rerun-if-env-changed=CXXSTDLIB"); + let target = env::var("TARGET").unwrap_or_default(); + if should_link_cpp_stdlib() { + link_cpp_stdlib(&target); + } +} + +fn should_link_cpp_stdlib() -> bool { + let rocksdb_compile = env::var("ROCKSDB_COMPILE").unwrap_or_default(); + let rocksdb_compile_disabled = matches!(rocksdb_compile.as_str(), "0" | "false" | "FALSE"); + let rocksdb_static = env::var("ROCKSDB_STATIC").is_ok(); + + rocksdb_compile_disabled && rocksdb_static +} + +fn link_cpp_stdlib(target: &str) { + if let Ok(stdlib) = env::var("CXXSTDLIB") { + println!("cargo:rustc-link-lib=dylib={stdlib}"); + } else if target.contains("apple") || target.contains("freebsd") || target.contains("openbsd") { + println!("cargo:rustc-link-lib=dylib=c++"); + } else if target.contains("linux") { + println!("cargo:rustc-link-lib=dylib=stdc++"); + } else if target.contains("aix") { + println!("cargo:rustc-link-lib=dylib=c++"); + println!("cargo:rustc-link-lib=dylib=c++abi"); + } +} diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index dd06567ea..da6680d10 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -15,20 +15,21 @@ version.workspace = true workspace = true [dependencies] -anyhow = { workspace = true } -deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } -deadpool-diesel = 
{ features = ["sqlite"], version = "0.6" } -deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.3" } -diesel_migrations = { features = ["sqlite"], version = "2.3" } -fs-err = { workspace = true } -hex = { version = "0.4" } -indexmap = { workspace = true } -miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } -miden-node-proto = { workspace = true } -miden-node-proto-build = { features = ["internal"], workspace = true } -miden-node-utils = { workspace = true } -miden-standards = { workspace = true } +anyhow = { workspace = true } +deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } +deadpool-diesel = { features = ["sqlite"], version = "0.6" } +deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } +diesel = { features = ["numeric", "sqlite"], version = "2.3" } +diesel_migrations = { features = ["sqlite"], version = "2.3" } +fs-err = { workspace = true } +hex = { version = "0.4" } +indexmap = { workspace = true } +miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } +miden-node-proto = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { optional = true, workspace = true } +miden-node-utils = { workspace = true } +miden-standards = { workspace = true } # TODO remove `testing` from `miden-protocol`, required for `BlockProof::new_dummy` miden-protocol = { features = ["std", "testing"], workspace = true } pretty_assertions = { workspace = true } @@ -58,7 +59,7 @@ termtree = { version = "0.5" } [features] default = ["rocksdb"] -rocksdb = ["miden-crypto/rocksdb"] +rocksdb = ["miden-crypto/rocksdb", "miden-node-rocksdb-cxx-linkage-fix"] [[bench]] harness = false @@ -69,4 +70,4 @@ required-features = ["rocksdb"] # This is an indirect dependency for which we need to enable optimisations # 
via feature flags. Because we don't use it directly in code, machete # identifies it as unused. -ignored = ["miden-crypto"] +ignored = ["miden-crypto", "miden-node-rocksdb-cxx-linkage-fix"] From 27b19fb3c4cc4a7caaaf48e35c9ef70febd26e9f Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 29 Jan 2026 08:57:14 -0300 Subject: [PATCH 08/77] chore: use enum for NoteType (#1594) --- CHANGELOG.md | 3 +++ Cargo.lock | 4 +-- crates/proto/src/domain/note.rs | 33 +++++++++++++++++++++++-- crates/proto/src/generated/note.rs | 39 +++++++++++++++++++++++++++--- proto/proto/types/note.proto | 14 +++++++++-- 5 files changed, 84 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d417968a..4b32519f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,9 @@ ### Enhancements - [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/miden-node/pull/1595)). +### Changes + +- Changed `note_type` field in proto `NoteMetadata` from `uint32` to a `NoteType` enum ([#1594](https://github.com/0xMiden/miden-node/pull/1594)). 
## v0.13.3 (2026-01-29) diff --git a/Cargo.lock b/Cargo.lock index de2941f5a..e4f5cc489 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3470,7 +3470,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -5486,7 +5486,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index 94fea5beb..1f7c9cb0d 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -23,6 +23,33 @@ use super::account::NetworkAccountId; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated as proto; +// NOTE TYPE +// ================================================================================================ + +impl From for proto::note::NoteType { + fn from(note_type: NoteType) -> Self { + match note_type { + NoteType::Public => proto::note::NoteType::Public, + NoteType::Private => proto::note::NoteType::Private, + } + } +} + +impl TryFrom for NoteType { + type Error = ConversionError; + + fn try_from(note_type: proto::note::NoteType) -> Result { + match note_type { + proto::note::NoteType::Public => Ok(NoteType::Public), + proto::note::NoteType::Private => Ok(NoteType::Private), + proto::note::NoteType::Unspecified => Err(ConversionError::EnumDiscriminantOutOfRange), + } + } +} + +// NOTE METADATA +// ================================================================================================ + impl TryFrom for NoteMetadata { type Error = ConversionError; @@ -31,7 +58,9 @@ impl TryFrom for NoteMetadata { .sender .ok_or_else(|| 
proto::note::NoteMetadata::missing_field(stringify!(sender)))? .try_into()?; - let note_type = NoteType::try_from(u64::from(value.note_type))?; + let note_type = proto::note::NoteType::try_from(value.note_type) + .map_err(|_| ConversionError::EnumDiscriminantOutOfRange)? + .try_into()?; let tag = NoteTag::new(value.tag); // Deserialize attachment if present @@ -77,7 +106,7 @@ impl From for proto::note::NetworkNote { impl From for proto::note::NoteMetadata { fn from(val: NoteMetadata) -> Self { let sender = Some(val.sender().into()); - let note_type = val.note_type() as u32; + let note_type = proto::note::NoteType::from(val.note_type()) as i32; let tag = val.tag().as_u32(); let attachment = val.attachment().to_bytes(); diff --git a/crates/proto/src/generated/note.rs b/crates/proto/src/generated/note.rs index 83d56aeb6..8bff5858c 100644 --- a/crates/proto/src/generated/note.rs +++ b/crates/proto/src/generated/note.rs @@ -19,9 +19,9 @@ pub struct NoteMetadata { /// The account which sent the note. #[prost(message, optional, tag = "1")] pub sender: ::core::option::Option, - /// The type of the note (0b01 = public, 0b10 = private, 0b11 = encrypted). - #[prost(uint32, tag = "2")] - pub note_type: u32, + /// The type of the note. + #[prost(enumeration = "NoteType", tag = "2")] + pub note_type: i32, /// A value which can be used by the recipient(s) to identify notes intended for them. /// /// See `miden_protocol::note::note_tag` for more info. @@ -128,3 +128,36 @@ pub struct NoteScript { #[prost(bytes = "vec", tag = "2")] pub mast: ::prost::alloc::vec::Vec, } +/// The type of a note. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum NoteType { + /// Unspecified note type (default value, should not be used). + Unspecified = 0, + /// Public note - details are visible on-chain. + Public = 1, + /// Private note - details are not visible on-chain. 
+ Private = 2, +} +impl NoteType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unspecified => "NOTE_TYPE_UNSPECIFIED", + Self::Public => "NOTE_TYPE_PUBLIC", + Self::Private => "NOTE_TYPE_PRIVATE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NOTE_TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "NOTE_TYPE_PUBLIC" => Some(Self::Public), + "NOTE_TYPE_PRIVATE" => Some(Self::Private), + _ => None, + } + } +} diff --git a/proto/proto/types/note.proto b/proto/proto/types/note.proto index ac125daa0..ebaa64ed6 100644 --- a/proto/proto/types/note.proto +++ b/proto/proto/types/note.proto @@ -7,6 +7,16 @@ import "types/account.proto"; // NOTES // ================================================================================================ +// The type of a note. +enum NoteType { + // Unspecified note type (default value, should not be used). + NOTE_TYPE_UNSPECIFIED = 0; + // Public note - details are visible on-chain. + NOTE_TYPE_PUBLIC = 1; + // Private note - details are not visible on-chain. + NOTE_TYPE_PRIVATE = 2; +} + // Represents a note's ID. message NoteId { // A unique identifier of the note which is a 32-byte commitment to the underlying note data. @@ -24,8 +34,8 @@ message NoteMetadata { // The account which sent the note. account.AccountId sender = 1; - // The type of the note (0b01 = public, 0b10 = private, 0b11 = encrypted). - uint32 note_type = 2; + // The type of the note. + NoteType note_type = 2; // A value which can be used by the recipient(s) to identify notes intended for them. 
// From 1c44ed207355b50c7a49180954e0dca5bcf7e593 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Mon, 2 Feb 2026 11:23:40 +1300 Subject: [PATCH 09/77] feat: Move block proving to the Store (#1579) --- CHANGELOG.md | 4 +- Cargo.lock | 71 +++--- bin/node/src/commands/block_producer.rs | 3 - bin/node/src/commands/bundled.rs | 10 +- bin/node/src/commands/mod.rs | 5 - bin/node/src/commands/store.rs | 9 + bin/remote-prover/src/api/prover.rs | 2 +- bin/stress-test/Cargo.toml | 1 - bin/stress-test/src/seeding/mod.rs | 45 ++-- crates/block-producer/Cargo.toml | 1 - .../block-producer/src/block_builder/mod.rs | 207 +++--------------- crates/block-producer/src/errors.rs | 11 +- crates/block-producer/src/server/mod.rs | 5 +- crates/block-producer/src/store/mod.rs | 14 +- crates/ntx-builder/src/actor/execute.rs | 2 +- crates/ntx-builder/src/actor/mod.rs | 1 - crates/proto/src/domain/block.rs | 87 +++++++- crates/proto/src/generated/blockchain.rs | 14 +- crates/proto/src/generated/store.rs | 19 +- .../src/remote_prover/block_prover.rs | 2 +- crates/rpc/src/tests.rs | 2 + crates/store/Cargo.toml | 4 + .../db/migrations/2025062000000_setup/up.sql | 1 + crates/store/src/db/mod.rs | 14 +- .../src/db/models/queries/accounts/tests.rs | 5 +- .../src/db/models/queries/block_headers.rs | 24 +- crates/store/src/db/models/queries/mod.rs | 4 +- crates/store/src/db/schema.rs | 1 + crates/store/src/db/tests.rs | 12 +- crates/store/src/lib.rs | 1 + crates/store/src/server/api.rs | 41 +++- crates/store/src/server/block_producer.rs | 89 ++++++-- .../store/src/server/block_prover_client.rs | 56 +++++ crates/store/src/server/mod.rs | 21 +- crates/store/src/state/apply_block.rs | 65 +++--- proto/proto/internal/store.proto | 14 +- proto/proto/types/blockchain.proto | 10 +- 37 files changed, 511 insertions(+), 366 deletions(-) create mode 100644 crates/store/src/server/block_prover_client.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 
4b32519f6..cd49c0cdf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ ### Enhancements - [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/miden-node/pull/1595)). +- [BREAKING] Move block proving from Block Producer to the Store ([#1579](https://github.com/0xMiden/miden-node/pull/1579)). + ### Changes - Changed `note_type` field in proto `NoteMetadata` from `uint32` to a `NoteType` enum ([#1594](https://github.com/0xMiden/miden-node/pull/1594)). @@ -124,7 +126,7 @@ - Network transaction builder now marks notes from any error as failed ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). - Network transaction builder now adheres to note limit set by protocol ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). - Race condition resolved in the store's `apply_block` ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). - - This presented as a database locked error and in rare cases a desync between the mempool and store. + - This presented as a database locked error and in rare cases a desync between the mempool and store. 
## v0.12.6 (2026-01-12) diff --git a/Cargo.lock b/Cargo.lock index e4f5cc489..e0dd6de99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,6 +27,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.17", + "once_cell", + "version_check", +] + [[package]] name = "ahash" version = "0.8.12" @@ -351,7 +362,7 @@ dependencies = [ "bitflags 2.10.0", "cexpr", "clang-sys", - "itertools 0.13.0", + "itertools 0.10.5", "proc-macro2", "quote", "regex", @@ -1323,7 +1334,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -1668,6 +1679,9 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] [[package]] name = "hashbrown" @@ -2098,7 +2112,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -2122,15 +2136,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.14.0" @@ -2477,7 +2482,7 @@ dependencies = [ [[package]] name = "miden-agglayer" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" +source = 
"git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "fs-err", "miden-assembly", @@ -2545,7 +2550,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-protocol", "thiserror 2.0.18", @@ -2774,7 +2779,6 @@ dependencies = [ "assert_matches", "futures", "itertools 0.14.0", - "miden-block-prover", "miden-node-proto", "miden-node-proto-build", "miden-node-store", @@ -2917,8 +2921,10 @@ dependencies = [ "diesel", "diesel_migrations", "fs-err", + "futures", "hex", "indexmap 2.13.0", + "miden-block-prover", "miden-crypto", "miden-node-proto", "miden-node-proto-build", @@ -2926,6 +2932,7 @@ dependencies = [ "miden-node-test-macro", "miden-node-utils", "miden-protocol", + "miden-remote-prover-client", "miden-standards", "pretty_assertions", "rand 0.9.2", @@ -2941,6 +2948,7 @@ dependencies = [ "tonic-reflection", "tower-http", "tracing", + "url", ] [[package]] @@ -2951,7 +2959,6 @@ dependencies = [ "fs-err", "futures", "miden-air", - "miden-block-prover", "miden-node-block-producer", "miden-node-proto", "miden-node-rocksdb-cxx-linkage-fix", @@ -3044,7 +3051,7 @@ dependencies = [ [[package]] name = "miden-protocol" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "bech32", "fs-err", @@ -3074,7 +3081,7 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" +source = 
"git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "proc-macro2", "quote", @@ -3165,7 +3172,7 @@ dependencies = [ [[package]] name = "miden-standards" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "fs-err", "miden-assembly", @@ -3182,7 +3189,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3204,7 +3211,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-processor", "miden-protocol", @@ -3217,7 +3224,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#e08faf7d7badd292a06a1e757aba4f562733f1c0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-protocol", "miden-tx", @@ -3857,7 +3864,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "blake2", "bytes", @@ -3893,7 +3900,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "brotli", "bytes", @@ -3983,7 +3990,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash", + "ahash 0.8.12", ] [[package]] @@ -4015,7 +4022,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.16.1", + "hashbrown 0.12.3", "parking_lot", "rand 0.8.5", ] @@ -4357,7 +4364,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ "heck 0.5.0", - "itertools 0.14.0", + "itertools 0.10.5", "log", "multimap", "once_cell", @@ -4379,7 +4386,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.10.5", "proc-macro2", "quote", "syn 2.0.114", @@ -4832,7 +4839,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -4845,7 +4852,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -5477,7 +5484,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -6450,7 +6457,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs 
index 5cfbc78fc..5d416ea8e 100644 --- a/bin/node/src/commands/block_producer.rs +++ b/bin/node/src/commands/block_producer.rs @@ -86,7 +86,6 @@ impl BlockProducerCommand { store_url, validator_url, batch_prover_url: block_producer.batch_prover_url, - block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, block_interval: block_producer.block_interval, max_txs_per_batch: block_producer.max_txs_per_batch, @@ -125,7 +124,6 @@ mod tests { validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, - block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: 8, @@ -149,7 +147,6 @@ mod tests { validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, - block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: miden_protocol::MAX_ACCOUNTS_PER_BATCH + 1, /* Use protocol diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 22f1199a3..d3940d454 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -20,6 +20,7 @@ use super::{ENV_DATA_DIRECTORY, ENV_RPC_URL}; use crate::commands::{ BlockProducerConfig, DEFAULT_TIMEOUT, + ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, ENV_VALIDATOR_INSECURE_SECRET_KEY, @@ -68,6 +69,10 @@ pub enum BundledCommand { #[arg(long = "rpc.url", env = ENV_RPC_URL, value_name = "URL")] rpc_url: Url, + /// The remote block prover's gRPC url. If not provided, a local block prover will be used. + #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] + block_prover_url: Option, + /// Directory in which the Store component should store the database and raw block data. 
#[arg(long = "data-directory", env = ENV_DATA_DIRECTORY, value_name = "DIR")] data_directory: PathBuf, @@ -129,6 +134,7 @@ impl BundledCommand { }, BundledCommand::Start { rpc_url, + block_prover_url, data_directory, block_producer, ntx_builder, @@ -140,6 +146,7 @@ impl BundledCommand { let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; Self::start( rpc_url, + block_prover_url, data_directory, ntx_builder, block_producer, @@ -154,6 +161,7 @@ impl BundledCommand { #[allow(clippy::too_many_lines)] async fn start( rpc_url: Url, + block_prover_url: Option, data_directory: PathBuf, ntx_builder: NtxBuilderConfig, block_producer: BlockProducerConfig, @@ -212,6 +220,7 @@ impl BundledCommand { block_producer_listener: store_block_producer_listener, ntx_builder_listener: store_ntx_builder_listener, data_directory: data_directory_clone, + block_prover_url, grpc_timeout, } .serve() @@ -235,7 +244,6 @@ impl BundledCommand { store_url, validator_url, batch_prover_url: block_producer.batch_prover_url, - block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, block_interval: block_producer.block_interval, max_batches_per_block: block_producer.max_batches_per_block, diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 7e8fa7e69..62a288664 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -103,11 +103,6 @@ pub struct BlockProducerConfig { #[arg(long = "batch-prover.url", env = ENV_BATCH_PROVER_URL, value_name = "URL")] pub batch_prover_url: Option, - /// The remote block prover's gRPC url. If unset, will default to running a prover - /// in-process which is expensive. - #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] - pub block_prover_url: Option, - /// The number of transactions per batch. 
#[arg( long = "max-txs-per-batch", diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 9dd311368..a78655cd9 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -17,6 +17,7 @@ use super::{ }; use crate::commands::{ DEFAULT_TIMEOUT, + ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, ENV_VALIDATOR_INSECURE_SECRET_KEY, @@ -72,6 +73,10 @@ pub enum StoreCommand { #[arg(long = "block-producer.url", env = ENV_STORE_BLOCK_PRODUCER_URL, value_name = "URL")] block_producer_url: Url, + /// The remote block prover's gRPC url. If not provided, a local block prover will be used. + #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] + block_prover_url: Option, + /// Directory in which to store the database and raw block data. #[arg(long, env = ENV_DATA_DIRECTORY, value_name = "DIR")] data_directory: PathBuf, @@ -115,6 +120,7 @@ impl StoreCommand { rpc_url, ntx_builder_url, block_producer_url, + block_prover_url, data_directory, enable_otel: _, grpc_timeout, @@ -123,6 +129,7 @@ impl StoreCommand { rpc_url, ntx_builder_url, block_producer_url, + block_prover_url, data_directory, grpc_timeout, ) @@ -143,6 +150,7 @@ impl StoreCommand { rpc_url: Url, ntx_builder_url: Url, block_producer_url: Url, + block_prover_url: Option, data_directory: PathBuf, grpc_timeout: Duration, ) -> anyhow::Result<()> { @@ -169,6 +177,7 @@ impl StoreCommand { Store { rpc_listener, + block_prover_url, ntx_builder_listener, block_producer_listener, data_directory, diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs index 24a70f731..d9d8e8c06 100644 --- a/bin/remote-prover/src/api/prover.rs +++ b/bin/remote-prover/src/api/prover.rs @@ -180,7 +180,7 @@ impl ProverRpcApi { let block_proof = prover .try_lock() .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? 
- .prove(tx_batches, block_header, block_inputs) + .prove(tx_batches, &block_header, block_inputs) .map_err(internal_error)?; Ok(Response::new(proto::remote_prover::Proof { payload: block_proof.to_bytes() })) diff --git a/bin/stress-test/Cargo.toml b/bin/stress-test/Cargo.toml index 9b96e564a..9c3fe9387 100644 --- a/bin/stress-test/Cargo.toml +++ b/bin/stress-test/Cargo.toml @@ -21,7 +21,6 @@ clap = { features = ["derive"], version = "4.5" } fs-err = { workspace = true } futures = { workspace = true } miden-air = { features = ["testing"], workspace = true } -miden-block-prover = { features = ["testing"], workspace = true } miden-node-block-producer = { workspace = true } miden-node-proto = { workspace = true } miden-node-store = { workspace = true } diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index e8cfd3395..fa751e1a2 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -5,7 +5,6 @@ use std::time::{Duration, Instant}; use metrics::SeedingMetrics; use miden_air::ExecutionProof; -use miden_block_prover::LocalBlockProver; use miden_node_block_producer::store::StoreClient; use miden_node_proto::domain::batch::BatchInputs; use miden_node_proto::generated::store::rpc_client::RpcClient; @@ -30,6 +29,7 @@ use miden_protocol::block::{ FeeParameters, ProposedBlock, ProvenBlock, + SignedBlock, }; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey as EcdsaSecretKey; use miden_protocol::crypto::dsa::falcon512_rpo::{PublicKey, SecretKey}; @@ -161,7 +161,7 @@ async fn generate_blocks( SecretKey::with_rng(&mut *rng) }; - let mut prev_block = genesis_block.clone(); + let mut prev_block_header = genesis_block.header().clone(); let mut current_anchor_header = genesis_block.header().clone(); for i in 0..total_blocks { @@ -193,7 +193,7 @@ async fn generate_blocks( note_nullifiers.extend(notes.iter().map(|n| n.nullifier().prefix())); // create the tx that creates the notes - let emit_note_tx = 
create_emit_note_tx(prev_block.header(), &mut faucet, notes.clone()); + let emit_note_tx = create_emit_note_tx(&prev_block_header, &mut faucet, notes.clone()); // collect all the txs block_txs.push(emit_note_tx); @@ -202,27 +202,23 @@ async fn generate_blocks( // create the batches with [TRANSACTIONS_PER_BATCH] txs each let batches: Vec = block_txs .par_chunks(TRANSACTIONS_PER_BATCH) - .map(|txs| create_batch(txs, prev_block.header())) + .map(|txs| create_batch(txs, &prev_block_header)) .collect(); // create the block and send it to the store let block_inputs = get_block_inputs(store_client, &batches, &mut metrics).await; // update blocks - prev_block = apply_block(batches, block_inputs, store_client, &mut metrics).await; - if current_anchor_header.block_epoch() != prev_block.header().block_epoch() { - current_anchor_header = prev_block.header().clone(); + prev_block_header = apply_block(batches, block_inputs, store_client, &mut metrics).await; + if current_anchor_header.block_epoch() != prev_block_header.block_epoch() { + current_anchor_header = prev_block_header.clone(); } // create the consume notes txs to be used in the next block let batch_inputs = - get_batch_inputs(store_client, prev_block.header(), ¬es, &mut metrics).await; - consume_notes_txs = create_consume_note_txs( - prev_block.header(), - accounts, - notes, - &batch_inputs.note_proofs, - ); + get_batch_inputs(store_client, &prev_block_header, ¬es, &mut metrics).await; + consume_notes_txs = + create_consume_note_txs(&prev_block_header, accounts, notes, &batch_inputs.note_proofs); // track store size every 50 blocks if i % 50 == 0 { @@ -248,21 +244,21 @@ async fn apply_block( block_inputs: BlockInputs, store_client: &StoreClient, metrics: &mut SeedingMetrics, -) -> ProvenBlock { - let proposed_block = ProposedBlock::new(block_inputs.clone(), batches).unwrap(); +) -> BlockHeader { + let proposed_block = ProposedBlock::new(block_inputs, batches).unwrap(); let (header, body) = 
proposed_block.clone().into_header_and_body().unwrap(); - let block_proof = LocalBlockProver::new(0) - .prove_dummy(proposed_block.batches().clone(), header.clone(), block_inputs) - .unwrap(); + let block_size: usize = header.to_bytes().len() + body.to_bytes().len(); let signature = EcdsaSecretKey::new().sign(header.commitment()); - let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); - let block_size: usize = proven_block.to_bytes().len(); + // SAFETY: The header, body, and signature are known to correspond to each other. + let signed_block = SignedBlock::new_unchecked(header, body, signature); + let ordered_batches = proposed_block.batches().clone(); let start = Instant::now(); - store_client.apply_block(&proven_block).await.unwrap(); + store_client.apply_block(&ordered_batches, &signed_block).await.unwrap(); metrics.track_block_insertion(start.elapsed(), block_size); - proven_block + let (header, ..) = signed_block.into_parts(); + header } // HELPER FUNCTIONS @@ -522,6 +518,8 @@ async fn get_block_inputs( /// Runs the store with the given data directory. Returns a tuple with: /// - a gRPC client to access the store /// - the URL of the store +/// +/// The store uses a local prover. 
pub async fn start_store( data_directory: PathBuf, ) -> (RpcClient>, Url) { @@ -543,6 +541,7 @@ pub async fn start_store( task::spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index e5e5511ad..8437dab3c 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -22,7 +22,6 @@ tracing-forest = ["miden-node-utils/tracing-forest"] anyhow = { workspace = true } futures = { workspace = true } itertools = { workspace = true } -miden-block-prover = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index a3a36ec4f..56b5a3666 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -1,29 +1,15 @@ -use std::ops::{Deref, Range}; +use std::ops::Deref; use std::sync::Arc; use anyhow::Context; use futures::FutureExt; -use miden_block_prover::LocalBlockProver; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::batch::{OrderedBatches, ProvenBatch}; -use miden_protocol::block::{ - BlockBody, - BlockHeader, - BlockInputs, - BlockNumber, - BlockProof, - ProposedBlock, - ProvenBlock, -}; -use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::block::{BlockInputs, BlockNumber, ProposedBlock, ProvenBlock, SignedBlock}; use miden_protocol::note::NoteHeader; -use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionHeader}; -use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; -use rand::Rng; +use miden_protocol::transaction::TransactionHeader; use 
tokio::time::Duration; -use tracing::{Span, info, instrument}; -use url::Url; +use tracing::{Span, instrument}; use crate::errors::BuildBlockError; use crate::mempool::SharedMempool; @@ -35,21 +21,19 @@ use crate::{COMPONENT, TelemetryInjectorExt}; // ================================================================================================= pub struct BlockBuilder { + /// The frequency at which blocks are produced. pub block_interval: Duration, - /// Used to simulate block proving by sleeping for a random duration selected from this range. - pub simulated_proof_time: Range, /// Simulated block failure rate as a percentage. /// /// Note: this _must_ be sign positive and less than 1.0. pub failure_rate: f64, + /// The store RPC client for committing blocks. pub store: StoreClient, + /// The validator RPC client for validating blocks. pub validator: BlockProducerValidatorClient, - - /// The prover used to prove a proposed block into a proven block. - pub block_prover: BlockProver, } impl BlockBuilder { @@ -59,20 +43,12 @@ impl BlockBuilder { pub fn new( store: StoreClient, validator: BlockProducerValidatorClient, - block_prover_url: Option, block_interval: Duration, ) -> Self { - let block_prover = match block_prover_url { - Some(url) => BlockProver::new_remote(url), - None => BlockProver::new_local(MIN_PROOF_SECURITY_LEVEL), - }; - Self { block_interval, // Note: The range cannot be empty. 
- simulated_proof_time: Duration::ZERO..Duration::from_millis(1), failure_rate: 0.0, - block_prover, store, validator, } @@ -136,16 +112,11 @@ impl BlockBuilder { self.get_block_inputs(selected) .inspect_ok(BlockBatchesAndInputs::inject_telemetry) .and_then(|inputs| self.propose_block(inputs)) - .inspect_ok(|(proposed_block, _)| { + .inspect_ok(|proposed_block| { ProposedBlock::inject_telemetry(proposed_block); }) - .and_then(|(proposed_block, inputs)| self.validate_block(proposed_block, inputs)) - .and_then(|(proposed_block, inputs, header, signature, body)| self.prove_block(proposed_block, inputs, header, signature, body)) - .inspect_ok(ProvenBlock::inject_telemetry) - // Failure must be injected before the final pipeline stage i.e. before commit is called. The system cannot - // handle errors after it considers the process complete (which makes sense). - .and_then(|proven_block| async { self.inject_failure(proven_block) }) - .and_then(|proven_block| self.commit_block(mempool, proven_block)) + .and_then(|proposed_block| self.build_and_validate_block(proposed_block)) + .and_then(|(ordered_batches, signed_block)| self.commit_block(mempool, ordered_batches, signed_block)) // Handle errors by propagating the error to the root span and rolling back the block. 
.inspect_err(|err| Span::current().set_error(err)) .or_else(|err| async { @@ -239,23 +210,21 @@ impl BlockBuilder { async fn propose_block( &self, batches_inputs: BlockBatchesAndInputs, - ) -> Result<(ProposedBlock, BlockInputs), BuildBlockError> { + ) -> Result { let BlockBatchesAndInputs { batches, inputs } = batches_inputs; let batches = batches.into_iter().map(Arc::unwrap_or_clone).collect(); - let proposed_block = ProposedBlock::new(inputs.clone(), batches) - .map_err(BuildBlockError::ProposeBlockFailed)?; + let proposed_block = + ProposedBlock::new(inputs, batches).map_err(BuildBlockError::ProposeBlockFailed)?; - Ok((proposed_block, inputs)) + Ok(proposed_block) } #[instrument(target = COMPONENT, name = "block_builder.validate_block", skip_all, err)] - async fn validate_block( + async fn build_and_validate_block( &self, proposed_block: ProposedBlock, - block_inputs: BlockInputs, - ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, Signature, BlockBody), BuildBlockError> - { + ) -> Result<(OrderedBatches, SignedBlock), BuildBlockError> { // Concurrently build the block and validate it via the validator. let build_result = tokio::task::spawn_blocking({ let proposed_block = proposed_block.clone(); @@ -278,53 +247,27 @@ impl BlockBuilder { } let (ordered_batches, ..) = proposed_block.into_parts(); - Ok((ordered_batches, block_inputs, header, signature, body)) - } - - #[instrument(target = COMPONENT, name = "block_builder.prove_block", skip_all, err)] - async fn prove_block( - &self, - ordered_batches: OrderedBatches, - block_inputs: BlockInputs, - header: BlockHeader, - signature: Signature, - body: BlockBody, - ) -> Result { - // Prove block using header and body from validator. - let block_proof = self - .block_prover - .prove(ordered_batches.clone(), header.clone(), block_inputs) - .await?; - self.simulate_proving().await; - - // SAFETY: The header and body are assumed valid and consistent with the proof. 
- let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); - if proven_block.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { - return Err(BuildBlockError::SecurityLevelTooLow( - proven_block.proof_security_level(), - MIN_PROOF_SECURITY_LEVEL, - )); - } - // TODO(sergerad): Consider removing this validation. Once block proving is implemented, - // this would be replaced with verifying the proof returned from the prover against - // the block header. - validate_tx_headers(&proven_block, &ordered_batches.to_transactions())?; - - Ok(proven_block) + // SAFETY: The header, body, and signature are known to correspond to each other because the + // header and body are derived from the proposed block and the signature is verified + // against the corresponding commitment. + let signed_block = SignedBlock::new_unchecked(header, body, signature); + Ok((ordered_batches, signed_block)) } #[instrument(target = COMPONENT, name = "block_builder.commit_block", skip_all, err)] async fn commit_block( &self, mempool: &SharedMempool, - built_block: ProvenBlock, + ordered_batches: OrderedBatches, + signed_block: SignedBlock, ) -> Result<(), BuildBlockError> { self.store - .apply_block(&built_block) + .apply_block(&ordered_batches, &signed_block) .await .map_err(BuildBlockError::StoreApplyBlockFailed)?; - mempool.lock().await.commit_block(built_block.header().clone()); + let (header, ..) 
= signed_block.into_parts(); + mempool.lock().await.commit_block(header); Ok(()) } @@ -333,31 +276,6 @@ impl BlockBuilder { async fn rollback_block(&self, mempool: &SharedMempool, block: BlockNumber) { mempool.lock().await.rollback_block(block); } - - #[instrument(target = COMPONENT, name = "block_builder.simulate_proving", skip_all)] - async fn simulate_proving(&self) { - let proving_duration = rand::rng().random_range(self.simulated_proof_time.clone()); - - Span::current().set_attribute("range.min_s", self.simulated_proof_time.start); - Span::current().set_attribute("range.max_s", self.simulated_proof_time.end); - Span::current().set_attribute("dice_roll_s", proving_duration); - - tokio::time::sleep(proving_duration).await; - } - - #[instrument(target = COMPONENT, name = "block_builder.inject_failure", skip_all, err)] - fn inject_failure(&self, value: T) -> Result { - let roll = rand::rng().random::(); - - Span::current().set_attribute("failure_rate", self.failure_rate); - Span::current().set_attribute("dice_roll", roll); - - if roll < self.failure_rate { - Err(BuildBlockError::InjectedFailure) - } else { - Ok(value) - } - } } /// A wrapper around batches selected for inlucion in a block, primarily used to be able to inject @@ -454,76 +372,3 @@ impl TelemetryInjectorExt for ProvenBlock { span.set_attribute("block.commitments.transaction", header.tx_commitment()); } } - -// BLOCK PROVER -// ================================================================================================ - -pub enum BlockProver { - Local(LocalBlockProver), - Remote(RemoteBlockProver), -} - -impl BlockProver { - pub fn new_local(security_level: u32) -> Self { - info!(target: COMPONENT, "Using local block prover"); - Self::Local(LocalBlockProver::new(security_level)) - } - - pub fn new_remote(endpoint: impl Into) -> Self { - info!(target: COMPONENT, "Using remote block prover"); - Self::Remote(RemoteBlockProver::new(endpoint)) - } - - #[instrument(target = COMPONENT, skip_all, err)] - 
async fn prove( - &self, - tx_batches: OrderedBatches, - block_header: BlockHeader, - block_inputs: BlockInputs, - ) -> Result { - match self { - Self::Local(prover) => prover - .prove(tx_batches, block_header, block_inputs) - .map_err(BuildBlockError::ProveBlockFailed), - Self::Remote(prover) => prover - .prove(tx_batches, block_header, block_inputs) - .await - .map_err(BuildBlockError::RemoteProverClientError), - } - } -} - -/// Validates that the proven block's transaction headers are consistent with the transactions -/// passed in the proposed block. -/// -/// This expects that transactions from the proposed block and proven block are in the same -/// order, as defined by [`OrderedTransactionHeaders`]. -fn validate_tx_headers( - proven_block: &ProvenBlock, - proposed_txs: &OrderedTransactionHeaders, -) -> Result<(), BuildBlockError> { - if proposed_txs.as_slice().len() != proven_block.body().transactions().as_slice().len() { - return Err(BuildBlockError::other(format!( - "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", - proven_block.body().transactions().as_slice().len(), - proposed_txs.as_slice().len() - ))); - } - - // Because we checked the length matches we can zip the iterators up. - // We expect the transaction headers to be in the same order. 
- for (proposed_header, proven_header) in proposed_txs - .as_slice() - .iter() - .zip(proven_block.body().transactions().as_slice()) - { - if proposed_header != proven_header { - return Err(BuildBlockError::other(format!( - "transaction header with id {} does not match header of the transaction in the proposed block", - proposed_header.id() - ))); - } - } - - Ok(()) -} diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 40c74c99f..b610b0534 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -1,6 +1,5 @@ use core::error::Error as CoreError; -use miden_block_prover::BlockProverError; use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_protocol::Word; use miden_protocol::account::AccountId; @@ -223,16 +222,10 @@ pub enum BuildBlockError { ValidateBlockFailed(#[source] Box), #[error("block signature is invalid")] InvalidSignature, - #[error("failed to prove block")] - ProveBlockFailed(#[source] BlockProverError), + /// We sometimes randomly inject errors into the batch building process to test our failure /// responses. - #[error("nothing actually went wrong, failure was injected on purpose")] - InjectedFailure, - #[error("failed to prove block with remote prover")] - RemoteProverClientError(#[source] RemoteProverClientError), - #[error("block proof security level is too low: {0} < {1}")] - SecurityLevelTooLow(u32, u32), + /// Custom error variant for errors not covered by the other variants. #[error("{error_msg}")] Other { diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 8245c1ee6..d3519eb00 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -55,8 +55,6 @@ pub struct BlockProducer { pub validator_url: Url, /// The address of the batch prover component. pub batch_prover_url: Option, - /// The address of the block prover component. 
- pub block_prover_url: Option, /// The interval at which to produce batches. pub batch_interval: Duration, /// The interval at which to produce blocks. @@ -123,8 +121,7 @@ impl BlockProducer { info!(target: COMPONENT, "Server initialized"); - let block_builder = - BlockBuilder::new(store.clone(), validator, self.block_prover_url, self.block_interval); + let block_builder = BlockBuilder::new(store.clone(), validator, self.block_interval); let batch_builder = BatchBuilder::new( store.clone(), SERVER_NUM_BATCH_BUILDERS, diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs index a82a60582..fb20bc160 100644 --- a/crates/block-producer/src/store/mod.rs +++ b/crates/block-producer/src/store/mod.rs @@ -10,7 +10,8 @@ use miden_node_proto::{AccountState, generated as proto}; use miden_node_utils::formatting::format_opt; use miden_protocol::Word; use miden_protocol::account::AccountId; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, ProvenBlock}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, SignedBlock}; use miden_protocol::note::Nullifier; use miden_protocol::transaction::ProvenTransaction; use miden_protocol::utils::Serializable; @@ -238,8 +239,15 @@ impl StoreClient { } #[instrument(target = COMPONENT, name = "store.client.apply_block", skip_all, err)] - pub async fn apply_block(&self, block: &ProvenBlock) -> Result<(), StoreError> { - let request = tonic::Request::new(proto::blockchain::Block { block: block.to_bytes() }); + pub async fn apply_block( + &self, + ordered_batches: &OrderedBatches, + signed_block: &SignedBlock, + ) -> Result<(), StoreError> { + let request = tonic::Request::new(proto::store::ApplyBlockRequest { + ordered_batches: ordered_batches.to_bytes(), + block: Some(signed_block.into()), + }); self.client.clone().apply_block(request).await.map(|_| ()).map_err(Into::into) } diff --git a/crates/ntx-builder/src/actor/execute.rs 
b/crates/ntx-builder/src/actor/execute.rs index edcf58c07..f90da19ab 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -83,7 +83,7 @@ type NtxResult = Result; /// Provides the context for execution [network transaction candidates](TransactionCandidate). #[derive(Clone)] pub struct NtxContext { - /// TODO(sergerad): Remove block producer client when block proving moved to store. + /// Client for submitting proven transactions to the Block Producer. block_producer: BlockProducerClient, /// Client for validating transactions via the Validator. diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index ae8f63629..74d8cb952 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -156,7 +156,6 @@ pub struct AccountActor { mode: ActorMode, event_rx: mpsc::Receiver>, cancel_token: CancellationToken, - // TODO(sergerad): Remove block producer when block proving moved to store. 
block_producer: BlockProducerClient, validator: ValidatorClient, prover: Option, diff --git a/crates/proto/src/domain/block.rs b/crates/proto/src/domain/block.rs index aa94f306d..112f84e50 100644 --- a/crates/proto/src/domain/block.rs +++ b/crates/proto/src/domain/block.rs @@ -3,7 +3,14 @@ use std::ops::RangeInclusive; use miden_protocol::account::AccountId; use miden_protocol::block::nullifier_tree::NullifierWitness; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, FeeParameters}; +use miden_protocol::block::{ + BlockBody, + BlockHeader, + BlockInputs, + BlockNumber, + FeeParameters, + SignedBlock, +}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature}; use miden_protocol::note::{NoteId, NoteInclusionProof}; use miden_protocol::transaction::PartialBlockchain; @@ -115,6 +122,84 @@ impl TryFrom for BlockHeader { } } +// BLOCK BODY +// ================================================================================================ + +impl From<&BlockBody> for proto::blockchain::BlockBody { + fn from(body: &BlockBody) -> Self { + Self { block_body: body.to_bytes() } + } +} + +impl From for proto::blockchain::BlockBody { + fn from(body: BlockBody) -> Self { + (&body).into() + } +} + +impl TryFrom<&proto::blockchain::BlockBody> for BlockBody { + type Error = ConversionError; + + fn try_from(value: &proto::blockchain::BlockBody) -> Result { + value.try_into() + } +} + +impl TryFrom for BlockBody { + type Error = ConversionError; + fn try_from(value: proto::blockchain::BlockBody) -> Result { + BlockBody::read_from_bytes(&value.block_body) + .map_err(|source| ConversionError::deserialization_error("BlockBody", source)) + } +} + +// SIGNED BLOCK +// ================================================================================================ + +impl From<&SignedBlock> for proto::blockchain::SignedBlock { + fn from(block: &SignedBlock) -> Self { + Self { + header: Some(block.header().into()), + body: Some(block.body().into()), + 
signature: Some(block.signature().into()), + } + } +} + +impl From for proto::blockchain::SignedBlock { + fn from(block: SignedBlock) -> Self { + (&block).into() + } +} + +impl TryFrom<&proto::blockchain::SignedBlock> for SignedBlock { + type Error = ConversionError; + + fn try_from(value: &proto::blockchain::SignedBlock) -> Result { + value.try_into() + } +} + +impl TryFrom for SignedBlock { + type Error = ConversionError; + fn try_from(value: proto::blockchain::SignedBlock) -> Result { + let header = value + .header + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(header)))? + .try_into()?; + let body = value + .body + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(body)))? + .try_into()?; + let signature = value + .signature + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(signature)))? + .try_into()?; + + Ok(SignedBlock::new_unchecked(header, body, signature)) + } +} + // BLOCK INPUTS // ================================================================================================ diff --git a/crates/proto/src/generated/blockchain.rs b/crates/proto/src/generated/blockchain.rs index 69bbe2e28..135d763e1 100644 --- a/crates/proto/src/generated/blockchain.rs +++ b/crates/proto/src/generated/blockchain.rs @@ -1,11 +1,13 @@ // This file is @generated by prost-build. -/// Represents a block. +/// Represents a signed block. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Block { - /// Block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::block::Block\]. - #[prost(bytes = "vec", tag = "1")] - pub block: ::prost::alloc::vec::Vec, +pub struct SignedBlock { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub body: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub signature: ::core::option::Option, } /// Represents a proposed block. 
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index be9d1d646..5fad016e1 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -1,4 +1,15 @@ // This file is @generated by prost-build. +/// Applies a block to the state. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct ApplyBlockRequest { + /// Ordered batches encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_objects::batch::OrderedBatches\]. + #[prost(bytes = "vec", tag = "1")] + pub ordered_batches: ::prost::alloc::vec::Vec, + /// Block signed by the Validator. + #[prost(message, optional, tag = "2")] + pub block: ::core::option::Option, +} /// Returns data required to prove the next block. #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockInputsRequest { @@ -1707,7 +1718,7 @@ pub mod block_producer_client { /// Applies changes of a new block to the DB and in-memory data structures. pub async fn apply_block( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { self.inner .ready() @@ -1843,7 +1854,7 @@ pub mod block_producer_server { /// Applies changes of a new block to the DB and in-memory data structures. async fn apply_block( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Retrieves block header by given block number. Optionally, it also returns the MMR path /// and current chain length to authenticate the block's inclusion. 
@@ -1955,7 +1966,7 @@ pub mod block_producer_server { struct ApplyBlockSvc(pub Arc); impl< T: BlockProducer, - > tonic::server::UnaryService + > tonic::server::UnaryService for ApplyBlockSvc { type Response = (); type Future = BoxFuture< @@ -1964,7 +1975,7 @@ pub mod block_producer_server { >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { diff --git a/crates/remote-prover-client/src/remote_prover/block_prover.rs b/crates/remote-prover-client/src/remote_prover/block_prover.rs index d1fa43548..c1562e597 100644 --- a/crates/remote-prover-client/src/remote_prover/block_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/block_prover.rs @@ -105,7 +105,7 @@ impl RemoteBlockProver { pub async fn prove( &self, tx_batches: OrderedBatches, - block_header: BlockHeader, + block_header: &BlockHeader, block_inputs: BlockInputs, ) -> Result { use miden_protocol::utils::Serializable; diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index b35fe8b6d..559d5b6aa 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -439,6 +439,7 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { store_runtime.spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, @@ -479,6 +480,7 @@ async fn restart_store(store_addr: SocketAddr, data_directory: &std::path::Path) store_runtime.spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index da6680d10..ba3f1fd47 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -22,13 +22,16 @@ deadpool-sync = { default-features = false, features = ["tr diesel = { features = ["numeric", "sqlite"], version = "2.3" } diesel_migrations = { features = ["sqlite"], version 
= "2.3" } fs-err = { workspace = true } +futures = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } +miden-block-prover = { workspace = true } miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-rocksdb-cxx-linkage-fix = { optional = true, workspace = true } miden-node-utils = { workspace = true } +miden-remote-prover-client = { features = ["block-prover"], workspace = true } miden-standards = { workspace = true } # TODO remove `testing` from `miden-protocol`, required for `BlockProof::new_dummy` miden-protocol = { features = ["std", "testing"], workspace = true } @@ -44,6 +47,7 @@ tonic = { default-features = true, workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } +url = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index f30a34a51..40491d4d5 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -1,6 +1,7 @@ CREATE TABLE block_headers ( block_num INTEGER NOT NULL, block_header BLOB NOT NULL, + signature BLOB NOT NULL, PRIMARY KEY (block_num), CONSTRAINT block_header_block_num_is_u32 CHECK (block_num BETWEEN 0 AND 0xFFFFFFFF) diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7fc4a5cab..2ea19ea35 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -10,7 +10,7 @@ use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; use miden_protocol::asset::{Asset, AssetVaultKey}; -use miden_protocol::block::{BlockHeader, 
BlockNoteIndex, BlockNumber, ProvenBlock}; +use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, SignedBlock}; use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::note::{ NoteDetails, @@ -249,6 +249,7 @@ impl Db { models::queries::apply_block( conn, genesis.header(), + genesis.signature(), &[], &[], genesis.body().updated_accounts(), @@ -566,17 +567,18 @@ impl Db { &self, allow_acquire: oneshot::Sender<()>, acquire_done: oneshot::Receiver<()>, - block: ProvenBlock, + signed_block: SignedBlock, notes: Vec<(NoteRecord, Option)>, ) -> Result<()> { self.transact("apply block", move |conn| -> Result<()> { models::queries::apply_block( conn, - block.header(), + signed_block.header(), + signed_block.signature(), ¬es, - block.body().created_nullifiers(), - block.body().updated_accounts(), - block.body().transactions(), + signed_block.body().created_nullifiers(), + signed_block.body().updated_accounts(), + signed_block.body().transactions(), )?; // XXX FIXME TODO free floating mutex MUST NOT exist diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index 9206311a1..2a4bf4078 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -161,6 +161,7 @@ fn create_test_account_with_storage() -> (Account, AccountId) { fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { use crate::db::schema::block_headers; + let secret_key = SecretKey::new(); let block_header = BlockHeader::new( 1_u8.into(), Word::default(), @@ -171,15 +172,17 @@ fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { Word::default(), Word::default(), Word::default(), - SecretKey::new().public_key(), + secret_key.public_key(), test_fee_params(), 0_u8.into(), ); + let signature = secret_key.sign(block_header.commitment()); diesel::insert_into(block_headers::table) .values(( 
block_headers::block_num.eq(i64::from(block_num.as_u32())), block_headers::block_header.eq(block_header.to_bytes()), + block_headers::signature.eq(signature.to_bytes()), )) .execute(conn) .expect("Failed to insert block header"); diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs index 3c295c72b..2b42b40a3 100644 --- a/crates/store/src/db/models/queries/block_headers.rs +++ b/crates/store/src/db/models/queries/block_headers.rs @@ -11,6 +11,7 @@ use diesel::{ SelectableHelper, SqliteConnection, }; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; use miden_node_utils::limiter::{QueryParamBlockLimit, QueryParamLimiter}; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::utils::{Deserializable, Serializable}; @@ -131,6 +132,7 @@ pub struct BlockHeaderRawRow { #[allow(dead_code)] pub block_num: i64, pub block_header: Vec, + pub signature: Vec, } impl TryInto for BlockHeaderRawRow { type Error = DatabaseError; @@ -140,18 +142,29 @@ impl TryInto for BlockHeaderRawRow { } } +impl TryInto<(BlockHeader, Signature)> for BlockHeaderRawRow { + type Error = DatabaseError; + fn try_into(self) -> Result<(BlockHeader, Signature), Self::Error> { + let block_header = BlockHeader::read_from_bytes(&self.block_header[..])?; + let signature = Signature::read_from_bytes(&self.signature[..])?; + Ok((block_header, signature)) + } +} + #[derive(Debug, Clone, Insertable)] #[diesel(table_name = schema::block_headers)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] pub struct BlockHeaderInsert { pub block_num: i64, pub block_header: Vec, + pub signature: Vec, } -impl From<&BlockHeader> for BlockHeaderInsert { - fn from(block_header: &BlockHeader) -> Self { +impl From<(&BlockHeader, &Signature)> for BlockHeaderInsert { + fn from(from: (&BlockHeader, &Signature)) -> Self { Self { - block_num: block_header.block_num().to_raw_sql(), - block_header: block_header.to_bytes(), + block_num: 
from.0.block_num().to_raw_sql(), + block_header: from.0.to_bytes(), + signature: from.1.to_bytes(), } } } @@ -174,8 +187,9 @@ impl From<&BlockHeader> for BlockHeaderInsert { pub(crate) fn insert_block_header( conn: &mut SqliteConnection, block_header: &BlockHeader, + signature: &Signature, ) -> Result { - let block_header = BlockHeaderInsert::from(block_header); + let block_header = BlockHeaderInsert::from((block_header, signature)); let count = diesel::insert_into(schema::block_headers::table) .values(&[block_header]) .execute(conn)?; diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs index 0f29b0015..fe603a876 100644 --- a/crates/store/src/db/models/queries/mod.rs +++ b/crates/store/src/db/models/queries/mod.rs @@ -31,6 +31,7 @@ )] use diesel::SqliteConnection; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; use miden_protocol::account::AccountId; use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; use miden_protocol::note::Nullifier; @@ -59,6 +60,7 @@ pub(crate) use notes::*; pub(crate) fn apply_block( conn: &mut SqliteConnection, block_header: &BlockHeader, + signature: &Signature, notes: &[(NoteRecord, Option)], nullifiers: &[Nullifier], accounts: &[BlockAccountUpdate], @@ -66,7 +68,7 @@ pub(crate) fn apply_block( ) -> Result { let mut count = 0; // Note: ordering here is important as the relevant tables have FK dependencies. - count += insert_block_header(conn, block_header)?; + count += insert_block_header(conn, block_header, signature)?; count += upsert_accounts(conn, accounts, block_header.block_num())?; count += insert_scripts(conn, notes.iter().map(|(note, _)| note))?; count += insert_notes(conn, notes)?; diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index e14d510c1..ebb8c280f 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -47,6 +47,7 @@ diesel::table! 
{ block_headers (block_num) { block_num -> BigInt, block_header -> Binary, + signature -> Binary, } } diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index fbc929564..f6cb0c328 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -101,7 +101,8 @@ fn create_block(conn: &mut SqliteConnection, block_num: BlockNumber) { 11_u8.into(), ); - conn.transaction(|conn| queries::insert_block_header(conn, &block_header)) + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + conn.transaction(|conn| queries::insert_block_header(conn, &block_header, &dummy_signature)) .unwrap(); } @@ -767,7 +768,8 @@ fn db_block_header() { ); // test insertion - queries::insert_block_header(conn, &block_header).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + queries::insert_block_header(conn, &block_header, &dummy_signature).unwrap(); // test fetch unknown block header let block_number = 1; @@ -798,7 +800,8 @@ fn db_block_header() { 21_u8.into(), ); - queries::insert_block_header(conn, &block_header2).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header2.commitment()); + queries::insert_block_header(conn, &block_header2, &dummy_signature).unwrap(); let res = queries::select_block_header_by_block_num(conn, None).unwrap(); assert_eq!(res.unwrap(), block_header2); @@ -1872,7 +1875,8 @@ fn db_roundtrip_block_header() { ); // Insert - queries::insert_block_header(&mut conn, &block_header).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + queries::insert_block_header(&mut conn, &block_header, &dummy_signature).unwrap(); // Retrieve let retrieved = diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index 1d345dcf0..1cc028ac3 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -11,6 +11,7 @@ pub mod state; pub use accounts::PersistentAccountTree; pub use accounts::{AccountTreeWithHistory, HistoricalError, 
InMemoryAccountTree}; pub use genesis::GenesisState; +pub use server::block_prover_client::BlockProver; pub use server::{DataDirectory, Store}; // CONSTANTS diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index 292842e77..63e0f5675 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -6,13 +6,15 @@ use miden_node_proto::generated as proto; use miden_node_utils::ErrorReport; use miden_protocol::Word; use miden_protocol::account::AccountId; -use miden_protocol::block::BlockNumber; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockInputs, BlockNumber}; use miden_protocol::note::Nullifier; use tonic::{Request, Response, Status}; use tracing::{info, instrument}; -use crate::COMPONENT; +use crate::errors::GetBlockInputsError; use crate::state::State; +use crate::{BlockProver, COMPONENT}; // STORE API // ================================================================================================ @@ -20,6 +22,7 @@ use crate::state::State; #[derive(Clone)] pub struct StoreApi { pub(super) state: Arc, + pub(super) block_prover: Arc, } impl StoreApi { @@ -43,6 +46,40 @@ impl StoreApi { mmr_path: mmr_proof.map(|p| Into::into(&p.merkle_path)), })) } + + /// Retrieves block inputs from state based on the contents of the supplied ordered batches. + pub(crate) async fn block_inputs_from_ordered_batches( + &self, + batches: &OrderedBatches, + ) -> Result { + // Construct fields required to retrieve block inputs. 
+ let mut account_ids = BTreeSet::new(); + let mut nullifiers = Vec::new(); + let mut unauthenticated_note_commitments = BTreeSet::new(); + let mut reference_blocks = BTreeSet::new(); + + for batch in batches.as_slice() { + account_ids.extend(batch.updated_accounts()); + nullifiers.extend(batch.created_nullifiers()); + reference_blocks.insert(batch.reference_block_num()); + + for note in batch.input_notes().iter() { + if let Some(header) = note.header() { + unauthenticated_note_commitments.insert(header.commitment()); + } + } + } + + // Retrieve block inputs from the store. + self.state + .get_block_inputs( + account_ids.into_iter().collect(), + nullifiers, + unauthenticated_note_commitments, + reference_blocks, + ) + .await + } } // UTILITIES diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs index 9dd2b39c4..25f6b05f6 100644 --- a/crates/store/src/server/block_producer.rs +++ b/crates/store/src/server/block_producer.rs @@ -1,12 +1,16 @@ use std::convert::Infallible; +use futures::TryFutureExt; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_node_proto::errors::MissingFieldHelper; use miden_node_proto::generated::store::block_producer_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; use miden_node_utils::ErrorReport; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; -use miden_protocol::block::{BlockNumber, ProvenBlock}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockBody, BlockHeader, BlockNumber, SignedBlock}; use miden_protocol::utils::Deserializable; use tonic::{Request, Response, Status}; use tracing::Instrument; @@ -40,33 +44,69 @@ impl block_producer_server::BlockProducer for StoreApi { /// Updates the local DB by inserting a new block header and the related data. 
async fn apply_block( &self, - request: Request, + request: Request, ) -> Result, Status> { let request = request.into_inner(); - - let block = ProvenBlock::read_from_bytes(&request.block).map_err(|err| { - Status::invalid_argument(err.as_report_context("block deserialization error")) - })?; + // Read ordered batches. + let ordered_batches = + OrderedBatches::read_from_bytes(&request.ordered_batches).map_err(|err| { + Status::invalid_argument( + err.as_report_context("failed to deserialize ordered batches"), + ) + })?; + // Read block. + let block = request + .block + .ok_or(proto::store::ApplyBlockRequest::missing_field(stringify!(block)))?; + // Read block header. + let header: BlockHeader = block + .header + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(header)))? + .try_into()?; + // Read block body. + let body: BlockBody = block + .body + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(body)))? + .try_into()?; + // Read signature. + let signature: Signature = block + .signature + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(signature)))? + .try_into()?; + + // Get block inputs from ordered batches. + let block_inputs = + self.block_inputs_from_ordered_batches(&ordered_batches).await.map_err(|err| { + Status::invalid_argument( + err.as_report_context("failed to get block inputs from ordered batches"), + ) + })?; let span = tracing::Span::current(); - span.set_attribute("block.number", block.header().block_num()); - span.set_attribute("block.commitment", block.header().commitment()); - span.set_attribute("block.accounts.count", block.body().updated_accounts().len()); - span.set_attribute("block.output_notes.count", block.body().output_notes().count()); - span.set_attribute("block.nullifiers.count", block.body().created_nullifiers().len()); - - // We perform the apply_block work in a separate task. 
This prevents the caller cancelling
-        // the request and thereby cancelling the task at an arbitrary point of execution.
+        span.set_attribute("block.number", header.block_num());
+        span.set_attribute("block.commitment", header.commitment());
+        span.set_attribute("block.accounts.count", body.updated_accounts().len());
+        span.set_attribute("block.output_notes.count", body.output_notes().count());
+        span.set_attribute("block.nullifiers.count", body.created_nullifiers().len());
+
+        // We perform the apply/prove block work in a separate task. This prevents the caller
+        // cancelling the request and thereby cancelling the task at an arbitrary point of
+        // execution.
         //
         // Normally this shouldn't be a problem, however our apply_block isn't quite ACID compliant
         // so things get a bit messy. This is more a temporary hack-around to minimize this risk.
         let this = self.clone();
-        tokio::spawn(
+        // TODO(sergerad): Use block proof.
+        let _block_proof = tokio::spawn(
             async move {
+                // SAFETY: The header, body, and signature are assumed to
+                // correspond to each other because they are provided by the Block
+                // Producer.
+                let signed_block = SignedBlock::new_unchecked(header.clone(), body, signature); // TODO(sergerad): Use `SignedBlock::new()` when available.
+                // Note: This is an internal endpoint, so it's safe to expose the full error
+                // report.
                 this.state
-                    .apply_block(block)
-                    .await
-                    .map(Response::new)
+                    .apply_block(signed_block)
                     .inspect_err(|err| {
                         span.set_error(err);
                     })
@@ -75,11 +115,15 @@ impl block_producer_server::BlockProducer for StoreApi {
                         ApplyBlockError::InvalidBlockError(_) => tonic::Code::InvalidArgument,
                         _ => tonic::Code::Internal,
                     };
-
-                    // This is an internal endpoint, so its safe to expose the full error
-                    // report. 
Status::new(code, err.as_report()) }) + .and_then(|_| { + this.block_prover + .prove(ordered_batches, block_inputs, &header) + .map_err(|err| Status::new(tonic::Code::Internal, err.as_report())) + }) + .await + .map(Response::new) } .in_current_span(), ) @@ -87,7 +131,8 @@ impl block_producer_server::BlockProducer for StoreApi { .map_err(|err| { tonic::Status::internal(err.as_report_context("joining apply_block task failed")) }) - .flatten() + .flatten()?; + Ok(Response::new(())) } /// Returns data needed by the block producer to construct and prove the next block. diff --git a/crates/store/src/server/block_prover_client.rs b/crates/store/src/server/block_prover_client.rs new file mode 100644 index 000000000..127ec36bd --- /dev/null +++ b/crates/store/src/server/block_prover_client.rs @@ -0,0 +1,56 @@ +use miden_block_prover::{BlockProverError, LocalBlockProver}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockProof}; +use miden_remote_prover_client::RemoteProverClientError; +use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; +use tracing::instrument; + +use crate::COMPONENT; + +#[derive(Debug, thiserror::Error)] +pub enum StoreProverError { + #[error("local proving failed")] + LocalProvingFailed(#[source] BlockProverError), + #[error("remote proving failed")] + RemoteProvingFailed(#[source] RemoteProverClientError), +} + +// BLOCK PROVER +// ================================================================================================ + +/// Block prover which allows for proving via either local or remote backend. +/// +/// The local proving variant is intended for development and testing purposes. +/// The remote proving variant is intended for production use. 
+pub enum BlockProver { + Local(LocalBlockProver), + Remote(RemoteBlockProver), +} + +impl BlockProver { + pub fn local() -> Self { + Self::Local(LocalBlockProver::new(0)) + } + + pub fn remote(endpoint: impl Into) -> Self { + Self::Remote(RemoteBlockProver::new(endpoint)) + } + + #[instrument(target = COMPONENT, skip_all, err)] + pub async fn prove( + &self, + tx_batches: OrderedBatches, + block_inputs: BlockInputs, + block_header: &BlockHeader, + ) -> Result { + match self { + Self::Local(prover) => Ok(prover + .prove(tx_batches, block_header, block_inputs) + .map_err(StoreProverError::LocalProvingFailed)?), + Self::Remote(prover) => Ok(prover + .prove(tx_batches, block_header, block_inputs) + .await + .map_err(StoreProverError::RemoteProvingFailed)?), + } + } +} diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index b4b5798db..3a284ceff 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -18,15 +18,17 @@ use tokio::task::JoinSet; use tokio_stream::wrappers::TcpListenerStream; use tower_http::trace::TraceLayer; use tracing::{info, instrument}; +use url::Url; use crate::blocks::BlockStore; use crate::db::Db; use crate::errors::ApplyBlockError; use crate::state::State; -use crate::{COMPONENT, GenesisState}; +use crate::{BlockProver, COMPONENT, GenesisState}; mod api; mod block_producer; +pub mod block_prover_client; mod ntx_builder; mod rpc_api; @@ -35,6 +37,8 @@ pub struct Store { pub rpc_listener: TcpListener, pub ntx_builder_listener: TcpListener, pub block_producer_listener: TcpListener, + /// URL for the Block Prover client. Uses local prover if `None`. + pub block_prover_url: Option, pub data_directory: PathBuf, /// Server-side timeout for an individual gRPC request. /// @@ -100,14 +104,25 @@ impl Store { .context("failed to load state")?, ); - let rpc_service = - store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); + // Initialize local or remote block prover. 
+ let block_prover = if let Some(url) = self.block_prover_url { + Arc::new(BlockProver::remote(url)) + } else { + Arc::new(BlockProver::local()) + }; + + let rpc_service = store::rpc_server::RpcServer::new(api::StoreApi { + state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), + }); let ntx_builder_service = store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), }); let block_producer_service = store::block_producer_server::BlockProducerServer::new(api::StoreApi { state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), }); let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(store_rpc_api_descriptor()) diff --git a/crates/store/src/state/apply_block.rs b/crates/store/src/state/apply_block.rs index 9b0bf6237..dfd0583b2 100644 --- a/crates/store/src/state/apply_block.rs +++ b/crates/store/src/state/apply_block.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use miden_node_utils::ErrorReport; use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::block::ProvenBlock; +use miden_protocol::block::SignedBlock; use miden_protocol::note::NoteDetails; use miden_protocol::transaction::OutputNote; use miden_protocol::utils::Serializable; @@ -41,13 +41,14 @@ impl State { // TODO: This span is logged in a root span, we should connect it to the parent span. #[allow(clippy::too_many_lines)] #[instrument(target = COMPONENT, skip_all, err)] - pub async fn apply_block(&self, block: ProvenBlock) -> Result<(), ApplyBlockError> { + pub async fn apply_block(&self, signed_block: SignedBlock) -> Result<(), ApplyBlockError> { let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?; - let header = block.header(); - - let tx_commitment = block.body().transactions().commitment(); + let header = signed_block.header(); + let body = signed_block.body(); + // Validate that header and body match. 
+ let tx_commitment = body.transactions().commitment(); if header.tx_commitment() != tx_commitment { return Err(InvalidBlockError::InvalidBlockTxCommitment { expected: tx_commitment, @@ -59,13 +60,12 @@ impl State { let block_num = header.block_num(); let block_commitment = header.commitment(); - // ensures the right block header is being processed + // Validate that the applied block is the next block in sequence. let prev_block = self .db .select_block_header_by_block_num(None) .await? .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; - let expected_block_num = prev_block.block_num().child(); if block_num != expected_block_num { return Err(InvalidBlockError::NewBlockInvalidBlockNum { @@ -78,20 +78,19 @@ impl State { return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); } - let block_data = block.to_bytes(); - // Save the block to the block store. In a case of a rolled-back DB transaction, the // in-memory state will be unchanged, but the block might still be written into the // block store. Thus, such block should be considered as block candidates, but not // finalized blocks. So we should check for the latest block when getting block from // the store. + let signed_block_bytes = signed_block.to_bytes(); let store = Arc::clone(&self.block_store); let block_save_task = tokio::spawn( - async move { store.save_block(block_num, &block_data).await }.in_current_span(), + async move { store.save_block(block_num, &signed_block_bytes).await }.in_current_span(), ); - // scope to read in-memory data, compute mutations required for updating account - // and nullifier trees, and validate the request + // Scope to read in-memory data, compute mutations required for updating account + // and nullifier trees, and validate the request. 
let ( nullifier_tree_old_root, nullifier_tree_update, @@ -103,8 +102,7 @@ impl State { let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); // nullifiers can be produced only once - let duplicate_nullifiers: Vec<_> = block - .body() + let duplicate_nullifiers: Vec<_> = body .created_nullifiers() .iter() .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) @@ -126,11 +124,7 @@ impl State { let nullifier_tree_update = inner .nullifier_tree .compute_mutations( - block - .body() - .created_nullifiers() - .iter() - .map(|nullifier| (*nullifier, block_num)), + body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), ) .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; @@ -147,9 +141,7 @@ impl State { let account_tree_update = inner .account_tree .compute_mutations( - block - .body() - .updated_accounts() + body.updated_accounts() .iter() .map(|update| (update.account_id(), update.final_state_commitment())), ) @@ -177,14 +169,13 @@ impl State { ) }; - // build note tree - let note_tree = block.body().compute_block_note_tree(); + // Build note tree. + let note_tree = body.compute_block_note_tree(); if note_tree.root() != header.note_root() { return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); } - let notes = block - .body() + let notes = body .output_notes() .map(|(note_index, note)| { let (details, nullifier) = match note { @@ -215,20 +206,20 @@ impl State { }) .collect::, InvalidBlockError>>()?; - // Signals the transaction is ready to be committed, and the write lock can be acquired + // Signals the transaction is ready to be committed, and the write lock can be acquired. let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); - // Signals the write lock has been acquired, and the transaction can be committed + // Signals the write lock has been acquired, and the transaction can be committed. 
let (inform_acquire_done, acquire_done) = oneshot::channel::<()>();
 
         // Extract public account updates with deltas before block is moved into async task.
         // Private accounts are filtered out since they don't expose their state changes.
         let account_deltas =
-            Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| {
-                match update.details() {
+            Vec::from_iter(body.updated_accounts().iter().filter_map(
+                |update| match update.details() {
                     AccountUpdateDetails::Delta(delta) => Some(delta.clone()),
                     AccountUpdateDetails::Private => None,
-                }
-            }));
+                },
+            ));
 
         // The DB and in-memory state updates need to be synchronized and are partially
         // overlapping. Namely, the DB transaction only proceeds after this task acquires the
@@ -236,17 +227,17 @@ impl State {
         // spawned.
         let db = Arc::clone(&self.db);
         let db_update_task = tokio::spawn(
-            async move { db.apply_block(allow_acquire, acquire_done, block, notes).await }
+            async move { db.apply_block(allow_acquire, acquire_done, signed_block, notes).await }
                 .in_current_span(),
         );
 
-        // Wait for the message from the DB update task, that we ready to commit the DB transaction
+        // Wait for the message from the DB update task, that we are ready to commit the DB transaction.
         acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?;
 
-        // Awaiting the block saving task to complete without errors
+        // Awaiting the block saving task to complete without errors.
         block_save_task.await??;
 
-        // Scope to update the in-memory data
+        // Scope to update the in-memory data.
         async move {
             // We need to hold the write lock here to prevent inconsistency between the in-memory
             // state and the DB state. Thus, we need to wait for the DB update task to complete
@@ -264,7 +255,7 @@ impl State {
             }
 
             // Notify the DB update task that the write lock has been acquired, so it can commit
-            // the DB transaction
+            // the DB transaction. 
inform_acquire_done .send(()) .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?; diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 001dc4098..c71e853da 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -96,7 +96,7 @@ service Rpc { // Store API for the BlockProducer component service BlockProducer { // Applies changes of a new block to the DB and in-memory data structures. - rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {} + rpc ApplyBlock(ApplyBlockRequest) returns (google.protobuf.Empty) {} // Retrieves block header by given block number. Optionally, it also returns the MMR path // and current chain length to authenticate the block's inclusion. @@ -112,6 +112,18 @@ service BlockProducer { rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} } +// APPLY BLOCK REQUEST +// ================================================================================================ + +// Applies a block to the state. +message ApplyBlockRequest { + // Ordered batches encoded using [winter_utils::Serializable] implementation for + // [miden_objects::batch::OrderedBatches]. + bytes ordered_batches = 1; + // Block signed by the Validator. + blockchain.SignedBlock block = 2; +} + // GET BLOCK INPUTS // ================================================================================================ diff --git a/proto/proto/types/blockchain.proto b/proto/proto/types/blockchain.proto index 6f53cd4f3..43828d4dc 100644 --- a/proto/proto/types/blockchain.proto +++ b/proto/proto/types/blockchain.proto @@ -7,11 +7,11 @@ import "types/primitives.proto"; // BLOCK // ================================================================================================ -// Represents a block. -message Block { - // Block data encoded using [winter_utils::Serializable] implementation for - // [miden_protocol::block::Block]. 
- bytes block = 1; +// Represents a signed block. +message SignedBlock { + BlockHeader header = 1; + BlockBody body = 2; + BlockSignature signature = 3; } // Represents a proposed block. From 925b2239025ccc731b170bf210ae645303350e2e Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Mon, 2 Feb 2026 20:11:59 +1300 Subject: [PATCH 10/77] chore(prover): re-export prover's from crate root (#1626) This removes a few levels of path nesting. --- crates/block-producer/src/batch_builder/mod.rs | 2 +- crates/ntx-builder/src/actor/execute.rs | 2 +- crates/ntx-builder/src/actor/mod.rs | 2 +- crates/remote-prover-client/src/lib.rs | 9 ++++++++- crates/store/src/server/block_prover_client.rs | 3 +-- 5 files changed, 12 insertions(+), 6 deletions(-) diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index e3cc714c2..34dab83a3 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -9,7 +9,7 @@ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::batch::{BatchId, ProposedBatch, ProvenBatch}; -use miden_remote_prover_client::remote_prover::batch_prover::RemoteBatchProver; +use miden_remote_prover_client::RemoteBatchProver; use miden_tx_batch_prover::LocalBatchProver; use rand::Rng; use tokio::task::JoinSet; diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index f90da19ab..671270486 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -31,7 +31,7 @@ use miden_protocol::transaction::{ TransactionInputs, }; use miden_protocol::vm::FutureMaybeSend; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use miden_remote_prover_client::RemoteTransactionProver; use 
miden_tx::auth::UnreachableAuth; use miden_tx::utils::Serializable; use miden_tx::{ diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index 74d8cb952..da7f8947d 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -18,7 +18,7 @@ use miden_protocol::account::{Account, AccountDelta}; use miden_protocol::block::BlockNumber; use miden_protocol::note::NoteScript; use miden_protocol::transaction::TransactionId; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use miden_remote_prover_client::RemoteTransactionProver; use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; use tokio_util::sync::CancellationToken; use url::Url; diff --git a/crates/remote-prover-client/src/lib.rs b/crates/remote-prover-client/src/lib.rs index d2e0d0182..27b6fa049 100644 --- a/crates/remote-prover-client/src/lib.rs +++ b/crates/remote-prover-client/src/lib.rs @@ -15,7 +15,14 @@ extern crate std; use thiserror::Error; -pub mod remote_prover; +mod remote_prover; + +#[cfg(feature = "batch-prover")] +pub use remote_prover::batch_prover::RemoteBatchProver; +#[cfg(feature = "block-prover")] +pub use remote_prover::block_prover::RemoteBlockProver; +#[cfg(feature = "tx-prover")] +pub use remote_prover::tx_prover::RemoteTransactionProver; /// ERRORS /// =============================================================================================== diff --git a/crates/store/src/server/block_prover_client.rs b/crates/store/src/server/block_prover_client.rs index 127ec36bd..5af15ac43 100644 --- a/crates/store/src/server/block_prover_client.rs +++ b/crates/store/src/server/block_prover_client.rs @@ -1,8 +1,7 @@ use miden_block_prover::{BlockProverError, LocalBlockProver}; use miden_protocol::batch::OrderedBatches; use miden_protocol::block::{BlockHeader, BlockInputs, BlockProof}; -use miden_remote_prover_client::RemoteProverClientError; -use 
miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; +use miden_remote_prover_client::{RemoteBlockProver, RemoteProverClientError}; use tracing::instrument; use crate::COMPONENT; From ce05e7a41a6207460e43c9b513168bd042a8784e Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Mon, 2 Feb 2026 08:19:46 -0300 Subject: [PATCH 11/77] refactor: ntx builder start up (#1610) --- CHANGELOG.md | 4 + bin/node/src/commands/bundled.rs | 35 +-- bin/node/src/commands/mod.rs | 17 ++ crates/ntx-builder/src/actor/account_state.rs | 12 +- crates/ntx-builder/src/actor/mod.rs | 17 +- crates/ntx-builder/src/block_producer.rs | 4 +- crates/ntx-builder/src/builder.rs | 204 ++++++--------- crates/ntx-builder/src/coordinator.rs | 13 +- crates/ntx-builder/src/lib.rs | 234 +++++++++++++++++- 9 files changed, 378 insertions(+), 162 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cd49c0cdf..437a27b8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,10 @@ - Fixed network monitor faucet test failing to parse `/get_metadata` response due to field type mismatches ([#1612](https://github.com/0xMiden/miden-node/pull/1612)). +### Changes + +- Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). + ## v0.13.2 (2026-01-27) - Network transaction builder no longer creates conflicting transactions by consuming the same notes twice ([#1597](https://github.com/0xMiden/miden-node/issues/1597)). 
diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index d3940d454..9cfc654b1 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -4,7 +4,6 @@ use std::time::Duration; use anyhow::Context; use miden_node_block_producer::BlockProducer; -use miden_node_ntx_builder::NetworkTransactionBuilder; use miden_node_rpc::Rpc; use miden_node_store::Store; use miden_node_utils::grpc::UrlExt; @@ -304,27 +303,29 @@ impl BundledCommand { ]); // Start network transaction builder. The endpoint is available after loading completes. - let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) - .context("Failed to parse URL")?; - if should_start_ntx_builder { + let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) + .context("Failed to parse URL")?; let validator_url = Url::parse(&format!("http://{validator_address}")) .context("Failed to parse URL")?; + let block_producer_url = Url::parse(&format!("http://{block_producer_address}")) + .context("Failed to parse URL")?; + + let builder_config = ntx_builder.into_builder_config( + store_ntx_builder_url, + block_producer_url, + validator_url, + ); + let id = join_set .spawn(async move { - let block_producer_url = - Url::parse(&format!("http://{block_producer_address}")) - .context("Failed to parse URL")?; - NetworkTransactionBuilder::new( - store_ntx_builder_url, - block_producer_url, - validator_url, - ntx_builder.tx_prover_url, - ntx_builder.script_cache_size, - ) - .run() - .await - .context("failed while serving ntx builder component") + builder_config + .build() + .await + .context("failed to initialize ntx builder")? 
+ .run() + .await + .context("failed while serving ntx builder component") }) .id(); component_ids.insert(id, "ntx-builder"); diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 62a288664..5b1e8e52a 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -68,6 +68,9 @@ pub struct NtxBuilderConfig { )] pub ticker_interval: Duration, + /// Number of note scripts to cache locally. + /// + /// Note scripts not in cache must first be retrieved from the store. #[arg( long = "ntx-builder.script-cache-size", env = ENV_NTX_SCRIPT_CACHE_SIZE, @@ -77,6 +80,20 @@ pub struct NtxBuilderConfig { pub script_cache_size: NonZeroUsize, } +impl NtxBuilderConfig { + /// Converts this CLI config into the ntx-builder's internal config. + pub fn into_builder_config( + self, + store_url: Url, + block_producer_url: Url, + validator_url: Url, + ) -> miden_node_ntx_builder::NtxBuilderConfig { + miden_node_ntx_builder::NtxBuilderConfig::new(store_url, block_producer_url, validator_url) + .with_tx_prover_url(self.tx_prover_url) + .with_script_cache_size(self.script_cache_size) + } +} + /// Configuration for the Block Producer component #[derive(clap::Args)] pub struct BlockProducerConfig { diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs index 25020c8b2..ad0b15ebc 100644 --- a/crates/ntx-builder/src/actor/account_state.rs +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -72,9 +72,6 @@ pub struct NetworkAccountState { } impl NetworkAccountState { - /// Maximum number of attempts to execute a network note. - const MAX_NOTE_ATTEMPTS: usize = 30; - /// Load's all available network notes from the store, along with the required account states. #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] pub async fn load( @@ -110,14 +107,21 @@ impl NetworkAccountState { } /// Selects the next candidate network transaction. 
+ /// + /// # Parameters + /// + /// - `limit`: Maximum number of notes to include in the transaction. + /// - `max_note_attempts`: Maximum number of execution attempts before a note is dropped. + /// - `chain_state`: Current chain state for the transaction. #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)] pub fn select_candidate( &mut self, limit: NonZeroUsize, + max_note_attempts: usize, chain_state: ChainState, ) -> Option { // Remove notes that have failed too many times. - self.account.drop_failing_notes(Self::MAX_NOTE_ATTEMPTS); + self.account.drop_failing_notes(max_note_attempts); // Skip empty accounts, and prune them. // This is how we keep the number of accounts bounded. diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index da7f8947d..dd15c8e0e 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -3,6 +3,7 @@ mod execute; mod inflight_note; mod note_state; +use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; @@ -66,6 +67,10 @@ pub struct AccountActorContext { /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. /// This cache is shared across all account actors to maximize cache efficiency. pub script_cache: LruCache, + /// Maximum number of notes per transaction. + pub max_notes_per_tx: NonZeroUsize, + /// Maximum number of note execution attempts before dropping a note. + pub max_note_attempts: usize, } // ACCOUNT ORIGIN @@ -161,6 +166,10 @@ pub struct AccountActor { prover: Option, chain_state: Arc>, script_cache: LruCache, + /// Maximum number of notes per transaction. + max_notes_per_tx: NonZeroUsize, + /// Maximum number of note execution attempts before dropping a note. 
+ max_note_attempts: usize, } impl AccountActor { @@ -192,6 +201,8 @@ impl AccountActor { prover, chain_state: actor_context.chain_state.clone(), script_cache: actor_context.script_cache.clone(), + max_notes_per_tx: actor_context.max_notes_per_tx, + max_note_attempts: actor_context.max_note_attempts, } } @@ -258,7 +269,11 @@ impl AccountActor { // Read the chain state. let chain_state = self.chain_state.read().await.clone(); // Find a candidate transaction and execute it. - if let Some(tx_candidate) = state.select_candidate(crate::MAX_NOTES_PER_TX, chain_state) { + if let Some(tx_candidate) = state.select_candidate( + self.max_notes_per_tx, + self.max_note_attempts, + chain_state, + ) { self.execute_transactions(&mut state, tx_candidate).await; } else { // No transactions to execute, wait for events. diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index ce4d7b9c6..53925bdcf 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -62,7 +62,7 @@ impl BlockProducerClient { pub async fn subscribe_to_mempool_with_retry( &self, chain_tip: BlockNumber, - ) -> Result, Status> { + ) -> Result + Send + 'static, Status> { let mut retry_counter = 0; loop { match self.subscribe_to_mempool(chain_tip).await { @@ -90,7 +90,7 @@ impl BlockProducerClient { async fn subscribe_to_mempool( &self, chain_tip: BlockNumber, - ) -> Result, Status> { + ) -> Result + Send + 'static, Status> { let request = proto::block_producer::MempoolSubscriptionRequest { chain_tip: chain_tip.as_u32() }; let stream = self.client.clone().mempool_subscription(request).await?; diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 8b789779f..71abe49ee 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -1,32 +1,23 @@ -use std::num::NonZeroUsize; +use std::pin::Pin; use std::sync::Arc; use anyhow::Context; -use futures::TryStreamExt; +use 
futures::Stream; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_utils::lru_cache::LruCache; -use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::block::BlockHeader; use miden_protocol::crypto::merkle::mmr::PartialMmr; -use miden_protocol::note::NoteScript; use miden_protocol::transaction::PartialBlockchain; use tokio::sync::{RwLock, mpsc}; -use url::Url; +use tokio_stream::StreamExt; +use tonic::Status; -use crate::MAX_IN_PROGRESS_TXS; +use crate::NtxBuilderConfig; use crate::actor::{AccountActorContext, AccountOrigin}; -use crate::block_producer::BlockProducerClient; use crate::coordinator::Coordinator; use crate::store::StoreClient; -// CONSTANTS -// ================================================================================================= - -/// The maximum number of blocks to keep in memory while tracking the chain tip. -const MAX_BLOCK_COUNT: usize = 4; - // CHAIN STATE // ================================================================================================ @@ -42,7 +33,7 @@ pub struct ChainState { impl ChainState { /// Constructs a new instance of [`ChainState`]. - fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { + pub(crate) fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { let chain_mmr = PartialBlockchain::new(chain_mmr, []) .expect("partial blockchain should build from partial mmr"); Self { chain_tip_header, chain_mmr } @@ -58,103 +49,75 @@ impl ChainState { // NETWORK TRANSACTION BUILDER // ================================================================================================ +/// A boxed, pinned stream of mempool events with a `'static` lifetime. +/// +/// Boxing gives the stream a `'static` lifetime by ensuring it owns all its data, avoiding +/// complex lifetime annotations that would otherwise be required when storing `impl TryStream`. 
+pub(crate) type MempoolEventStream = + Pin> + Send>>; + /// Network transaction builder component. /// -/// The network transaction builder is in in charge of building transactions that consume notes +/// The network transaction builder is in charge of building transactions that consume notes /// against network accounts. These notes are identified and communicated by the block producer. /// The service maintains a list of unconsumed notes and periodically executes and proves /// transactions that consume them (reaching out to the store to retrieve state as necessary). /// /// The builder manages the tasks for every network account on the chain through the coordinator. +/// +/// Create an instance using [`NtxBuilderConfig::build()`]. pub struct NetworkTransactionBuilder { - /// Address of the store gRPC server. - store_url: Url, - /// Address of the block producer gRPC server. - block_producer_url: Url, - /// Address of the Validator server. - validator_url: Url, - /// Address of the remote prover. If `None`, transactions will be proven locally, which is - /// undesirable due to the performance impact. - tx_prover_url: Option, - /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. - /// This cache is shared across all account actors. - script_cache: LruCache, + /// Configuration for the builder. + config: NtxBuilderConfig, /// Coordinator for managing actor tasks. coordinator: Coordinator, + /// Client for the store gRPC API. + store: StoreClient, + /// Shared chain state updated by the event loop and read by actors. + chain_state: Arc>, + /// Context shared with all account actors. + actor_context: AccountActorContext, + /// Stream of mempool events from the block producer. + mempool_events: MempoolEventStream, } impl NetworkTransactionBuilder { - /// Channel capacity for account loading. - const ACCOUNT_CHANNEL_CAPACITY: usize = 1_000; - - /// Creates a new instance of the network transaction builder. 
- pub fn new( - store_url: Url, - block_producer_url: Url, - validator_url: Url, - tx_prover_url: Option, - script_cache_size: NonZeroUsize, + pub(crate) fn new( + config: NtxBuilderConfig, + coordinator: Coordinator, + store: StoreClient, + chain_state: Arc>, + actor_context: AccountActorContext, + mempool_events: MempoolEventStream, ) -> Self { - let script_cache = LruCache::new(script_cache_size); - let coordinator = Coordinator::new(MAX_IN_PROGRESS_TXS); Self { - store_url, - block_producer_url, - validator_url, - tx_prover_url, - script_cache, + config, coordinator, + store, + chain_state, + actor_context, + mempool_events, } } - /// Runs the network transaction builder until a fatal error occurs. + /// Runs the network transaction builder event loop until a fatal error occurs. + /// + /// This method: + /// 1. Spawns a background task to load existing network accounts from the store + /// 2. Runs the main event loop, processing mempool events and managing actors + /// + /// # Errors + /// + /// Returns an error if: + /// - The mempool event stream ends unexpectedly + /// - An actor encounters a fatal error + /// - The account loader task fails pub async fn run(mut self) -> anyhow::Result<()> { - let store = StoreClient::new(self.store_url.clone()); - let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); - - // Loop until we successfully subscribe. - // - // The mempool rejects our subscription if we don't have the same view of the chain aka - // if our chain tip does not match the mempools. This can occur if a new block is committed - // _after_ we fetch the chain tip from the store but _before_ our subscription request is - // handled. - // - // This is a hack-around for https://github.com/0xMiden/miden-node/issues/1566. - let (chain_tip_header, chain_mmr, mut mempool_events) = loop { - let (chain_tip_header, chain_mmr) = store - .get_latest_blockchain_data_with_retry() - .await? 
- .expect("store should contain a latest block"); - - match block_producer - .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) - .await - { - Ok(subscription) => break (chain_tip_header, chain_mmr, subscription), - Err(status) if status.code() == tonic::Code::InvalidArgument => { - tracing::error!(err=%status, "mempool subscription failed due to desync, trying again"); - }, - Err(err) => return Err(err).context("failed to subscribe to mempool events"), - } - }; - - // Create chain state that will be updated by the coordinator and read by actors. - let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); - - let actor_context = AccountActorContext { - block_producer_url: self.block_producer_url.clone(), - validator_url: self.validator_url.clone(), - tx_prover_url: self.tx_prover_url.clone(), - chain_state: chain_state.clone(), - store: store.clone(), - script_cache: self.script_cache.clone(), - }; - // Spawn a background task to load network accounts from the store. - // Accounts are sent through a channel in batches and processed in the main event loop. + // Accounts are sent through a channel and processed in the main event loop. let (account_tx, mut account_rx) = - mpsc::channel::(Self::ACCOUNT_CHANNEL_CAPACITY); - let account_loader_store = store.clone(); + mpsc::channel::(self.config.account_channel_capacity); + let account_loader_store = self.store.clone(); let mut account_loader_handle = tokio::spawn(async move { account_loader_store .stream_network_account_ids(account_tx) @@ -162,7 +125,7 @@ impl NetworkTransactionBuilder { .context("failed to load network accounts from store") }); - // Main loop which manages actors and passes mempool events to them. + // Main event loop. loop { tokio::select! { // Handle actor result. @@ -170,22 +133,18 @@ impl NetworkTransactionBuilder { result?; }, // Handle mempool events. 
- event = mempool_events.try_next() => { + event = self.mempool_events.next() => { let event = event .context("mempool event stream ended")? .context("mempool event stream failed")?; - self.handle_mempool_event( - event.into(), - &actor_context, - chain_state.clone(), - ).await?; + self.handle_mempool_event(event.into()).await?; }, // Handle account batches loaded from the store. // Once all accounts are loaded, the channel closes and this branch // becomes inactive (recv returns None and we stop matching). Some(account_id) = account_rx.recv() => { - self.handle_loaded_account(account_id, &actor_context).await?; + self.handle_loaded_account(account_id).await?; }, // Handle account loader task completion/failure. // If the task fails, we abort since the builder would be in a degraded state @@ -202,33 +161,23 @@ impl NetworkTransactionBuilder { } } - /// Handles a batch of account IDs loaded from the store by spawning actors for them. - #[tracing::instrument( - name = "ntx.builder.handle_loaded_accounts", - skip(self, account_id, actor_context) - )] + /// Handles account IDs loaded from the store by spawning actors for them. + #[tracing::instrument(name = "ntx.builder.handle_loaded_account", skip(self, account_id))] async fn handle_loaded_account( &mut self, account_id: NetworkAccountId, - actor_context: &AccountActorContext, ) -> Result<(), anyhow::Error> { self.coordinator - .spawn_actor(AccountOrigin::store(account_id), actor_context) + .spawn_actor(AccountOrigin::store(account_id), &self.actor_context) .await?; Ok(()) } - /// Handles mempool events by sending them to actors via the coordinator and/or spawning new - /// actors as required. - #[tracing::instrument( - name = "ntx.builder.handle_mempool_event", - skip(self, event, actor_context, chain_state) - )] + /// Handles mempool events by routing them to actors and spawning new actors as needed. 
+ #[tracing::instrument(name = "ntx.builder.handle_mempool_event", skip(self, event))] async fn handle_mempool_event( &mut self, event: Arc, - actor_context: &AccountActorContext, - chain_state: Arc>, ) -> Result<(), anyhow::Error> { match event.as_ref() { MempoolEvent::TransactionAdded { account_delta, .. } => { @@ -236,10 +185,12 @@ impl NetworkTransactionBuilder { if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { // Handle account deltas for network accounts only. if let Some(network_account) = AccountOrigin::transaction(delta) { - // Spawn new actors if a transaction creates a new network account + // Spawn new actors if a transaction creates a new network account. let is_creating_account = delta.is_full_state(); if is_creating_account { - self.coordinator.spawn_actor(network_account, actor_context).await?; + self.coordinator + .spawn_actor(network_account, &self.actor_context) + .await?; } } } @@ -248,11 +199,11 @@ impl NetworkTransactionBuilder { }, // Update chain state and broadcast. MempoolEvent::BlockCommitted { header, txs } => { - self.update_chain_tip(header.as_ref().clone(), chain_state).await; + self.update_chain_tip(header.as_ref().clone()).await; self.coordinator.broadcast(event.clone()).await; - // All transactions pertaining to predating events should now be available through - // the store. So we can now drain them. + // All transactions pertaining to predating events should now be available + // through the store. So we can now drain them. for tx_id in txs { self.coordinator.drain_predating_events(tx_id); } @@ -271,12 +222,9 @@ impl NetworkTransactionBuilder { } } - /// Updates the chain tip and MMR block count. - /// - /// Blocks in the MMR are pruned if the block count exceeds the maximum. - async fn update_chain_tip(&mut self, tip: BlockHeader, chain_state: Arc>) { - // Lock the chain state. - let mut chain_state = chain_state.write().await; + /// Updates the chain tip and prunes old blocks from the MMR. 
+ async fn update_chain_tip(&mut self, tip: BlockHeader) { + let mut chain_state = self.chain_state.write().await; // Update MMR which lags by one block. let mmr_tip = chain_state.chain_tip_header.clone(); @@ -286,9 +234,11 @@ impl NetworkTransactionBuilder { chain_state.chain_tip_header = tip; // Keep MMR pruned. - let pruned_block_height = - (chain_state.chain_mmr.chain_length().as_usize().saturating_sub(MAX_BLOCK_COUNT)) - as u32; + let pruned_block_height = (chain_state + .chain_mmr + .chain_length() + .as_usize() + .saturating_sub(self.config.max_block_count)) as u32; chain_state.chain_mmr.prune_to(..pruned_block_height.into()); } } diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index 285cee47a..959a119fb 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -90,20 +90,21 @@ pub struct Coordinator { /// Cache of events received from the mempool that predate corresponding network accounts. /// Grouped by network account ID to allow targeted event delivery to actors upon creation. predating_events: HashMap>>, + + /// Channel size for each actor's event channel. + actor_channel_size: usize, } impl Coordinator { - /// Maximum number of messages of the message channel for each actor. - const ACTOR_CHANNEL_SIZE: usize = 100; - /// Creates a new coordinator with the specified maximum number of inflight transactions - /// and shared script cache. - pub fn new(max_inflight_transactions: usize) -> Self { + /// and actor channel size. 
+ pub fn new(max_inflight_transactions: usize, actor_channel_size: usize) -> Self { Self { actor_registry: HashMap::new(), actor_join_set: JoinSet::new(), semaphore: Arc::new(Semaphore::new(max_inflight_transactions)), predating_events: HashMap::new(), + actor_channel_size, } } @@ -126,7 +127,7 @@ impl Coordinator { handle.cancel_token.cancel(); } - let (event_tx, event_rx) = mpsc::channel(Self::ACTOR_CHANNEL_SIZE); + let (event_tx, event_rx) = mpsc::channel(self.actor_channel_size); let cancel_token = tokio_util::sync::CancellationToken::new(); let actor = AccountActor::new(origin, actor_context, event_rx, cancel_token.clone()); let handle = ActorHandle::new(event_tx, cancel_token); diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index 62088ce6c..fe32f850f 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -1,4 +1,16 @@ use std::num::NonZeroUsize; +use std::sync::Arc; + +use actor::AccountActorContext; +use anyhow::Context; +use block_producer::BlockProducerClient; +use builder::{ChainState, MempoolEventStream}; +use coordinator::Coordinator; +use futures::TryStreamExt; +use miden_node_utils::lru_cache::LruCache; +use store::StoreClient; +use tokio::sync::RwLock; +use url::Url; mod actor; mod block_producer; @@ -13,12 +25,224 @@ pub use builder::NetworkTransactionBuilder; const COMPONENT: &str = "miden-ntx-builder"; -/// Maximum number of network notes a network transaction is allowed to consume. -const MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(20).unwrap(); -const _: () = assert!(MAX_NOTES_PER_TX.get() <= miden_tx::MAX_NUM_CHECKER_NOTES); +/// Default maximum number of network notes a network transaction is allowed to consume. 
+const DEFAULT_MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(20).expect("literal is non-zero"); +const _: () = assert!(DEFAULT_MAX_NOTES_PER_TX.get() <= miden_tx::MAX_NUM_CHECKER_NOTES); -/// Maximum number of network transactions which should be in progress concurrently. +/// Default maximum number of network transactions which should be in progress concurrently. /// /// This only counts transactions which are being computed locally and does not include /// uncommitted transactions in the mempool. -const MAX_IN_PROGRESS_TXS: usize = 4; +const DEFAULT_MAX_CONCURRENT_TXS: usize = 4; + +/// Default maximum number of blocks to keep in the chain MMR. +const DEFAULT_MAX_BLOCK_COUNT: usize = 4; + +/// Default channel capacity for account loading from the store. +const DEFAULT_ACCOUNT_CHANNEL_CAPACITY: usize = 1_000; + +/// Default channel size for actor event channels. +const DEFAULT_ACTOR_CHANNEL_SIZE: usize = 100; + +/// Default maximum number of attempts to execute a failing note before dropping it. +const DEFAULT_MAX_NOTE_ATTEMPTS: usize = 30; + +/// Default script cache size. +const DEFAULT_SCRIPT_CACHE_SIZE: NonZeroUsize = + NonZeroUsize::new(1_000).expect("literal is non-zero"); + +// CONFIGURATION +// ================================================================================================= + +/// Configuration for the Network Transaction Builder. +/// +/// This struct contains all the settings needed to create and run a `NetworkTransactionBuilder`. +#[derive(Debug, Clone)] +pub struct NtxBuilderConfig { + /// Address of the store gRPC server (ntx-builder API). + pub store_url: Url, + + /// Address of the block producer gRPC server. + pub block_producer_url: Url, + + /// Address of the validator gRPC server. + pub validator_url: Url, + + /// Address of the remote transaction prover. If `None`, transactions will be proven locally. + pub tx_prover_url: Option, + + /// Size of the LRU cache for note scripts. 
Scripts are fetched from the store and cached + /// to avoid repeated gRPC calls. + pub script_cache_size: NonZeroUsize, + + /// Maximum number of network transactions which should be in progress concurrently across + /// all account actors. + pub max_concurrent_txs: usize, + + /// Maximum number of network notes a single transaction is allowed to consume. + pub max_notes_per_tx: NonZeroUsize, + + /// Maximum number of attempts to execute a failing note before dropping it. + /// Notes use exponential backoff between attempts. + pub max_note_attempts: usize, + + /// Maximum number of blocks to keep in the chain MMR. Older blocks are pruned. + pub max_block_count: usize, + + /// Channel capacity for loading accounts from the store during startup. + pub account_channel_capacity: usize, + + /// Channel size for each actor's event channel. + pub actor_channel_size: usize, +} + +impl NtxBuilderConfig { + pub fn new(store_url: Url, block_producer_url: Url, validator_url: Url) -> Self { + Self { + store_url, + block_producer_url, + validator_url, + tx_prover_url: None, + script_cache_size: DEFAULT_SCRIPT_CACHE_SIZE, + max_concurrent_txs: DEFAULT_MAX_CONCURRENT_TXS, + max_notes_per_tx: DEFAULT_MAX_NOTES_PER_TX, + max_note_attempts: DEFAULT_MAX_NOTE_ATTEMPTS, + max_block_count: DEFAULT_MAX_BLOCK_COUNT, + account_channel_capacity: DEFAULT_ACCOUNT_CHANNEL_CAPACITY, + actor_channel_size: DEFAULT_ACTOR_CHANNEL_SIZE, + } + } + + /// Sets the remote transaction prover URL. + /// + /// If not set, transactions will be proven locally. + #[must_use] + pub fn with_tx_prover_url(mut self, url: Option) -> Self { + self.tx_prover_url = url; + self + } + + /// Sets the script cache size. + #[must_use] + pub fn with_script_cache_size(mut self, size: NonZeroUsize) -> Self { + self.script_cache_size = size; + self + } + + /// Sets the maximum number of concurrent transactions. 
+ #[must_use] + pub fn with_max_concurrent_txs(mut self, max: usize) -> Self { + self.max_concurrent_txs = max; + self + } + + /// Sets the maximum number of notes per transaction. + /// + /// # Panics + /// + /// Panics if `max` exceeds `miden_tx::MAX_NUM_CHECKER_NOTES`. + #[must_use] + pub fn with_max_notes_per_tx(mut self, max: NonZeroUsize) -> Self { + assert!( + max.get() <= miden_tx::MAX_NUM_CHECKER_NOTES, + "max_notes_per_tx ({}) exceeds MAX_NUM_CHECKER_NOTES ({})", + max, + miden_tx::MAX_NUM_CHECKER_NOTES + ); + self.max_notes_per_tx = max; + self + } + + /// Sets the maximum number of note execution attempts. + #[must_use] + pub fn with_max_note_attempts(mut self, max: usize) -> Self { + self.max_note_attempts = max; + self + } + + /// Sets the maximum number of blocks to keep in the chain MMR. + #[must_use] + pub fn with_max_block_count(mut self, max: usize) -> Self { + self.max_block_count = max; + self + } + + /// Sets the account channel capacity for startup loading. + #[must_use] + pub fn with_account_channel_capacity(mut self, capacity: usize) -> Self { + self.account_channel_capacity = capacity; + self + } + + /// Sets the actor event channel size. + #[must_use] + pub fn with_actor_channel_size(mut self, size: usize) -> Self { + self.actor_channel_size = size; + self + } + + /// Builds and initializes the network transaction builder. + /// + /// This method connects to the store and block producer services, fetches the current + /// chain tip, and subscribes to mempool events. 
+ /// + /// # Errors + /// + /// Returns an error if: + /// - The store connection fails + /// - The mempool subscription fails (after retries) + /// - The store contains no blocks (not bootstrapped) + pub async fn build(self) -> anyhow::Result { + let script_cache = LruCache::new(self.script_cache_size); + let coordinator = Coordinator::new(self.max_concurrent_txs, self.actor_channel_size); + + let store = StoreClient::new(self.store_url.clone()); + let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); + + let (chain_tip_header, chain_mmr, mempool_events) = loop { + let (chain_tip_header, chain_mmr) = store + .get_latest_blockchain_data_with_retry() + .await? + .context("store should contain a latest block")?; + + match block_producer + .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) + .await + { + Ok(subscription) => { + let stream: MempoolEventStream = Box::pin(subscription.into_stream()); + break (chain_tip_header, chain_mmr, stream); + }, + Err(status) if status.code() == tonic::Code::InvalidArgument => { + tracing::warn!( + err = %status, + "mempool subscription failed due to chain tip desync, retrying" + ); + }, + Err(err) => return Err(err).context("failed to subscribe to mempool events"), + } + }; + + let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); + + let actor_context = AccountActorContext { + block_producer_url: self.block_producer_url.clone(), + validator_url: self.validator_url.clone(), + tx_prover_url: self.tx_prover_url.clone(), + chain_state: chain_state.clone(), + store: store.clone(), + script_cache, + max_notes_per_tx: self.max_notes_per_tx, + max_note_attempts: self.max_note_attempts, + }; + + Ok(NetworkTransactionBuilder::new( + self, + coordinator, + store, + chain_state, + actor_context, + mempool_events, + )) + } +} From c54f4e6229789be53fb74e517a3971f9c1693368 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> 
Date: Mon, 2 Feb 2026 08:31:10 -0300 Subject: [PATCH 12/77] chore(rpc): refactor account delta check test (#1621) --- crates/rpc/src/tests.rs | 140 +++++++++++++++++----------------------- 1 file changed, 61 insertions(+), 79 deletions(-) diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 559d5b6aa..3d87c8328 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -19,6 +19,7 @@ use miden_node_utils::limiter::{ use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ + Account, AccountBuilder, AccountDelta, AccountId, @@ -28,7 +29,7 @@ use miden_protocol::account::{ }; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::testing::noop_auth_component::NoopAuthComponent; -use miden_protocol::transaction::ProvenTransactionBuilder; +use miden_protocol::transaction::{ProvenTransaction, ProvenTransactionBuilder}; use miden_protocol::utils::Serializable; use miden_protocol::vm::ExecutionProof; use miden_standards::account::wallets::BasicWallet; @@ -40,6 +41,53 @@ use url::Url; use crate::Rpc; +/// Byte offset of the account delta commitment in serialized `ProvenTransaction`. +/// Layout: `AccountId` (15) + `initial_commitment` (32) + `final_commitment` (32) = 79 +const DELTA_COMMITMENT_BYTE_OFFSET: usize = 15 + 32 + 32; + +/// Creates a minimal account and its delta for testing proven transaction building. +fn build_test_account(seed: [u8; 32]) -> (Account, AccountDelta) { + let account = AccountBuilder::new(seed) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_assets(vec![]) + .with_component(BasicWallet) + .with_auth_component(NoopAuthComponent) + .build_existing() + .unwrap(); + + let delta: AccountDelta = account.clone().try_into().unwrap(); + (account, delta) +} + +/// Creates a minimal proven transaction for testing. 
+/// +/// This uses `ExecutionProof::new_dummy()` and is intended for tests that +/// need to test validation logic. +fn build_test_proven_tx(account: &Account, delta: &AccountDelta) -> ProvenTransaction { + let account_id = AccountId::dummy( + [0; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + ProvenTransactionBuilder::new( + account_id, + [8; 32].try_into().unwrap(), + account.commitment(), + delta.to_commitment(), + 0.into(), + Word::default(), + test_fee(), + u32::MAX.into(), + ExecutionProof::new_dummy(), + ) + .account_update_details(AccountUpdateDetails::Delta(delta.clone())) + .build() + .unwrap() +} + #[tokio::test] async fn rpc_server_accepts_requests_without_accept_header() { // Start the RPC. @@ -209,54 +257,19 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { .without_otel_context_injection() .connect_lazy::(); - let account_id = AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Public, - ); + // Build a valid proven transaction + let (account, account_delta) = build_test_account([0; 32]); + let tx = build_test_proven_tx(&account, &account_delta); - let account = AccountBuilder::new([0; 32]) - .account_type(AccountType::RegularAccountImmutableCode) - .storage_mode(AccountStorageMode::Public) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - - let other_account = AccountBuilder::new([1; 32]) - .account_type(AccountType::RegularAccountUpdatableCode) - .storage_mode(AccountStorageMode::Private) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - let incorrect_commitment_delta: AccountDelta = other_account.try_into().unwrap(); - let incorrect_commitment_delta_bytes = incorrect_commitment_delta.to_commitment().as_bytes(); - - 
let account_delta: AccountDelta = account.clone().try_into().unwrap(); - - // Send any request to the RPC. - let tx = ProvenTransactionBuilder::new( - account_id, - [8; 32].try_into().unwrap(), - account.commitment(), - account_delta.clone().to_commitment(), // delta commitment - 0.into(), - Word::default(), - test_fee(), - u32::MAX.into(), - ExecutionProof::new_dummy(), - ) - .account_update_details(AccountUpdateDetails::Delta(account_delta)) - .build() - .unwrap(); + // Create an incorrect delta commitment from a different account + let (other_account, _) = build_test_account([1; 32]); + let incorrect_delta: AccountDelta = other_account.try_into().unwrap(); + let incorrect_commitment_bytes = incorrect_delta.to_commitment().as_bytes(); + // Corrupt the transaction bytes with the incorrect delta commitment let mut tx_bytes = tx.to_bytes(); - let offset = 15 + 32 + 32; - tx_bytes[offset..offset + 32].copy_from_slice(&incorrect_commitment_delta_bytes); + tx_bytes[DELTA_COMMITMENT_BYTE_OFFSET..DELTA_COMMITMENT_BYTE_OFFSET + 32] + .copy_from_slice(&incorrect_commitment_bytes); let request = proto::transaction::ProvenTransaction { transaction: tx_bytes, @@ -295,39 +308,8 @@ async fn rpc_server_rejects_tx_submissions_without_genesis() { .without_otel_context_injection() .connect_lazy::(); - let account_id = AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Public, - ); - - let account = AccountBuilder::new([0; 32]) - .account_type(AccountType::RegularAccountImmutableCode) - .storage_mode(AccountStorageMode::Public) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - - let account_delta: AccountDelta = account.clone().try_into().unwrap(); - - // Send any request to the RPC. 
- let tx = ProvenTransactionBuilder::new( - account_id, - [8; 32].try_into().unwrap(), - account.commitment(), - account_delta.clone().to_commitment(), // delta commitment - 0.into(), - Word::default(), - test_fee(), - u32::MAX.into(), - ExecutionProof::new_dummy(), - ) - .account_update_details(AccountUpdateDetails::Delta(account_delta)) - .build() - .unwrap(); + let (account, account_delta) = build_test_account([0; 32]); + let tx = build_test_proven_tx(&account, &account_delta); let request = proto::transaction::ProvenTransaction { transaction: tx.to_bytes(), From f035ce42b2464e3444b99676ed3b6eb504248b6c Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 2 Feb 2026 16:16:05 +0100 Subject: [PATCH 13/77] feat/db: paged loading of nullifiers and account commitments (#1536) --- CHANGELOG.md | 8 +- crates/store/src/db/mod.rs | 42 ++-- .../store/src/db/models/queries/accounts.rs | 138 +++++++++--- crates/store/src/db/models/queries/mod.rs | 1 + .../store/src/db/models/queries/nullifiers.rs | 63 ++++++ crates/store/src/errors.rs | 11 + crates/store/src/state/loader.rs | 209 +++++++++++++++--- crates/store/src/state/mod.rs | 6 +- 8 files changed, 392 insertions(+), 86 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 437a27b8b..92243d52c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,10 +62,12 @@ - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). - Limited number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)). - Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). +- Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). +- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). 
+- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)).
+- Ensure store terminates on nullifier tree or account tree root vs header mismatch ([#1569](https://github.com/0xMiden/miden-node/pull/1569)).
- Added support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/miden-node/pull/1521)).
-- Pined tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)).
-- Added `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)).
-- Added check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)).
+- Use paged queries for tree rebuilding to reduce memory usage during startup ([#1536](https://github.com/0xMiden/miden-node/pull/1536)).
 
 ### Changes
 
diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs
index 2ea19ea35..6b7ecec6a 100644
--- a/crates/store/src/db/mod.rs
+++ b/crates/store/src/db/mod.rs
@@ -30,6 +30,11 @@ use crate::db::manager::{ConnectionManager, configure_connection_on_creation};
 use crate::db::migrations::apply_migrations;
 use crate::db::models::conv::SqlTypeConvert;
 use crate::db::models::queries::StorageMapValuesPage;
+pub use crate::db::models::queries::{
+    AccountCommitmentsPage,
+    NullifiersPage,
+    PublicAccountIdsPage,
+};
 use crate::db::models::{Page, queries};
 use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError};
 use crate::genesis::GenesisBlock;
@@ -325,12 +330,15 @@ impl Db {
         Ok(me)
     }
 
-    /// Loads all the nullifiers from the DB.
+    /// Returns a page of nullifiers for tree rebuilding.
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub(crate) async fn select_all_nullifiers(&self) -> Result> { - self.transact("all nullifiers", move |conn| { - let nullifiers = queries::select_all_nullifiers(conn)?; - Ok(nullifiers) + pub async fn select_nullifiers_paged( + &self, + page_size: std::num::NonZeroUsize, + after_nullifier: Option, + ) -> Result { + self.transact("read nullifiers paged", move |conn| { + queries::select_nullifiers_paged(conn, page_size, after_nullifier) }) .await } @@ -396,20 +404,28 @@ impl Db { .await } - /// TODO marked for removal, replace with paged version + /// Returns a page of account commitments for tree rebuilding. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_all_account_commitments(&self) -> Result> { - self.transact("read all account commitments", move |conn| { - queries::select_all_account_commitments(conn) + pub async fn select_account_commitments_paged( + &self, + page_size: std::num::NonZeroUsize, + after_account_id: Option, + ) -> Result { + self.transact("read account commitments paged", move |conn| { + queries::select_account_commitments_paged(conn, page_size, after_account_id) }) .await } - /// Returns all account IDs that have public state. + /// Returns a page of public account IDs for forest rebuilding. 
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_all_public_account_ids(&self) -> Result> { - self.transact("read all public account IDs", move |conn| { - queries::select_all_public_account_ids(conn) + pub async fn select_public_account_ids_paged( + &self, + page_size: std::num::NonZeroUsize, + after_account_id: Option, + ) -> Result { + self.transact("read public account IDs paged", move |conn| { + queries::select_public_account_ids_paged(conn, page_size, after_account_id) }) .await } diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 1f4f67533..85bead244 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; +use std::num::NonZeroUsize; use std::ops::RangeInclusive; use diesel::prelude::{Queryable, QueryableByName}; @@ -254,11 +255,19 @@ pub(crate) fn select_network_account_by_id( } } -/// Select all account commitments from the DB using the given [`SqliteConnection`]. -/// -/// # Returns +/// Page of account commitments returned by [`select_account_commitments_paged`]. +#[derive(Debug)] +pub struct AccountCommitmentsPage { + /// The account commitments in this page. + pub commitments: Vec<(AccountId, Word)>, + /// If `Some`, there are more results. Use this as the `after_account_id` for the next page. + pub next_cursor: Option, +} + +/// Selects account commitments with pagination. /// -/// The vector with the account id and corresponding commitment, or an error. +/// Returns up to `page_size` account commitments, starting after `after_account_id` if provided. +/// Results are ordered by `account_id` for stable pagination. 
/// /// # Raw SQL /// @@ -270,31 +279,71 @@ pub(crate) fn select_network_account_by_id( /// accounts /// WHERE /// is_latest = 1 +/// AND (account_id > :after_account_id OR :after_account_id IS NULL) /// ORDER BY -/// block_num ASC +/// account_id ASC +/// LIMIT :page_size + 1 /// ``` -pub(crate) fn select_all_account_commitments( +pub(crate) fn select_account_commitments_paged( conn: &mut SqliteConnection, -) -> Result, DatabaseError> { - let raw = SelectDsl::select( + page_size: NonZeroUsize, + after_account_id: Option, +) -> Result { + use miden_protocol::utils::Serializable; + + // Fetch one extra to determine if there are more results + #[allow(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = SelectDsl::select( schema::accounts::table, (schema::accounts::account_id, schema::accounts::account_commitment), ) .filter(schema::accounts::is_latest.eq(true)) - .order_by(schema::accounts::block_num.asc()) - .load::<(Vec, Vec)>(conn)?; + .order_by(schema::accounts::account_id.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_account_id { + query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes())); + } - Result::, DatabaseError>::from_iter(raw.into_iter().map( + let raw = query.load::<(Vec, Vec)>(conn)?; + + let mut commitments = Result::, DatabaseError>::from_iter(raw.into_iter().map( |(ref account, ref commitment)| { Ok((AccountId::read_from_bytes(account)?, Word::read_from_bytes(commitment)?)) }, - )) + ))?; + + // If we got more than page_size, there are more results + let next_cursor = if commitments.len() > page_size.get() { + commitments.pop(); // Remove the extra element + commitments.last().map(|(id, _)| *id) + } else { + None + }; + + Ok(AccountCommitmentsPage { commitments, next_cursor }) +} + +/// Page of public account IDs returned by [`select_public_account_ids_paged`]. +#[derive(Debug)] +pub struct PublicAccountIdsPage { + /// The public account IDs in this page. 
+ pub account_ids: Vec, + /// If `Some`, there are more results. Use this as the `after_account_id` for the next page. + pub next_cursor: Option, } -/// Select all account IDs that have public state. +/// Selects public account IDs with pagination. /// -/// This filters accounts in-memory after loading only the account IDs (not commitments), -/// which is more efficient than loading full commitments when only IDs are needed. +/// Returns up to `page_size` public account IDs, starting after `after_account_id` if provided. +/// Results are ordered by `account_id` for stable pagination. +/// +/// Public accounts are those with `AccountStorageMode::Public` or `AccountStorageMode::Network`. +/// We identify them by checking `code_commitment IS NOT NULL` - public accounts store their full +/// state (including `code_commitment`), while private accounts only store the `account_commitment`. /// /// # Raw SQL /// @@ -305,31 +354,48 @@ pub(crate) fn select_all_account_commitments( /// accounts /// WHERE /// is_latest = 1 +/// AND code_commitment IS NOT NULL +/// AND (account_id > :after_account_id OR :after_account_id IS NULL) /// ORDER BY -/// block_num ASC +/// account_id ASC +/// LIMIT :page_size + 1 /// ``` -pub(crate) fn select_all_public_account_ids( +pub(crate) fn select_public_account_ids_paged( conn: &mut SqliteConnection, -) -> Result, DatabaseError> { - // We could technically use a `LIKE` constraint for both postgres and sqlite backends, - // but diesel doesn't expose that. 
- let raw: Vec> = - SelectDsl::select(schema::accounts::table, schema::accounts::account_id) - .filter(schema::accounts::is_latest.eq(true)) - .order_by(schema::accounts::block_num.asc()) - .load::>(conn)?; + page_size: NonZeroUsize, + after_account_id: Option, +) -> Result { + use miden_protocol::utils::Serializable; - Result::from_iter( - raw.into_iter() - .map(|bytes| { - AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) - }) - .filter_map(|result| match result { - Ok(id) if id.has_public_state() => Some(Ok(id)), - Ok(_) => None, - Err(e) => Some(Err(e)), - }), - ) + #[allow(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = SelectDsl::select(schema::accounts::table, schema::accounts::account_id) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::code_commitment.is_not_null()) + .order_by(schema::accounts::account_id.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_account_id { + query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes())); + } + + let raw = query.load::>(conn)?; + + let mut account_ids: Vec = Result::from_iter(raw.into_iter().map(|bytes| { + AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) + }))?; + + // If we got more than page_size, there are more results + let next_cursor = if account_ids.len() > page_size.get() { + account_ids.pop(); // Remove the extra element + account_ids.last().copied() + } else { + None + }; + + Ok(PublicAccountIdsPage { account_ids, next_cursor }) } /// Select account vault assets within a block range (inclusive). 
diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs index fe603a876..6de1b6ee1 100644 --- a/crates/store/src/db/models/queries/mod.rs +++ b/crates/store/src/db/models/queries/mod.rs @@ -48,6 +48,7 @@ pub use block_headers::*; mod accounts; pub use accounts::*; mod nullifiers; +pub use nullifiers::NullifiersPage; pub(crate) use nullifiers::*; mod notes; pub(crate) use notes::*; diff --git a/crates/store/src/db/models/queries/nullifiers.rs b/crates/store/src/db/models/queries/nullifiers.rs index 5ab578537..a13911388 100644 --- a/crates/store/src/db/models/queries/nullifiers.rs +++ b/crates/store/src/db/models/queries/nullifiers.rs @@ -1,3 +1,4 @@ +use std::num::NonZeroUsize; use std::ops::RangeInclusive; use diesel::query_dsl::methods::SelectDsl; @@ -128,6 +129,7 @@ pub(crate) fn select_nullifiers_by_prefix( /// ORDER BY /// block_num ASC /// ``` +#[cfg(test)] pub(crate) fn select_all_nullifiers( conn: &mut SqliteConnection, ) -> Result, DatabaseError> { @@ -137,6 +139,67 @@ pub(crate) fn select_all_nullifiers( vec_raw_try_into(nullifiers_raw) } +/// Page of nullifiers returned by [`select_nullifiers_paged`]. +#[derive(Debug)] +pub struct NullifiersPage { + /// The nullifiers in this page. + pub nullifiers: Vec, + /// If `Some`, there are more results. Use this as the `after_nullifier` for the next page. + pub next_cursor: Option, +} + +/// Selects nullifiers with pagination. +/// +/// Returns up to `page_size` nullifiers, starting after `after_nullifier` if provided. +/// Results are ordered by nullifier bytes for stable pagination. 
+/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// nullifier, +/// block_num +/// FROM +/// nullifiers +/// WHERE +/// (nullifier > :after_nullifier OR :after_nullifier IS NULL) +/// ORDER BY +/// nullifier ASC +/// LIMIT :page_size + 1 +/// ``` +pub(crate) fn select_nullifiers_paged( + conn: &mut SqliteConnection, + page_size: NonZeroUsize, + after_nullifier: Option, +) -> Result { + // Fetch one extra to determine if there are more results + #[allow(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = + SelectDsl::select(schema::nullifiers::table, NullifierWithoutPrefixRawRow::as_select()) + .order_by(schema::nullifiers::nullifier.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_nullifier { + query = query.filter(schema::nullifiers::nullifier.gt(cursor.to_bytes())); + } + + let nullifiers_raw = query.load::(conn)?; + let mut nullifiers: Vec = vec_raw_try_into(nullifiers_raw)?; + + // If we got more than page_size, there are more results + let next_cursor = if nullifiers.len() > page_size.get() { + nullifiers.pop(); // Remove the extra element + nullifiers.last().map(|info| info.nullifier) + } else { + None + }; + + Ok(NullifiersPage { nullifiers, next_cursor }) +} + /// Insert nullifiers for a block into the database. 
/// /// # Parameters diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 679650580..0267a42e7 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -5,6 +5,7 @@ use deadpool_sync::InteractError; use miden_node_proto::domain::account::NetworkAccountError; use miden_node_proto::domain::block::InvalidBlockRange; use miden_node_proto::errors::{ConversionError, GrpcError}; +use miden_node_utils::ErrorReport; use miden_node_utils::limiter::QueryLimitError; use miden_protocol::Word; use miden_protocol::account::AccountId; @@ -313,6 +314,16 @@ pub enum ApplyBlockError { DbUpdateTaskFailed(String), } +impl From for Status { + fn from(err: ApplyBlockError) -> Self { + match err { + ApplyBlockError::InvalidBlockError(_) => Status::invalid_argument(err.as_report()), + + _ => Status::internal(err.as_report()), + } + } +} + #[derive(Error, Debug, GrpcError)] pub enum GetBlockHeaderError { #[error("database error")] diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 504ea0631..66c5efb44 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -9,15 +9,16 @@ //! data exists, otherwise rebuilt from the database and persisted. use std::future::Future; +use std::num::NonZeroUsize; use std::path::Path; -use miden_protocol::Word; use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::NullifierTree; use miden_protocol::block::{BlockHeader, BlockNumber, Blockchain}; #[cfg(not(feature = "rocksdb"))] use miden_protocol::crypto::merkle::smt::MemoryStorage; use miden_protocol::crypto::merkle::smt::{LargeSmt, LargeSmtError, SmtStorage}; +use miden_protocol::{Felt, FieldElement, Word}; #[cfg(feature = "rocksdb")] use tracing::info; use tracing::instrument; @@ -41,6 +42,18 @@ pub const ACCOUNT_TREE_STORAGE_DIR: &str = "accounttree"; /// Directory name for the nullifier tree storage within the data directory. 
pub const NULLIFIER_TREE_STORAGE_DIR: &str = "nullifiertree"; +/// Page size for loading account commitments from the database during tree rebuilding. +/// This limits memory usage when rebuilding trees with millions of accounts. +const ACCOUNT_COMMITMENTS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(10_000).unwrap(); + +/// Page size for loading nullifiers from the database during tree rebuilding. +/// This limits memory usage when rebuilding trees with millions of nullifiers. +const NULLIFIERS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(10_000).unwrap(); + +/// Page size for loading public account IDs from the database during forest rebuilding. +/// This limits memory usage when rebuilding with millions of public accounts. +const PUBLIC_ACCOUNT_IDS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(1_000).unwrap(); + // STORAGE TYPE ALIAS // ================================================================================================ @@ -66,6 +79,14 @@ pub fn account_tree_large_smt_error_to_init_error(e: LargeSmtError) -> StateInit } } +/// Converts a block number to the leaf value format used in the nullifier tree. +/// +/// This matches the format used by `NullifierBlock::from(BlockNumber)::into()`, +/// which is `[Felt::from(block_num), 0, 0, 0]`. 
+fn block_num_to_nullifier_leaf(block_num: BlockNumber) -> Word { + Word::from([Felt::from(block_num), Felt::ZERO, Felt::ZERO, Felt::ZERO]) +} + // STORAGE LOADER TRAIT // ================================================================================================ @@ -103,27 +124,82 @@ impl StorageLoader for MemoryStorage { Ok(MemoryStorage::default()) } + #[instrument(target = COMPONENT, skip_all)] async fn load_account_tree( self, db: &mut Db, ) -> Result>, StateInitializationError> { - let account_data = db.select_all_account_commitments().await?; - let smt_entries = account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - let smt = LargeSmt::with_entries(self, smt_entries) + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load account commitments in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db + .select_account_commitments_paged(ACCOUNT_COMMITMENTS_PAGE_SIZE, cursor) + .await?; + + cursor = page.next_cursor; + if page.commitments.is_empty() { + break; + } + + let entries = page + .commitments + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) } + // TODO: Make the loading methodology for account and nullifier trees consistent. + // Currently we use `NullifierTree::new_unchecked()` for nullifiers but `AccountTree::new()` + // for accounts. Consider using `NullifierTree::with_storage_from_entries()` for consistency. 
+ #[instrument(target = COMPONENT, skip_all)] async fn load_nullifier_tree( self, db: &mut Db, ) -> Result>, StateInitializationError> { - let nullifiers = db.select_all_nullifiers().await?; - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - NullifierTree::with_storage_from_entries(self, entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) + .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load nullifiers in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db.select_nullifiers_paged(NULLIFIERS_PAGE_SIZE, cursor).await?; + + cursor = page.next_cursor; + if page.nullifiers.is_empty() { + break; + } + + let entries = page.nullifiers.into_iter().map(|info| { + (info.nullifier.as_word(), block_num_to_nullifier_leaf(info.block_num)) + }); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + + Ok(NullifierTree::new_unchecked(smt)) } } @@ -141,6 +217,7 @@ impl StorageLoader for RocksDbStorage { .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string())) } + #[instrument(target = COMPONENT, skip_all)] async fn load_account_tree( self, db: &mut Db, @@ -156,15 +233,42 @@ impl StorageLoader for RocksDbStorage { } info!(target: COMPONENT, "RocksDB account tree storage is empty, populating from SQLite"); - let account_data = db.select_all_account_commitments().await?; - let smt_entries = account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - let smt = LargeSmt::with_entries(self, smt_entries) + + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load account commitments in pages 
to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db + .select_account_commitments_paged(ACCOUNT_COMMITMENTS_PAGE_SIZE, cursor) + .await?; + + cursor = page.next_cursor; + if page.commitments.is_empty() { + break; + } + + let entries = page + .commitments + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) } + #[instrument(target = COMPONENT, skip_all)] async fn load_nullifier_tree( self, db: &mut Db, @@ -179,10 +283,36 @@ impl StorageLoader for RocksDbStorage { } info!(target: COMPONENT, "RocksDB nullifier tree storage is empty, populating from SQLite"); - let nullifiers = db.select_all_nullifiers().await?; - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - NullifierTree::with_storage_from_entries(self, entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) + + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) + .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load nullifiers in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db.select_nullifiers_paged(NULLIFIERS_PAGE_SIZE, cursor).await?; + + cursor = page.next_cursor; + if page.nullifiers.is_empty() { + break; + } + + let entries = page.nullifiers.into_iter().map(|info| { + (info.nullifier.as_word(), block_num_to_nullifier_leaf(info.block_num)) + }); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + 
} + + Ok(NullifierTree::new_unchecked(smt)) } } @@ -223,23 +353,38 @@ pub async fn load_smt_forest( ) -> Result { use miden_protocol::account::delta::AccountDelta; - let public_account_ids = db.select_all_public_account_ids().await?; - - // Acquire write lock once for the entire initialization let mut forest = InnerForest::new(); + let mut cursor = None; + + loop { + let page = db.select_public_account_ids_paged(PUBLIC_ACCOUNT_IDS_PAGE_SIZE, cursor).await?; - // Process each account - for account_id in public_account_ids { - // Get the full account from the database - let account_info = db.select_account(account_id).await?; - let account = account_info.details.expect("public accounts always have details in DB"); + if page.account_ids.is_empty() { + break; + } - // Convert the full account to a full-state delta - let delta = - AccountDelta::try_from(account).expect("accounts from DB should not have seeds"); + // Process each account in this page + for account_id in page.account_ids { + // TODO: Loading the full account from the database is inefficient and will need to + // go away. 
+ let account_info = db.select_account(account_id).await?; + let account = account_info + .details + .ok_or(StateInitializationError::PublicAccountMissingDetails(account_id))?; + + // Convert the full account to a full-state delta + let delta = AccountDelta::try_from(account).map_err(|e| { + StateInitializationError::AccountToDeltaConversionFailed(e.to_string()) + })?; + + // Use the unified update method (will recognize it's a full-state delta) + forest.update_account(block_num, &delta)?; + } - // Use the unified update method (will recognize it's a full-state delta) - forest.update_account(block_num, &delta)?; + cursor = page.next_cursor; + if cursor.is_none() { + break; + } } Ok(forest) diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index d14ef560c..55b3204ee 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -53,13 +53,15 @@ use crate::{COMPONENT, DataDirectory}; mod loader; -pub use loader::{ +use loader::{ ACCOUNT_TREE_STORAGE_DIR, NULLIFIER_TREE_STORAGE_DIR, StorageLoader, TreeStorage, + load_mmr, + load_smt_forest, + verify_tree_consistency, }; -use loader::{load_mmr, load_smt_forest, verify_tree_consistency}; mod apply_block; mod sync_state; From 03cd1e1d5758c587a3d07a861a1f3a424451fe4c Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Mon, 2 Feb 2026 13:01:36 -0300 Subject: [PATCH 14/77] chore: ntx-builder followups (#1611) * chore: ntx-builder followups * review: improve span formatting Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> --------- Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> --- CHANGELOG.md | 1 + crates/ntx-builder/src/actor/account_state.rs | 73 +++++--- crates/ntx-builder/src/actor/note_state.rs | 169 ++++++++---------- crates/ntx-builder/src/coordinator.rs | 9 +- 4 files changed, 122 insertions(+), 130 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
92243d52c..6a760db5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ ### Changes +- Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). - Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). ## v0.13.2 (2026-01-27) diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs index ad0b15ebc..e82a18929 100644 --- a/crates/ntx-builder/src/actor/account_state.rs +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -13,7 +13,7 @@ use miden_protocol::transaction::{PartialBlockchain, TransactionId}; use tracing::instrument; use super::ActorShutdownReason; -use super::note_state::{NetworkAccountEffect, NetworkAccountNoteState}; +use super::note_state::{AccountDeltaTracker, NetworkAccountEffect, NotePool}; use crate::COMPONENT; use crate::actor::inflight_note::InflightNetworkNote; use crate::builder::ChainState; @@ -49,25 +49,26 @@ pub struct TransactionCandidate { /// The current state of a network account. #[derive(Clone)] pub struct NetworkAccountState { - /// The network account ID corresponding to the network account this state represents. + /// The network account ID this state represents. account_id: NetworkAccountId, - /// Component of this state which Contains the committed and inflight account updates as well - /// as available and nullified notes. - account: NetworkAccountNoteState, + /// Tracks committed and inflight account state updates. + account: AccountDeltaTracker, + + /// Manages available and nullified notes. + notes: NotePool, /// Uncommitted transactions which have some impact on the network state. /// - /// This is tracked so we can commit or revert such transaction effects. Transactions _without_ - /// an impact are ignored. 
+ /// This is tracked so we can commit or revert transaction effects. Transactions _without_ an + /// impact are ignored. inflight_txs: BTreeMap, /// Nullifiers of all network notes targeted at this account. /// /// Used to filter mempool events: when a `TransactionAdded` event reports consumed nullifiers, - /// only those present in this set are processed (moved from `available_notes` to - /// `nullified_notes`). Nullifiers are added when notes are loaded or created, and removed - /// when the consuming transaction is committed. + /// only those present in this set are processed. Nullifiers are added when notes are loaded + /// or created, and removed when the consuming transaction is committed. known_nullifiers: HashSet, } @@ -92,10 +93,15 @@ impl NetworkAccountState { let known_nullifiers: HashSet = notes.iter().map(SingleTargetNetworkNote::nullifier).collect(); - let account = NetworkAccountNoteState::new(account, notes); + let account_tracker = AccountDeltaTracker::new(account); + let mut note_pool = NotePool::default(); + for note in notes { + note_pool.add_note(note); + } let state = Self { - account, + account: account_tracker, + notes: note_pool, account_id, inflight_txs: BTreeMap::default(), known_nullifiers, @@ -121,17 +127,17 @@ impl NetworkAccountState { chain_state: ChainState, ) -> Option { // Remove notes that have failed too many times. - self.account.drop_failing_notes(max_note_attempts); + self.notes.drop_failing_notes(max_note_attempts); // Skip empty accounts, and prune them. // This is how we keep the number of accounts bounded. - if self.account.is_empty() { + if self.is_empty() { return None; } // Select notes from the account that can be consumed or are ready for a retry. 
let notes = self - .account + .notes .available_notes(&chain_state.chain_tip_header.block_num()) .take(limit.get()) .cloned() @@ -158,7 +164,7 @@ impl NetworkAccountState { #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)] pub fn notes_failed(&mut self, notes: &[Note], block_num: BlockNumber) { let nullifiers = notes.iter().map(Note::nullifier).collect::>(); - self.account.fail_notes(nullifiers.as_slice(), block_num); + self.notes.fail_notes(nullifiers.as_slice(), block_num); } /// Updates state with the mempool event. @@ -201,6 +207,11 @@ impl NetworkAccountState { None } + /// Returns `true` if there is no inflight state being tracked. + fn is_empty(&self) -> bool { + self.account.has_no_inflight() && self.notes.is_empty() + } + /// Handles a [`MempoolEvent::TransactionAdded`] event. fn add_transaction( &mut self, @@ -238,7 +249,7 @@ impl NetworkAccountState { ); tx_impact.notes.insert(note.nullifier()); self.known_nullifiers.insert(note.nullifier()); - self.account.add_note(note.clone()); + self.notes.add_note(note.clone()); } for nullifier in nullifiers { // Ignore nullifiers that aren't network note nullifiers. @@ -246,8 +257,7 @@ impl NetworkAccountState { continue; } tx_impact.nullifiers.insert(*nullifier); - // We don't use the entry wrapper here because the account must already exist. - let _ = self.account.add_nullifier(*nullifier); + let _ = self.notes.nullify(*nullifier); } if !tx_impact.is_empty() { @@ -272,7 +282,7 @@ impl NetworkAccountState { if self.known_nullifiers.remove(&nullifier) { // Its possible for the account to no longer exist if the transaction creating it // was reverted. - self.account.commit_nullifier(nullifier); + self.notes.commit_nullifier(nullifier); } } } @@ -296,7 +306,7 @@ impl NetworkAccountState { // Revert notes. 
for note_nullifier in impact.notes { if self.known_nullifiers.contains(¬e_nullifier) { - self.account.revert_note(note_nullifier); + self.notes.remove_note(note_nullifier); self.known_nullifiers.remove(¬e_nullifier); } } @@ -304,7 +314,7 @@ impl NetworkAccountState { // Revert nullifiers. for nullifier in impact.nullifiers { if self.known_nullifiers.contains(&nullifier) { - self.account.revert_nullifier(nullifier); + self.notes.revert_nullifier(nullifier); self.known_nullifiers.remove(&nullifier); } } @@ -475,10 +485,15 @@ mod tests { let known_nullifiers: HashSet = notes.iter().map(SingleTargetNetworkNote::nullifier).collect(); - let account = NetworkAccountNoteState::new(account, notes); + let account_tracker = AccountDeltaTracker::new(account); + let mut note_pool = NotePool::default(); + for note in notes { + note_pool.add_note(note); + } Self { - account, + account: account_tracker, + notes: note_pool, account_id, inflight_txs: BTreeMap::default(), known_nullifiers, @@ -538,7 +553,7 @@ mod tests { let mut state = NetworkAccountState::new_for_testing(account, network_account_id, vec![note1, note2]); - let available_count = state.account.available_notes(&BlockNumber::from(0)).count(); + let available_count = state.notes.available_notes(&BlockNumber::from(0)).count(); assert_eq!(available_count, 2, "both notes should be available initially"); let tx_id = mock_tx_id(1); @@ -553,7 +568,7 @@ mod tests { assert!(shutdown.is_none(), "mempool_update should not trigger shutdown"); let available_nullifiers: Vec<_> = state - .account + .notes .available_notes(&BlockNumber::from(0)) .map(|n| n.to_inner().nullifier()) .collect(); @@ -634,7 +649,7 @@ mod tests { state.mempool_update(&event); // Verify note is not available - let available_count = state.account.available_notes(&BlockNumber::from(0)).count(); + let available_count = state.notes.available_notes(&BlockNumber::from(0)).count(); assert_eq!(available_count, 0, "note should not be available after being consumed"); 
// Revert the transaction @@ -644,7 +659,7 @@ mod tests { // Verify note is available again let available_nullifiers: Vec<_> = state - .account + .notes .available_notes(&BlockNumber::from(0)) .map(|n| n.to_inner().nullifier()) .collect(); @@ -687,7 +702,7 @@ mod tests { // Verify the note is available let available_nullifiers: Vec<_> = state - .account + .notes .available_notes(&BlockNumber::from(0)) .map(|n| n.to_inner().nullifier()) .collect(); diff --git a/crates/ntx-builder/src/actor/note_state.rs b/crates/ntx-builder/src/actor/note_state.rs index b7f5ef180..610334c67 100644 --- a/crates/ntx-builder/src/actor/note_state.rs +++ b/crates/ntx-builder/src/actor/note_state.rs @@ -9,59 +9,28 @@ use miden_protocol::note::Nullifier; use crate::actor::inflight_note::InflightNetworkNote; -// ACCOUNT STATE +// ACCOUNT DELTA TRACKER // ================================================================================================ -/// Tracks the state of a network account and its notes. +/// Tracks committed and inflight account state updates. #[derive(Clone)] -pub struct NetworkAccountNoteState { +pub struct AccountDeltaTracker { /// The committed account state, if any. /// - /// Its possible this is `None` if the account creation transaction is still inflight. + /// This may be `None` if the account creation transaction is still inflight. committed: Option, /// Inflight account updates in chronological order. inflight: VecDeque, - - /// Unconsumed notes of this account. - available_notes: HashMap, - - /// Notes which have been consumed by transactions that are still inflight. - nullified_notes: HashMap, } -impl NetworkAccountNoteState { - /// Creates a new account state from the supplied account and notes. 
- pub fn new(account: Account, notes: Vec) -> Self { - let account_id = NetworkAccountId::try_from(account.id()) - .expect("only network accounts are used for account state"); - - let mut state = Self { +impl AccountDeltaTracker { + /// Creates a new tracker with the given committed account state. + pub fn new(account: Account) -> Self { + Self { committed: Some(account), inflight: VecDeque::default(), - available_notes: HashMap::default(), - nullified_notes: HashMap::default(), - }; - - for note in notes { - // Currently only support single target network notes in NTB. - assert!( - note.account_id() == account_id, - "Notes supplied into account state must match expected account ID" - ); - state.add_note(note); } - - state - } - - /// Returns an iterator over inflight notes that are not currently within their respective - /// backoff periods based on block number. - pub fn available_notes( - &self, - block_num: &BlockNumber, - ) -> impl Iterator { - self.available_notes.values().filter(|¬e| note.is_available(*block_num)) } /// Appends a delta to the set of inflight account updates. @@ -85,43 +54,72 @@ impl NetworkAccountNoteState { /// Reverts the newest account state delta. /// - /// # Returns - /// - /// Returns `true` if this reverted the account creation delta. The caller _must_ remove this - /// account and associated notes as calls to `account` will panic. + /// Returns `true` if this reverted the account creation delta. The caller _must_ handle + /// cleanup as calls to `latest_account` will panic afterwards. /// /// # Panics /// /// Panics if there are no deltas to revert. - #[must_use = "must remove this account and its notes"] + #[must_use = "must handle account removal if this returns true"] pub fn revert_delta(&mut self) -> bool { self.inflight.pop_back().expect("must have a delta to revert"); self.committed.is_none() && self.inflight.is_empty() } + /// Returns the latest inflight account state. 
+ pub fn latest_account(&self) -> Account { + self.inflight + .back() + .or(self.committed.as_ref()) + .expect("account must have either a committed or inflight state") + .clone() + } + + /// Returns `true` if there are no inflight deltas. + pub fn has_no_inflight(&self) -> bool { + self.inflight.is_empty() + } +} + +// NOTE POOL +// ================================================================================================ + +/// Manages available and nullified notes for a network account. +#[derive(Clone, Default)] +pub struct NotePool { + /// Unconsumed notes available for consumption. + available: HashMap, + + /// Notes consumed by inflight transactions (not yet committed). + nullified: HashMap, +} + +impl NotePool { + /// Returns an iterator over notes that are available and not in backoff. + pub fn available_notes( + &self, + block_num: &BlockNumber, + ) -> impl Iterator { + self.available.values().filter(|¬e| note.is_available(*block_num)) + } + /// Adds a new network note making it available for consumption. pub fn add_note(&mut self, note: SingleTargetNetworkNote) { - self.available_notes.insert(note.nullifier(), InflightNetworkNote::new(note)); + self.available.insert(note.nullifier(), InflightNetworkNote::new(note)); } - /// Removes the note completely. - pub fn revert_note(&mut self, note: Nullifier) { - // Transactions can be reverted out of order. - // - // This means the tx which nullified the note might not have been reverted yet, and the note - // might still be in the nullified - self.available_notes.remove(¬e); - self.nullified_notes.remove(¬e); + /// Removes the note completely (used when reverting note creation). + pub fn remove_note(&mut self, nullifier: Nullifier) { + self.available.remove(&nullifier); + self.nullified.remove(&nullifier); } - /// Marks a note as being consumed. - /// - /// The note data is retained until the nullifier is committed. + /// Marks a note as being consumed by moving it to the nullified set. 
/// /// Returns `Err(())` if the note does not exist or was already nullified. - pub fn add_nullifier(&mut self, nullifier: Nullifier) -> Result<(), ()> { - if let Some(note) = self.available_notes.remove(&nullifier) { - self.nullified_notes.insert(nullifier, note); + pub fn nullify(&mut self, nullifier: Nullifier) -> Result<(), ()> { + if let Some(note) = self.available.remove(&nullifier) { + self.nullified.insert(nullifier, note); Ok(()) } else { tracing::warn!(%nullifier, "note must be available to nullify"); @@ -129,68 +127,47 @@ impl NetworkAccountNoteState { } } - /// Marks a nullifier as being committed, removing the associated note data entirely. + /// Commits a nullifier, removing the associated note entirely. /// - /// Silently ignores the request if the nullifier is not present, which can happen - /// if the note's transaction wasn't available when the nullifier was added. + /// Silently ignores if the nullifier is not present. pub fn commit_nullifier(&mut self, nullifier: Nullifier) { - // we might not have this if we didn't add it with `add_nullifier` - // in case it's transaction wasn't available in the first place. - // It shouldn't happen practically, since we skip them if the - // relevant account cannot be retrieved via `fetch`. - - let _ = self.nullified_notes.remove(&nullifier); + let _ = self.nullified.remove(&nullifier); } - /// Reverts a nullifier, marking the associated note as available again. + /// Reverts a nullifier, making the note available again. pub fn revert_nullifier(&mut self, nullifier: Nullifier) { // Transactions can be reverted out of order. - // - // The note may already have been fully removed by `revert_note` if the transaction creating - // the note was reverted before the transaction that consumed it. 
- if let Some(note) = self.nullified_notes.remove(&nullifier) { - self.available_notes.insert(nullifier, note); + if let Some(note) = self.nullified.remove(&nullifier) { + self.available.insert(nullifier, note); } } - /// Drops all notes that have failed to be consumed after a certain number of attempts. + /// Drops all notes that have exceeded the maximum attempt count. pub fn drop_failing_notes(&mut self, max_attempts: usize) { - self.available_notes.retain(|_, note| note.attempt_count() < max_attempts); - } - - /// Returns the latest inflight account state. - pub fn latest_account(&self) -> Account { - self.inflight - .back() - .or(self.committed.as_ref()) - .expect("account must have either a committed or inflight state") - .clone() - } - - /// Returns `true` if there is no inflight state being tracked. - /// - /// This implies this state is safe to remove without losing uncommitted data. - pub fn is_empty(&self) -> bool { - self.inflight.is_empty() - && self.available_notes.is_empty() - && self.nullified_notes.is_empty() + self.available.retain(|_, note| note.attempt_count() < max_attempts); } /// Marks the specified notes as failed. pub fn fail_notes(&mut self, nullifiers: &[Nullifier], block_num: BlockNumber) { for nullifier in nullifiers { - if let Some(note) = self.available_notes.get_mut(nullifier) { + if let Some(note) = self.available.get_mut(nullifier) { note.fail(block_num); } else { tracing::warn!(%nullifier, "failed note is not in account's state"); } } } + + /// Returns `true` if there are no notes being tracked. + pub fn is_empty(&self) -> bool { + self.available.is_empty() && self.nullified.is_empty() + } } -// NETWORK ACCOUNT UPDATE +// NETWORK ACCOUNT EFFECT // ================================================================================================ +/// Represents the effect of a transaction on a network account. 
#[derive(Clone)] pub enum NetworkAccountEffect { Created(Account), diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index 959a119fb..58d6ff4c3 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -155,12 +155,11 @@ impl Coordinator { /// message channel and can process it accordingly. /// /// If an actor fails to receive the event, it will be canceled. + #[tracing::instrument(name = "ntx.coordinator.broadcast", skip_all, fields( + actor.count = self.actor_registry.len(), + event.kind = %event.kind() + ))] pub async fn broadcast(&mut self, event: Arc) { - tracing::debug!( - actor_count = self.actor_registry.len(), - "broadcasting event to all actors" - ); - let mut failed_actors = Vec::new(); // Send event to all actors. From 60fb04f82010e17bc7cde089f474c007ad8a5378 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Tue, 3 Feb 2026 08:12:56 -0300 Subject: [PATCH 15/77] fix(rpc): orphaned test (#1623) --- Cargo.lock | 1 + crates/block-producer/Cargo.toml | 2 +- crates/block-producer/src/server/mod.rs | 3 + crates/block-producer/src/server/tests.rs | 215 ++++++++++------------ crates/rpc/src/tests.rs | 3 + 5 files changed, 103 insertions(+), 121 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e0dd6de99..f7f713aa7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2784,6 +2784,7 @@ dependencies = [ "miden-node-store", "miden-node-test-macro", "miden-node-utils", + "miden-node-validator", "miden-protocol", "miden-remote-prover-client", "miden-standards", diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index 8437dab3c..023a7a448 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -28,7 +28,6 @@ miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = 
["batch-prover", "block-prover"], workspace = true } miden-standards = { workspace = true } -miden-tx = { default-features = true, workspace = true } miden-tx-batch-prover = { workspace = true } rand = { version = "0.9" } thiserror = { workspace = true } @@ -45,6 +44,7 @@ assert_matches = { workspace = true } miden-node-store = { workspace = true } miden-node-test-macro = { workspace = true } miden-node-utils = { features = ["testing"], workspace = true } +miden-node-validator = { workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } miden-standards = { features = ["testing"], workspace = true } miden-tx = { features = ["testing"], workspace = true } diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index d3519eb00..fb6963efd 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -40,6 +40,9 @@ use crate::store::StoreClient; use crate::validator::BlockProducerValidatorClient; use crate::{CACHED_MEMPOOL_STATS_UPDATE_INTERVAL, COMPONENT, SERVER_NUM_BATCH_BUILDERS}; +#[cfg(test)] +mod tests; + /// The block producer server. /// /// Specifies how to connect to the store, batch prover, and block prover components. 
diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index 453512597..c404a2ae9 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -1,27 +1,25 @@ +use std::num::NonZeroUsize; use std::time::Duration; -use miden_air::{ExecutionProof, HashFunction}; -use miden_node_proto::generated::{ - self as proto, block_producer::api_client as block_producer_client, -}; +use miden_node_proto::generated::block_producer::api_client as block_producer_client; use miden_node_store::{GenesisState, Store}; -use miden_protocol::{ - Digest, - account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}, - transaction::ProvenTransactionBuilder, -}; -use miden_tx::utils::Serializable; -use tokio::{net::TcpListener, runtime, task, time::sleep}; +use miden_node_utils::fee::test_fee_params; +use miden_node_validator::Validator; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::testing::random_signer::RandomBlockSigner as _; +use tokio::net::TcpListener; +use tokio::time::sleep; +use tokio::{runtime, task}; use tonic::transport::{Channel, Endpoint}; -use winterfell::Proof; +use url::Url; -use crate::{BlockProducer, SERVER_MAX_BATCHES_PER_BLOCK, SERVER_MAX_TXS_PER_BATCH}; +use crate::{BlockProducer, DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH}; +/// Tests that the block producer starts up correctly even when the store is not initially +/// available. The block producer should retry with exponential backoff until the store becomes +/// available, then start serving requests. #[tokio::test] async fn block_producer_startup_is_robust_to_network_failures() { - // This test starts the block producer and tests that it starts serving only after the store - // is started. 
- // get the addresses for the store and block producer let store_addr = { let store_listener = @@ -36,113 +34,103 @@ async fn block_producer_startup_is_robust_to_network_failures() { .expect("Failed to get block-producer address") }; - let ntx_builder_addr = { - let ntx_builder_address = TcpListener::bind("127.0.0.1:0") - .await - .expect("failed to bind the ntx builder address"); - ntx_builder_address.local_addr().expect("failed to get ntx builder address") + let validator_addr = { + let validator_listener = + TcpListener::bind("127.0.0.1:0").await.expect("failed to bind validator"); + validator_listener.local_addr().expect("failed to get validator address") }; - // start the block producer + let grpc_timeout = Duration::from_secs(30); + + // start the validator + task::spawn(async move { + Validator { + address: validator_addr, + grpc_timeout, + signer: SecretKey::random(), + } + .serve() + .await + .unwrap(); + }); + + // start the block producer BEFORE the store is available + // this tests the exponential backoff behavior + let store_url = Url::parse(&format!("http://{store_addr}")).expect("Failed to parse store URL"); + let validator_url = + Url::parse(&format!("http://{validator_addr}")).expect("Failed to parse validator URL"); task::spawn(async move { BlockProducer { block_producer_address: block_producer_addr, - store_address: store_addr, - ntx_builder_address: Some(ntx_builder_addr), + store_url, + validator_url, batch_prover_url: None, - block_prover_url: None, batch_interval: Duration::from_millis(500), block_interval: Duration::from_millis(500), - max_txs_per_batch: SERVER_MAX_TXS_PER_BATCH, - max_batches_per_block: SERVER_MAX_BATCHES_PER_BLOCK, + max_txs_per_batch: DEFAULT_MAX_TXS_PER_BATCH, + max_batches_per_block: DEFAULT_MAX_BATCHES_PER_BLOCK, + grpc_timeout, + mempool_tx_capacity: NonZeroUsize::new(100).unwrap(), } .serve() .await .unwrap(); }); - // test: connecting to the block producer should fail until the store is started + // test: 
connecting to the block producer should fail because the store is not yet started + // (and therefore the block producer is not yet listening) let block_producer_endpoint = Endpoint::try_from(format!("http://{block_producer_addr}")).expect("valid url"); let block_producer_client = block_producer_client::ApiClient::connect(block_producer_endpoint.clone()).await; - assert!(block_producer_client.is_err()); + assert!( + block_producer_client.is_err(), + "Block producer should not be available before store is started" + ); // start the store let data_directory = tempfile::tempdir().expect("tempdir should be created"); - let store_runtime = { - let genesis_state = GenesisState::new(vec![], 1, 1); - Store::bootstrap(genesis_state.clone(), data_directory.path()) - .expect("store should bootstrap"); - let dir = data_directory.path().to_path_buf(); - let rpc_listener = - TcpListener::bind("127.0.0.1:0").await.expect("store should bind the RPC port"); - let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") - .await - .expect("Failed to bind store ntx-builder gRPC endpoint"); - let block_producer_listener = TcpListener::bind(store_addr) - .await - .expect("store should bind the block-producer port"); - // in order to later kill the store, we need to spawn a new runtime and run the store on - // it. That allows us to kill all the tasks spawned by the store when we - // kill the runtime. 
- let store_runtime = - runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); - store_runtime.spawn(async move { - Store { - rpc_listener, - ntx_builder_listener, - block_producer_listener, - data_directory: dir, - grpc_timeout: std::time::Duration::from_secs(30), + let store_runtime = start_store(store_addr, data_directory.path()).await; + + // wait for the block producer's exponential backoff to connect to the store + // use a retry loop since CI environments may be slower + let block_producer_client = { + let mut attempts = 0; + loop { + attempts += 1; + match block_producer_client::ApiClient::connect(block_producer_endpoint.clone()).await { + Ok(client) => break client, + Err(_) if attempts < 30 => { + sleep(Duration::from_millis(200)).await; + }, + Err(e) => panic!( + "block producer client should connect after store is started (after {attempts} attempts): {e}" + ), } - .serve() - .await - .expect("store should start serving"); - }); - store_runtime + } }; - // we need to wait for the exponential backoff of the block producer to connect to the store - sleep(Duration::from_secs(1)).await; + // test: status request against block-producer should succeed + let response = send_status_request(block_producer_client).await; + assert!(response.is_ok(), "Status request should succeed, got: {:?}", response.err()); - let block_producer_client = block_producer_client::ApiClient::connect(block_producer_endpoint) - .await - .expect("block producer client should connect"); + // verify the response contains expected data + let status = response.unwrap().into_inner(); + assert_eq!(status.status, "connected"); - // test: request against block-producer api should succeed - let response = send_request(block_producer_client.clone(), 0).await; - assert!(response.is_ok()); - - // kill the store - shutdown_store(store_runtime).await; - - // test: request against block-producer api should fail immediately - let response = 
send_request(block_producer_client.clone(), 1).await; - assert!(response.is_err()); - - // test: restart the store and request should succeed - let store_runtime = restart_store(store_addr, data_directory.path()).await; - let response = send_request(block_producer_client.clone(), 2).await; - assert!(response.is_ok()); - - // Shutdown the store before data_directory is dropped to allow RocksDB to flush properly + // Shutdown the store before data_directory is dropped to allow the database to flush properly shutdown_store(store_runtime).await; } -/// Shuts down the store runtime properly to allow RocksDB to flush before the temp directory is -/// deleted. -async fn shutdown_store(store_runtime: runtime::Runtime) { - task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) - .await - .expect("shutdown should complete"); -} - -/// Restarts a store using an existing data directory. Returns the runtime handle for shutdown. -async fn restart_store( +/// Starts the store with a fresh genesis state and returns the runtime handle. 
+async fn start_store( store_addr: std::net::SocketAddr, data_directory: &std::path::Path, ) -> runtime::Runtime { + let genesis_state = GenesisState::new(vec![], test_fee_params(), 1, 1, SecretKey::random()); + Store::bootstrap(genesis_state.clone(), data_directory).expect("store should bootstrap"); + + let dir = data_directory.to_path_buf(); let rpc_listener = TcpListener::bind("127.0.0.1:0").await.expect("store should bind the RPC port"); let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") @@ -151,7 +139,8 @@ async fn restart_store( let block_producer_listener = TcpListener::bind(store_addr) .await .expect("store should bind the block-producer port"); - let dir = data_directory.to_path_buf(); + + // Use a separate runtime so we can kill all store tasks later let store_runtime = runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); store_runtime.spawn(async move { @@ -159,8 +148,9 @@ async fn restart_store( rpc_listener, ntx_builder_listener, block_producer_listener, + block_prover_url: None, data_directory: dir, - grpc_timeout: std::time::Duration::from_secs(30), + grpc_timeout: Duration::from_secs(30), } .serve() .await @@ -169,32 +159,17 @@ async fn restart_store( store_runtime } -/// Creates a dummy transaction and submits it to the block producer. -async fn send_request( +/// Shuts down the store runtime properly to allow the database to flush before the temp directory +/// is deleted. +async fn shutdown_store(store_runtime: runtime::Runtime) { + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) + .await + .expect("shutdown should complete"); +} + +/// Sends a status request to the block producer to verify connectivity. 
+async fn send_status_request( mut client: block_producer_client::ApiClient, - i: u8, -) -> Result, tonic::Status> -{ - let tx = ProvenTransactionBuilder::new( - AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Private, - ), - Digest::default(), - [i; 32].try_into().unwrap(), - Digest::default(), - 0.into(), - Digest::default(), - u32::MAX.into(), - ExecutionProof::new(Proof::new_dummy(), HashFunction::default()), - ) - .build() - .unwrap(); - let request = proto::transaction::ProvenTransaction { - transaction: tx.to_bytes(), - transaction_replay: None, - }; - client.submit_proven_transaction(request).await +) -> Result, tonic::Status> { + client.status(()).await } diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 3d87c8328..a0b7854e5 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -247,6 +247,9 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { let (_, rpc_addr, store_addr) = start_rpc().await; let (store_runtime, _data_directory, genesis) = start_store(store_addr).await; + // Wait for the store to be ready before sending requests. + tokio::time::sleep(Duration::from_millis(100)).await; + // Override the client so that the ACCEPT header is not set. 
let mut rpc_client = miden_node_proto::clients::Builder::new(Url::parse(&format!("http://{rpc_addr}")).unwrap()) From e25efc7dc68a4b2ed38450731c9329734e900870 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 3 Feb 2026 12:33:59 +0100 Subject: [PATCH 16/77] fix(store): missing rocksdb linkage for tests (#1633) --- Cargo.lock | 1 + crates/rocksdb-cxx-linkage-fix/src/lib.rs | 4 +++- crates/store/Cargo.toml | 3 +++ crates/store/build.rs | 3 +++ crates/utils/Cargo.toml | 3 +++ 5 files changed, 13 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index f7f713aa7..795cc0981 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2993,6 +2993,7 @@ dependencies = [ "http-body-util", "itertools 0.14.0", "lru 0.16.3", + "miden-node-rocksdb-cxx-linkage-fix", "miden-protocol", "opentelemetry", "opentelemetry-otlp", diff --git a/crates/rocksdb-cxx-linkage-fix/src/lib.rs b/crates/rocksdb-cxx-linkage-fix/src/lib.rs index eeaa456d0..9eaae82fd 100644 --- a/crates/rocksdb-cxx-linkage-fix/src/lib.rs +++ b/crates/rocksdb-cxx-linkage-fix/src/lib.rs @@ -6,6 +6,7 @@ use std::env; pub fn configure() { println!("cargo:rerun-if-env-changed=ROCKSDB_COMPILE"); + println!("cargo:rerun-if-env-changed=ROCKSDB_LIB_DIR"); println!("cargo:rerun-if-env-changed=ROCKSDB_STATIC"); println!("cargo:rerun-if-env-changed=CXXSTDLIB"); let target = env::var("TARGET").unwrap_or_default(); @@ -18,8 +19,9 @@ fn should_link_cpp_stdlib() -> bool { let rocksdb_compile = env::var("ROCKSDB_COMPILE").unwrap_or_default(); let rocksdb_compile_disabled = matches!(rocksdb_compile.as_str(), "0" | "false" | "FALSE"); let rocksdb_static = env::var("ROCKSDB_STATIC").is_ok(); + let rocksdb_lib_dir_set = env::var("ROCKSDB_LIB_DIR").is_ok(); - rocksdb_compile_disabled && rocksdb_static + rocksdb_lib_dir_set || (rocksdb_static && rocksdb_compile_disabled) } fn link_cpp_stdlib(target: &str) { diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index ba3f1fd47..bbdc9ef41 100644 --- 
a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -49,6 +49,9 @@ tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } url = { workspace = true } +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } + [dev-dependencies] assert_matches = { workspace = true } criterion = { version = "0.5" } diff --git a/crates/store/build.rs b/crates/store/build.rs index d08f3fd0e..a911bea19 100644 --- a/crates/store/build.rs +++ b/crates/store/build.rs @@ -1,9 +1,12 @@ // This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in // `store/src/db/migrations.rs` to include the latest version of the migrations into the binary, see . + fn main() { println!("cargo:rerun-if-changed=./src/db/migrations"); // If we do one re-write, the default rules are disabled, // hence we need to trigger explicitly on `Cargo.toml`. // println!("cargo:rerun-if-changed=Cargo.toml"); + + miden_node_rocksdb_cxx_linkage_fix::configure(); } diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index e61930937..2c5fea6e5 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -42,5 +42,8 @@ tracing-opentelemetry = { version = "0.32" } tracing-subscriber = { workspace = true } url = { workspace = true } +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } + [dev-dependencies] thiserror = { workspace = true } From 2965984a37408ea9152adb7b0ab26f31b0675c8f Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Tue, 3 Feb 2026 14:22:17 +0200 Subject: [PATCH 17/77] ci(docker): use `cargo chef` and cache to github (#1631) --- .github/workflows/build-docker.yml | 34 ++++------------------ bin/node/Dockerfile | 45 +++++++++++++++++------------- 2 files changed, 32 insertions(+), 47 deletions(-) diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index 0e7fe0c07..b259c23fd 100644 --- 
a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -12,38 +12,16 @@ permissions: jobs: docker-build: - strategy: - matrix: - component: [node] runs-on: Linux-ARM64-Runner - name: Build ${{ matrix.component }} steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Configure AWS credentials - if: github.event.pull_request.head.repo.fork == false - uses: aws-actions/configure-aws-credentials@v4 - with: - aws-region: ${{ secrets.AWS_REGION }} - role-to-assume: ${{ secrets.AWS_ROLE }} - role-session-name: GithubActionsSession - - - name: Set cache parameters - if: github.event.pull_request.head.repo.fork == false - run: | - echo "CACHE_FROM=type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }}" >> $GITHUB_ENV - echo "CACHE_TO=type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }}" >> $GITHUB_ENV - - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - with: - cache-binary: true - - name: Build Docker image - uses: docker/build-push-action@v5 + - name: Build and push + uses: docker/build-push-action@v6 with: push: false - file: ./bin/${{ matrix.component }}/Dockerfile - cache-from: ${{ env.CACHE_FROM || '' }} - cache-to: ${{ env.CACHE_TO || '' }} + file: ./bin/node/Dockerfile + cache-from: type=gha + # Only save cache on push into next + cache-to: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' && 'type=gha,mode=max' || '' }} diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 832b0bb8d..9778daec8 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -1,39 +1,47 @@ -FROM rust:1.90-slim-bullseye AS builder - +FROM rust:1.90-slim-bullseye AS chef # Install build dependencies. RocksDB is compiled from source by librocksdb-sys. 
RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y llvm clang libclang-dev pkg-config libssl-dev libsqlite3-dev ca-certificates && \ + apt-get install -y \ + llvm \ + clang \ + libclang-dev \ + cmake \ + pkg-config \ + libssl-dev \ + libsqlite3-dev \ + ca-certificates && \ rm -rf /var/lib/apt/lists/* - +RUN cargo install cargo-chef WORKDIR /app -COPY ./Cargo.toml . -COPY ./Cargo.lock . -COPY ./bin ./bin -COPY ./crates ./crates -COPY ./proto ./proto -RUN cargo install --path bin/node --locked +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json -FROM debian:bullseye-slim +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +# Build dependencies - this is the caching Docker layer! +RUN cargo chef cook --release --recipe-path recipe.json +# Build application +COPY . . +RUN cargo build --release --locked --bin miden-node -# Update machine & install required packages -# The installation of sqlite3 is needed for correct function of the SQLite database +# Base line runtime image with runtime dependencies installed. 
+FROM debian:bullseye-slim AS runtime-base RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y --no-install-recommends \ - sqlite3 \ + apt-get install -y --no-install-recommends sqlite3 \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/miden-node /usr/local/bin/miden-node - +FROM runtime-base AS runtime +COPY --from=builder /app/target/release/miden-node /usr/local/bin/miden-node LABEL org.opencontainers.image.authors=devops@miden.team \ org.opencontainers.image.url=https://0xMiden.github.io/ \ org.opencontainers.image.documentation=https://github.com/0xMiden/miden-node \ org.opencontainers.image.source=https://github.com/0xMiden/miden-node \ org.opencontainers.image.vendor=Miden \ org.opencontainers.image.licenses=MIT - ARG CREATED ARG VERSION ARG COMMIT @@ -43,6 +51,5 @@ LABEL org.opencontainers.image.created=$CREATED \ # Expose RPC port EXPOSE 57291 - # Miden node does not spawn sub-processes, so it can be used as the PID1 CMD miden-node From 6f7737c45df9c44f34053a8e2450e5cbf57428d4 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 3 Feb 2026 15:50:30 +0100 Subject: [PATCH 18/77] fix: rocksdb missing build.rs (#1634) --- crates/utils/build.rs | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 crates/utils/build.rs diff --git a/crates/utils/build.rs b/crates/utils/build.rs new file mode 100644 index 000000000..ed4038d06 --- /dev/null +++ b/crates/utils/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} From 09f6d4cf0c3152a9cfea53026d50b4ccc54296f8 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Wed, 4 Feb 2026 00:46:49 -0300 Subject: [PATCH 19/77] chore: wrap PartialBlockchain in Arc (#1632) --- crates/ntx-builder/src/actor/account_state.rs | 5 +++- crates/ntx-builder/src/actor/execute.rs | 7 +++-- crates/ntx-builder/src/builder.rs | 30 ++++++++++++++----- 3 files changed, 31 insertions(+), 11 
deletions(-) diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs index e82a18929..b58cfd692 100644 --- a/crates/ntx-builder/src/actor/account_state.rs +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -1,5 +1,6 @@ use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::num::NonZeroUsize; +use std::sync::Arc; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; @@ -40,7 +41,9 @@ pub struct TransactionCandidate { pub chain_tip_header: BlockHeader, /// The chain MMR, which lags behind the tip by one block. - pub chain_mmr: PartialBlockchain, + /// + /// Wrapped in `Arc` to avoid expensive clones when reading the chain state. + pub chain_mmr: Arc, } // NETWORK ACCOUNT STATE diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index 671270486..59e9cdb4f 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -327,7 +327,8 @@ impl NtxContext { struct NtxDataStore { account: Account, reference_block: BlockHeader, - chain_mmr: PartialBlockchain, + /// The chain MMR, wrapped in `Arc` to avoid expensive clones when reading the chain state. + chain_mmr: Arc, mast_store: TransactionMastStore, /// Store client for retrieving note scripts. 
store: StoreClient, @@ -362,7 +363,7 @@ impl NtxDataStore { fn new( account: Account, reference_block: BlockHeader, - chain_mmr: PartialBlockchain, + chain_mmr: Arc, store: StoreClient, script_cache: LruCache, ) -> Self { @@ -421,7 +422,7 @@ impl DataStore for NtxDataStore { .await; let partial_account = PartialAccount::from(&self.account); - Ok((partial_account, self.reference_block.clone(), self.chain_mmr.clone())) + Ok((partial_account, self.reference_block.clone(), (*self.chain_mmr).clone())) } } diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 71abe49ee..14be4ef31 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -22,13 +22,26 @@ use crate::store::StoreClient; // ================================================================================================ /// Contains information about the chain that is relevant to the [`NetworkTransactionBuilder`] and -/// all account actors managed by the [`Coordinator`] +/// all account actors managed by the [`Coordinator`]. +/// +/// The chain MMR stored here contains: +/// - The MMR peaks. +/// - Block headers and authentication paths for the last [`NtxBuilderConfig::max_block_count`] +/// blocks. +/// +/// Authentication paths for older blocks are pruned because the NTX builder executes all notes as +/// "unauthenticated" (see [`InputNotes::from_unauthenticated_notes`]) and therefore does not need +/// to prove that input notes were created in specific past blocks. #[derive(Debug, Clone)] pub struct ChainState { /// The current tip of the chain. pub chain_tip_header: BlockHeader, - /// A partial representation of the latest state of the chain. - pub chain_mmr: PartialBlockchain, + /// A partial representation of the chain MMR. + /// + /// Contains block headers and authentication paths for the last + /// [`NtxBuilderConfig::max_block_count`] blocks only, since all notes are executed as + /// unauthenticated. 
+ pub chain_mmr: Arc, } impl ChainState { @@ -36,12 +49,15 @@ impl ChainState { pub(crate) fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { let chain_mmr = PartialBlockchain::new(chain_mmr, []) .expect("partial blockchain should build from partial mmr"); - Self { chain_tip_header, chain_mmr } + Self { + chain_tip_header, + chain_mmr: Arc::new(chain_mmr), + } } /// Consumes the chain state and returns the chain tip header and the partial blockchain as a /// tuple. - pub fn into_parts(self) -> (BlockHeader, PartialBlockchain) { + pub fn into_parts(self) -> (BlockHeader, Arc) { (self.chain_tip_header, self.chain_mmr) } } @@ -228,7 +244,7 @@ impl NetworkTransactionBuilder { // Update MMR which lags by one block. let mmr_tip = chain_state.chain_tip_header.clone(); - chain_state.chain_mmr.add_block(&mmr_tip, true); + Arc::make_mut(&mut chain_state.chain_mmr).add_block(&mmr_tip, true); // Set the new tip. chain_state.chain_tip_header = tip; @@ -239,6 +255,6 @@ impl NetworkTransactionBuilder { .chain_length() .as_usize() .saturating_sub(self.config.max_block_count)) as u32; - chain_state.chain_mmr.prune_to(..pruned_block_height.into()); + Arc::make_mut(&mut chain_state.chain_mmr).prune_to(..pruned_block_height.into()); } } From 20da8a9d58a909d990adbafd128c2c70554adfd1 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 6 Feb 2026 12:24:55 +0200 Subject: [PATCH 20/77] chore: bump `time` and `bytes` for dependabot security alerts (#1644) --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 795cc0981..65360401b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -471,9 +471,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "bzip2-sys" @@ -5605,9 +5605,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", @@ -5626,9 +5626,9 @@ checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", From 133e51400db0811f85ae8be1e7858fb6124f22b1 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 6 Feb 2026 21:35:03 +0200 Subject: [PATCH 21/77] ci: organize by trigger and improve caching (#1637) --- .github/workflows/ci.yml | 267 ++++++++++++++++++++++++ .github/workflows/lint.yml | 148 ------------- .github/workflows/network-monitor.yml | 37 ---- .github/workflows/nightly.yml | 68 ++++++ .github/workflows/stress-test-check.yml | 53 ----- .github/workflows/test-beta.yml | 29 --- .github/workflows/test.yml | 47 ----- 7 files changed, 335 insertions(+), 314 deletions(-) create mode 100644 .github/workflows/ci.yml delete mode 100644 .github/workflows/lint.yml delete mode 100644 .github/workflows/network-monitor.yml create mode 100644 .github/workflows/nightly.yml delete mode 100644 .github/workflows/stress-test-check.yml delete mode 100644 .github/workflows/test-beta.yml delete mode 100644 .github/workflows/test.yml diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml new file mode 100644 index 000000000..016aeba77 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,267 @@ +# Continuous integration jobs. +# +# These get run on every pull-request, with github cache updated on push into `next`. +name: CI + +permissions: + contents: read + +on: + workflow_dispatch: + push: + branches: + - main + - next + paths-ignore: + - "**.md" + - "**.txt" + - "docs/**" + pull_request: + paths-ignore: + - "**.md" + - "**.txt" + - "docs/**" + +env: + # Shared prefix key for the rust cache. + # + # This provides a convenient way to evict old or corrupted cache. + RUST_CACHE_KEY: rust-cache-2026.02.02 + # Reduce cache usage by removing debug information. + CARGO_PROFILE_DEV_DEBUG: 0 + +# Limits workflow concurrency to only the latest commit in the PR. +concurrency: + group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" + cancel-in-progress: true + +jobs: + # =============================================================================================== + # Conventional builds, lints and tests that re-use a single cache for efficiency + # =============================================================================================== + + # Normal cargo build that populates a cache for all subsequent jobs to re-use. 
+ build: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb + - name: Rustup + run: rustup update --no-self-update + - uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ github.workflow }}-build + prefix-key: ${{ env.RUST_CACHE_KEY }} + save-if: ${{ github.ref == 'refs/heads/next' }} + - name: cargo build + run: cargo build --workspace --all-targets --locked + + clippy: + name: lint - clippy + runs-on: ubuntu-24.04 + needs: [build] + steps: + - uses: actions/checkout@v6 + - name: Rustup + run: rustup update --no-self-update + - uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ github.workflow }}-build + prefix-key: ${{ env.RUST_CACHE_KEY }} + save-if: false + - name: clippy + run: cargo clippy --locked --all-targets --all-features --workspace -- -D warnings + + tests: + runs-on: ubuntu-24.04 + needs: [build] + timeout-minutes: 30 + steps: + - uses: actions/checkout@v6 + - name: Rustup + run: rustup update --no-self-update + - uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.122 + - uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ github.workflow }}-build + prefix-key: ${{ env.RUST_CACHE_KEY }} + save-if: false + - name: Build tests + run: cargo nextest run --all-features --workspace --no-run + - name: Run tests + run: cargo nextest run --all-features --workspace + - name: Doc tests + run: cargo test --doc --workspace --all-features + + doc: + needs: [build] + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Rustup + run: rustup update --no-self-update + - uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ github.workflow }}-build + prefix-key: ${{ env.RUST_CACHE_KEY }} + save-if: false + - name: Build docs + run: cargo doc --no-deps --workspace --all-features 
--locked + + # Ensures our checked-in protobuf generated code is aligned to the protobuf schema. + # + # We do this by rebuilding the generated code and ensuring there is no diff. + proto: + name: gRPC codegen + needs: [build] + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + - name: Rustup + run: rustup update --no-self-update + - name: Install protobuf + run: sudo apt-get update && sudo apt-get install -y protobuf-compiler + - uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ github.workflow }}-build + prefix-key: ${{ env.RUST_CACHE_KEY }} + save-if: false + - name: Rebuild protos + run: BUILD_PROTO=1 cargo check --all-features --all-targets --locked --workspace + - name: Diff check + run: git diff --exit-code + + # Ensure the stress-test still functions by running some cheap benchmarks. + stress-test: + name: stress test + needs: [build] + runs-on: ubuntu-24.04 + timeout-minutes: 20 + env: + DATA_DIR: /tmp/store + steps: + - uses: actions/checkout@v6 + - name: Rustup + run: rustup update --no-self-update + - uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ github.workflow }}-build + prefix-key: ${{ env.RUST_CACHE_KEY }} + save-if: false + - uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.122 + - name: Build + run: cargo build --bin miden-node-stress-test --locked + - name: Create store directory + run: mkdir -p ${{ env.DATA_DIR }} + - name: Seed the store + run: | + cargo run --bin miden-node-stress-test seed-store \ + --data-directory ${{ env.DATA_DIR }} \ + --num-accounts 500 --public-accounts-percentage 50 + - name: Benchmark state sync + run: | + cargo run --bin miden-node-stress-test benchmark-store \ + --data-directory ${{ env.DATA_DIR }} \ + --iterations 10 --concurrency 1 sync-state + - name: Benchmark notes sync + run: | + cargo run --bin miden-node-stress-test benchmark-store \ + --data-directory ${{ env.DATA_DIR }} \ + --iterations 10 --concurrency 1 sync-notes + - name: Benchmark nullifiers sync + run: | + cargo 
run --bin miden-node-stress-test benchmark-store \ + --data-directory ${{ env.DATA_DIR }} \ + --iterations 10 --concurrency 1 sync-nullifiers --prefixes 10 + + # =============================================================================================== + # WASM related jobs + # =============================================================================================== + + # Tests the miden-remote-prover-client WASM support. + # + # The WASM build is incompatible with the build job's cache, thankfully this compilation is fairly + # quick so we don't need a separate cache here. + client-wasm: + name: wasm targets + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + - name: Rustup + run: rustup update --no-self-update + - name: cargo build + run: | + cargo build --locked -p miden-remote-prover-client \ + --target wasm32-unknown-unknown --no-default-features \ + --features batch-prover,block-prover,tx-prover # no-std compatible build + - name: clippy + run: | + cargo clippy --locked -p miden-remote-prover-client \ + --target wasm32-unknown-unknown --no-default-features \ + --features batch-prover,block-prover,tx-prover -- -D warnings + + # =============================================================================================== + # Jobs that don't require caching to be efficient + # =============================================================================================== + + typos: + name: lint - spelling + runs-on: ubuntu-24.04 + timeout-minutes: 5 + steps: + - uses: actions/checkout@v6 + - uses: taiki-e/install-action@v2 + with: + tool: typos@1.42.0 + - run: make typos-check + + fmt: + name: lint - rustfmt + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + - name: Rustup +nightly + run: | + rustup update --no-self-update nightly + rustup +nightly component add rustfmt + - name: Fmt + run: make format-check + + toml: + name: lint - toml fmt + runs-on: ubuntu-24.04 + timeout-minutes: 5 + steps: + - uses: 
actions/checkout@v6 + - uses: taiki-e/install-action@v2 + with: + tool: taplo-cli@0.10.0 + - run: make toml-check + + workspace-lints: + name: lint - workspace toml + runs-on: ubuntu-24.04 + timeout-minutes: 5 + steps: + - uses: actions/checkout@v6 + - uses: taiki-e/install-action@v2 + with: + tool: cargo-workspace-lints@0.1.4 + - run: make workspace-check + + unused_deps: + name: lint - unused deps + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + - name: machete + uses: bnjbvr/cargo-machete@main diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml deleted file mode 100644 index 0f4ba580e..000000000 --- a/.github/workflows/lint.yml +++ /dev/null @@ -1,148 +0,0 @@ -# Runs linting related jobs. - -name: lint - -on: - push: - branches: [main, next] - pull_request: - types: [opened, reopened, synchronize] - -# Limits workflow concurrency to only the latest commit in the PR. -concurrency: - group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" - cancel-in-progress: true - -permissions: - contents: read - -env: - # Reduce cache usage by removing debug information. 
- CARGO_PROFILE_DEV_DEBUG: 0 - -jobs: - typos: - runs-on: ubuntu-24.04 - timeout-minutes: 5 - steps: - - uses: actions/checkout@v4 - - uses: taiki-e/install-action@v2 - with: - tool: typos@1.42.0 - - run: make typos-check - - rustfmt: - name: rustfmt - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@main - - name: Rustup - run: | - rustup update --no-self-update nightly - rustup +nightly component add rustfmt - - uses: Swatinem/rust-cache@v2 - with: - save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - name: Fmt - run: make format-check - - clippy: - name: clippy - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@main - - name: Cleanup large tools for build space - uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb - - name: Rustup - run: | - rustup update --no-self-update - rustup component add clippy - - uses: Swatinem/rust-cache@v2 - with: - save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - name: Clippy - run: make clippy - - toml: - runs-on: ubuntu-24.04 - timeout-minutes: 5 - steps: - - uses: actions/checkout@v4 - - uses: taiki-e/install-action@v2 - with: - tool: taplo-cli@0.10.0 - - run: make toml-check - - workspace-lints: - runs-on: ubuntu-24.04 - timeout-minutes: 5 - steps: - - uses: actions/checkout@v4 - - uses: taiki-e/install-action@v2 - with: - tool: cargo-workspace-lints@0.1.4 - - run: | - make workspace-check - - doc: - name: doc - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@main - - name: Cleanup large tools for build space - uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb - - name: Rustup - run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 - with: - save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - name: Build docs - run: cargo doc --no-deps --workspace --all-features --locked - - 
unused_deps: - name: check for unused dependencies - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@main - - name: machete - uses: bnjbvr/cargo-machete@main - - proto: - name: proto check - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@main - - name: Rustup - run: rustup update --no-self-update - - name: Install protobuf - run: sudo apt-get update && sudo apt-get install -y protobuf-compiler - - uses: Swatinem/rust-cache@v2 - with: - save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - name: Rebuild protos - run: make check - - name: Diff check - run: git diff --exit-code - - check-features: - name: check all feature combinations - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v4 - - name: Cleanup large tools for build space - uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb - - uses: Swatinem/rust-cache@v2 - with: - save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - name: Install rust - run: rustup update --no-self-update - - name: Install cargo-hack - uses: taiki-e/install-action@cargo-hack - - name: Check all feature combinations - run: make check-features diff --git a/.github/workflows/network-monitor.yml b/.github/workflows/network-monitor.yml deleted file mode 100644 index ca89a4df9..000000000 --- a/.github/workflows/network-monitor.yml +++ /dev/null @@ -1,37 +0,0 @@ -# Runs build and install checks for the network monitor binary. - -name: network-monitor - -on: - push: - branches: [main, next] - pull_request: - types: [opened, reopened, synchronize] - -# Limits workflow concurrency to only the latest commit in the PR. -concurrency: - group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" - cancel-in-progress: true - -permissions: - contents: read - -env: - # Reduce cache usage by removing debug information. 
- CARGO_PROFILE_DEV_DEBUG: 0 - -jobs: - check: - name: check - runs-on: ubuntu-24.04 - timeout-minutes: 15 - steps: - - uses: actions/checkout@v4 - - name: Rustup - run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 - with: - save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - name: Check network monitor (release) - run: | - BUILD_PROTO=1 cargo check --locked -p miden-network-monitor --release diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml new file mode 100644 index 000000000..a5d6e3cae --- /dev/null +++ b/.github/workflows/nightly.yml @@ -0,0 +1,68 @@ +# Checks that run once per day. +# +# These are generally expensive jobs that don't provide enough utility to run on _every_ PR. +name: nightly + +on: + workflow_dispatch: + schedule: + - cron: "0 6 * * *" # Everyday at 06:00am UTC + +permissions: + contents: read + +jobs: + # Run tests on the beta channel to provide feedback for Rust team. + beta-test: + name: test on beta channel + runs-on: ubuntu-24.04 + timeout-minutes: 30 + steps: + - uses: actions/checkout@v6 + with: + ref: 'next' + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb + - name: Rustup + run: rustup install beta && rustup default beta + - uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.122 + - name: Run tests + run: make test + + # Check all feature combinations work individually. + # + # This check is too expensive to run on every PR, both in terms of CPU and cache size. 
+ check-features: + name: feature combinations + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + with: + ref: 'next' + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb + - name: Install rust + run: rustup update --no-self-update + - name: Install cargo-hack + uses: taiki-e/install-action@cargo-hack + - name: Check all feature combinations + run: make check-features + + # Check that our MSRV complies with our specified rust version. + msrv: + name: msrv check + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + with: + ref: 'next' + - name: check + run: | + export PATH="$HOME/.cargo/bin:$PATH" + ./scripts/check-msrv.sh diff --git a/.github/workflows/stress-test-check.yml b/.github/workflows/stress-test-check.yml deleted file mode 100644 index 383440b9e..000000000 --- a/.github/workflows/stress-test-check.yml +++ /dev/null @@ -1,53 +0,0 @@ -# Runs stress test related jobs. - -name: stress-test-check - -on: - push: - branches: [main, next] - pull_request: - types: [opened, reopened, synchronize] - -permissions: - contents: read - -# Limits workflow concurrency to only the latest commit in the PR. -concurrency: - group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" - cancel-in-progress: true - -env: - # Reduce cache usage by removing debug information. 
- CARGO_PROFILE_DEV_DEBUG: 0 - -jobs: - stress-test-check: - name: stress-test-check - runs-on: Linux-ARM64-Runner - timeout-minutes: 20 - steps: - - uses: actions/checkout@main - - name: Cleanup large tools for build space - uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb - - name: Rustup - run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 - with: - save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - uses: taiki-e/install-action@v2 - with: - tool: nextest@0.9.122 - - name: Install stress test - run: make install-stress-test - - name: Create directory for stress test store - run: mkdir -p stress-test-store - - name: Seed stress test store - run: miden-node-stress-test seed-store --data-directory stress-test-store --num-accounts 500 --public-accounts-percentage 50 - - name: Run sync state benchmark - run: miden-node-stress-test benchmark-store --data-directory stress-test-store --iterations 10 --concurrency 1 sync-state - - name: Run sync notes benchmark - run: miden-node-stress-test benchmark-store --data-directory stress-test-store --iterations 10 --concurrency 1 sync-notes - - name: Run sync nullifiers benchmark - run: miden-node-stress-test benchmark-store --data-directory stress-test-store --iterations 10 --concurrency 1 sync-nullifiers --prefixes 10 diff --git a/.github/workflows/test-beta.yml b/.github/workflows/test-beta.yml deleted file mode 100644 index 07b9705fd..000000000 --- a/.github/workflows/test-beta.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Beta toolchain - -on: - schedule: - - cron: "0 6 * * *" # Everyday at 06:00am UTC - -permissions: - contents: read - -jobs: - test: - name: test - runs-on: ubuntu-24.04 - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - with: - ref: 'next' - - name: Cleanup large tools for build space - uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb - 
- name: Rustup - run: rustup install beta && rustup default beta - - uses: taiki-e/install-action@v2 - with: - tool: nextest@0.9.122 - - name: Run tests - run: make test diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index 7760225a6..000000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Runs testing related jobs. - -name: test - -on: - push: - branches: [main, next] - pull_request: - types: [opened, reopened, synchronize] - -permissions: - contents: read - -# Limits workflow concurrency to only the latest commit in the PR. -concurrency: - group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" - cancel-in-progress: true - -env: - # Reduce cache usage by removing debug information. - # This works for tests as well because TEST inherits from DEV. - CARGO_PROFILE_DEV_DEBUG: 0 - -jobs: - test: - name: test - runs-on: Linux-ARM64-Runner - timeout-minutes: 30 - steps: - - uses: actions/checkout@main - - name: Cleanup large tools for build space - uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb - - name: Rustup - run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 - with: - save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - uses: taiki-e/install-action@v2 - with: - tool: nextest@0.9.122 - - name: Run tests - run: make test - - name: Doc tests - run: cargo test --doc --workspace --all-features - From 3abf01d06973e6d6e2581d9cc7da113e6e6aeeca Mon Sep 17 00:00:00 2001 From: SantiagoPittella Date: Fri, 6 Feb 2026 17:41:07 -0300 Subject: [PATCH 22/77] chore: add missing line between changelog subtitles --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b732e18d0..b4e404387 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - Changed `note_type` field in proto `NoteMetadata` from `uint32` 
to a `NoteType` enum ([#1594](https://github.com/0xMiden/miden-node/pull/1594)). - Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). - Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). + ## v0.13.5 (TBD) - OpenTelemetry traces are now flushed before program termination on panic ([#1643](https://github.com/0xMiden/miden-node/pull/1643)). From 6de68e562f7d47edb80749ce4a66fa69bdcdab8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Garillot?= <4142+huitseeker@users.noreply.github.com> Date: Sat, 7 Feb 2026 17:30:48 +0900 Subject: [PATCH 23/77] chore: improve telemetry by adding track_caller to helpers (#1651) --- CHANGELOG.md | 1 + crates/utils/src/panic.rs | 1 + crates/utils/src/tracing/grpc.rs | 1 + 3 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b4e404387..4aefc8673 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - Changed `note_type` field in proto `NoteMetadata` from `uint32` to a `NoteType` enum ([#1594](https://github.com/0xMiden/miden-node/pull/1594)). - Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). - Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). +- Add #[track_caller] to tracing/logging helpers ([#1651](https://github.com/0xMiden/miden-node/pull/1651)). 
## v0.13.5 (TBD) diff --git a/crates/utils/src/panic.rs b/crates/utils/src/panic.rs index 1b899ee61..f524db7cd 100644 --- a/crates/utils/src/panic.rs +++ b/crates/utils/src/panic.rs @@ -8,6 +8,7 @@ pub use tower_http::catch_panic::CatchPanicLayer; /// [`tower_http::catch_panic::ResponseForPanic`] trait. /// /// This should be added to tonic server builder as a layer via [`CatchPanicLayer::custom()`]. +#[track_caller] pub fn catch_panic_layer_fn(err: Box) -> Response> { // Log the panic error details. let err = stringify_panic_error(err); diff --git a/crates/utils/src/tracing/grpc.rs b/crates/utils/src/tracing/grpc.rs index f5d0951bf..985a2e4ba 100644 --- a/crates/utils/src/tracing/grpc.rs +++ b/crates/utils/src/tracing/grpc.rs @@ -9,6 +9,7 @@ use crate::tracing::OpenTelemetrySpanExt; /// The span name is dynamically set using the HTTP path via the `otel.name` field. /// Additionally also pulls in remote tracing context which allows the server trace to be connected /// to the client's origin trace. +#[track_caller] pub fn grpc_trace_fn(request: &http::Request) -> tracing::Span { // A gRPC request's path ends with `..//`. 
let mut path_segments = request.uri().path().rsplit('/'); From 67b8170c5753d3a814c2cd94ef2dc59a507c1266 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Mon, 9 Feb 2026 16:15:00 +1300 Subject: [PATCH 24/77] chore: Add validator service file (#1638) --- .github/actions/debian/action.yml | 35 +++++++++++---------- .github/workflows/publish-debian-all.yml | 16 +++++----- .github/workflows/publish-debian.yml | 10 +++--- packaging/node/miden-validator.service | 16 ++++++++++ packaging/node/postinst | 39 +++++++++++++----------- packaging/node/postrm | 9 ++++-- 6 files changed, 75 insertions(+), 50 deletions(-) create mode 100644 packaging/node/miden-validator.service diff --git a/.github/actions/debian/action.yml b/.github/actions/debian/action.yml index 888dec4d5..302e29e81 100644 --- a/.github/actions/debian/action.yml +++ b/.github/actions/debian/action.yml @@ -28,15 +28,15 @@ inputs: options: - miden-node - miden-remote-prover - service: + package: required: true - description: The service to build the packages for. + description: The Debian package name. type: choice options: - miden-node - miden-prover - miden-prover-proxy - package: + packaging_dir: required: true description: Name of packaging directory. 
type: choice @@ -78,7 +78,7 @@ runs: - name: Create package directories shell: bash run: | - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} mkdir -p \ packaging/deb/$pkg/DEBIAN \ packaging/deb/$pkg/usr/bin \ @@ -89,15 +89,18 @@ runs: - name: Copy package install scripts shell: bash run: | - svc=${{ inputs.service }} pkg=${{ inputs.package }} + pkg_dir=${{ inputs.packaging_dir }} crate=${{ inputs.crate_dir }} - git show ${{ steps.git-sha.outputs.sha }}:bin/$crate/.env > packaging/deb/$svc/lib/systemd/system/$svc.env - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/$svc.service > packaging/deb/$svc/lib/systemd/system/$svc.service - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/postinst > packaging/deb/$svc/DEBIAN/postinst - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/postrm > packaging/deb/$svc/DEBIAN/postrm - chmod 0775 packaging/deb/$svc/DEBIAN/postinst - chmod 0775 packaging/deb/$svc/DEBIAN/postrm + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/postinst > packaging/deb/$pkg/DEBIAN/postinst + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/postrm > packaging/deb/$pkg/DEBIAN/postrm + for service_file in $(ls packaging/$pkg_dir/*.service | sed "s/.*miden/miden/g"); do + svc=$(echo $service_file | sed "s/.service//g") + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/$service_file > packaging/deb/$pkg/lib/systemd/system/$service_file + git show ${{ steps.git-sha.outputs.sha }}:bin/$crate/.env > packaging/deb/$pkg/lib/systemd/system/$svc.env + done + chmod 0775 packaging/deb/$pkg/DEBIAN/postinst + chmod 0775 packaging/deb/$pkg/DEBIAN/postrm - name: Create control files shell: bash @@ -108,7 +111,7 @@ runs: # Control file's version field must be x.y.z format so strip the rest. 
version=$(git describe --tags --abbrev=0 | sed 's/[^0-9.]//g' ) - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} cat > packaging/deb/$pkg/DEBIAN/control << EOF Package: $pkg Version: $version @@ -132,14 +135,14 @@ runs: - name: Copy binary files shell: bash run: | - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} bin=${{ inputs.crate }} cp -p ./bin/$bin packaging/deb/$pkg/usr/bin/ - name: Build packages shell: bash run: | - dpkg-deb --build --root-owner-group packaging/deb/${{ inputs.service }} + dpkg-deb --build --root-owner-group packaging/deb/${{ inputs.package }} # Save the .deb files, delete the rest. mv packaging/deb/*.deb . @@ -148,12 +151,12 @@ runs: - name: Package names shell: bash run: | - echo "package=${{ inputs.service }}-${{ inputs.gitref }}-${{ inputs.arch }}.deb" >> $GITHUB_ENV + echo "package=${{ inputs.package }}-${{ inputs.gitref }}-${{ inputs.arch }}.deb" >> $GITHUB_ENV - name: Rename package files shell: bash run: | - mv ${{ inputs.service }}.deb ${{ env.package }} + mv ${{ inputs.package}}.deb ${{ env.package }} - name: shasum packages shell: bash diff --git a/.github/workflows/publish-debian-all.yml b/.github/workflows/publish-debian-all.yml index a6d63d503..76e65d0eb 100644 --- a/.github/workflows/publish-debian-all.yml +++ b/.github/workflows/publish-debian-all.yml @@ -39,8 +39,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: node - service: miden-node - package: node + package: miden-node + packaging_dir: node crate: miden-node arch: ${{ matrix.arch }} @@ -62,8 +62,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: remote-prover - service: miden-prover - package: prover + package: miden-prover + packaging_dir: prover crate: miden-remote-prover arch: ${{ matrix.arch }} @@ -85,8 +85,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: remote-prover - service: miden-prover-proxy - package: prover-proxy + 
package: miden-prover-proxy + packaging_dir: prover-proxy crate: miden-remote-prover arch: ${{ matrix.arch }} @@ -108,7 +108,7 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: network-monitor - service: miden-network-monitor - package: network-monitor + package: miden-network-monitor + packaging_dir: network-monitor crate: miden-network-monitor arch: ${{ matrix.arch }} diff --git a/.github/workflows/publish-debian.yml b/.github/workflows/publish-debian.yml index 81e8d7447..d17d06532 100644 --- a/.github/workflows/publish-debian.yml +++ b/.github/workflows/publish-debian.yml @@ -3,8 +3,8 @@ name: Publish Debian Package on: workflow_dispatch: inputs: - service: - description: "Name of service to publish" + package: + description: "Name of package to publish" required: true type: choice options: @@ -20,7 +20,7 @@ on: - network-monitor - node - remote-prover - package: + packaging_dir: required: true description: "Name of packaging directory" type: choice @@ -48,7 +48,7 @@ permissions: jobs: publish: - name: Publish ${{ inputs.service }} ${{ matrix.arch }} Debian + name: Publish ${{ inputs.package }} ${{ matrix.arch }} Debian strategy: matrix: arch: [amd64, arm64] @@ -69,7 +69,7 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ inputs.version }} crate_dir: ${{ inputs.crate_dir }} - service: ${{ inputs.service }} package: ${{ inputs.package }} + packaging_dir: ${{ inputs.packaging_dir }} crate: ${{ inputs.crate }} arch: ${{ matrix.arch }} diff --git a/packaging/node/miden-validator.service b/packaging/node/miden-validator.service new file mode 100644 index 000000000..7b6c5de87 --- /dev/null +++ b/packaging/node/miden-validator.service @@ -0,0 +1,16 @@ +[Unit] +Description=Miden validator +Wants=network-online.target + +[Install] +WantedBy=multi-user.target + +[Service] +Type=exec +Environment="OTEL_SERVICE_NAME=miden-validator" +EnvironmentFile=/lib/systemd/system/miden-validator.env +ExecStart=/usr/bin/miden-node 
validator start +WorkingDirectory=/opt/miden-validator +User=miden-validator +RestartSec=5 +Restart=always diff --git a/packaging/node/postinst b/packaging/node/postinst index 8967f9e54..036b2d112 100644 --- a/packaging/node/postinst +++ b/packaging/node/postinst @@ -2,25 +2,28 @@ # # This is a postinstallation script so the service can be configured and started when requested. -# user is expected by the systemd service file and `/opt/` is its working directory, -sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent miden-node +for svc in miden-node miden-validator; do + # user is expected by the systemd service file and `/opt/` is its working directory, + sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent "$svc" -# Working folder. -if [ -d "/opt/miden-node" ] -then - echo "Directory /opt/miden-node exists." -else - mkdir -p /opt/miden-node -fi -sudo chown -R miden-node /opt/miden-node + # Working folder. + if [ -d "/opt/$svc" ] + then + echo "Directory /opt/$svc exists." + else + mkdir -p "/opt/$svc" + fi + sudo chown -R "$svc" "/opt/$svc" -# Configuration folder -if [ -d "/etc/opt/miden-node" ] -then - echo "Directory /etc/opt/miden-node exists." -else - mkdir -p /etc/opt/miden-node -fi -sudo chown -R miden-node /etc/opt/miden-node + # Configuration folder + if [ -d "/etc/opt/$svc" ] + then + echo "Directory /etc/opt/$svc exists." 
+ else + mkdir -p "/etc/opt/$svc" + fi + sudo chown -R "$svc" "/etc/opt/$svc" + +done sudo systemctl daemon-reload diff --git a/packaging/node/postrm b/packaging/node/postrm index 893a53588..86a9846a2 100644 --- a/packaging/node/postrm +++ b/packaging/node/postrm @@ -3,7 +3,10 @@ ############### # Remove miden-node installs ############## -sudo rm -rf /lib/systemd/system/miden-node.service -sudo rm -rf /etc/opt/miden-node -sudo deluser miden-node +for svc in miden-node miden-validator; do + sudo rm -rf "/lib/systemd/system/$svc.service" + sudo rm -rf "/etc/opt/$svc" + sudo deluser "$svc" +done + sudo systemctl daemon-reload From 8e105fc4e469cea821cddb616a66a3dcddbd3204 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Garillot?= <4142+huitseeker@users.noreply.github.com> Date: Tue, 10 Feb 2026 08:36:19 +0900 Subject: [PATCH 25/77] Improve tracing span fields (#1650) * Improve tracing fields for structured logging * chore: Changelog * Mark spans as failed on panic for OpenTelemetry Adds tracing::Span::current().set_error() calls in both panic handlers: - catch_panic_layer_fn in crates/utils/src/panic.rs - panic hook in crates/utils/src/logging.rs This ensures OpenTelemetry properly tracks span error status when panics occur. --- CHANGELOG.md | 1 + crates/ntx-builder/src/coordinator.rs | 17 ++++++++++++----- crates/store/src/db/migrations.rs | 4 ++-- crates/store/src/db/mod.rs | 9 ++++++++- crates/store/src/db/schema_hash.rs | 14 ++++++++++++-- crates/store/src/genesis/config/mod.rs | 2 +- crates/store/src/server/api.rs | 23 ++++++++++++++++++++--- crates/utils/src/logging.rs | 9 ++++++++- crates/utils/src/panic.rs | 8 +++++++- 9 files changed, 71 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4aefc8673..a67edba7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). 
- Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). - Add #[track_caller] to tracing/logging helpers ([#1651](https://github.com/0xMiden/miden-node/pull/1651)). +- Improved tracing span fields ([#1650](https://github.com/0xMiden/miden-node/pull/1650)) ## v0.13.5 (TBD) diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index 58d6ff4c3..673c40106 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -123,7 +123,10 @@ impl Coordinator { // If an actor already exists for this account ID, something has gone wrong. if let Some(handle) = self.actor_registry.remove(&account_id) { - tracing::error!("account actor already exists for account: {}", account_id); + tracing::error!( + account_id = %account_id, + "Account actor already exists" + ); handle.cancel_token.cancel(); } @@ -144,7 +147,7 @@ impl Coordinator { } self.actor_registry.insert(account_id, handle); - tracing::info!("created actor for account: {}", account_id); + tracing::info!(account_id = %account_id, "Created actor for account prefix"); Ok(()) } @@ -165,7 +168,11 @@ impl Coordinator { // Send event to all actors. for (account_id, handle) in &self.actor_registry { if let Err(err) = Self::send(handle, event.clone()).await { - tracing::error!("failed to send event to actor {}: {}", account_id, err); + tracing::error!( + account_id = %account_id, + error = %err, + "Failed to send event to actor" + ); failed_actors.push(*account_id); } } @@ -192,11 +199,11 @@ impl Coordinator { ActorShutdownReason::Cancelled(account_id) => { // Do not remove the actor from the registry, as it may be re-spawned. // The coordinator should always remove actors immediately after cancellation. 
- tracing::info!("account actor cancelled: {}", account_id); + tracing::info!(account_id = %account_id, "Account actor cancelled"); Ok(()) }, ActorShutdownReason::AccountReverted(account_id) => { - tracing::info!("account reverted: {}", account_id); + tracing::info!(account_id = %account_id, "Account reverted"); self.actor_registry.remove(&account_id); Ok(()) }, diff --git a/crates/store/src/db/migrations.rs b/crates/store/src/db/migrations.rs index 01521e578..8aa0f0a00 100644 --- a/crates/store/src/db/migrations.rs +++ b/crates/store/src/db/migrations.rs @@ -15,14 +15,14 @@ pub fn apply_migrations( conn: &mut SqliteConnection, ) -> std::result::Result<(), crate::errors::DatabaseError> { let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); - tracing::info!(target = COMPONENT, "Applying {} migration(s)", migrations.len()); + tracing::info!(target = COMPONENT, migrations = migrations.len(), "Applying migrations"); let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { // Migrations applied successfully, verify schema hash verify_schema(conn)?; return Ok(()); }; - tracing::warn!(target = COMPONENT, "Failed to apply migration: {e:?}"); + tracing::warn!(target = COMPONENT, error = ?e, "Failed to apply migration"); // something went wrong, MIGRATIONS contains conn.revert_last_migration(MIGRATIONS) .expect("Duality is maintained by the developer"); diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 6b7ecec6a..a9b77eb9b 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -344,7 +344,14 @@ impl Db { } /// Loads the nullifiers that match the prefixes from the DB. 
- #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + #[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(prefix_len, prefixes = nullifier_prefixes.len()), + ret(level = "debug"), + err + )] pub async fn select_nullifiers_by_prefix( &self, prefix_len: u32, diff --git a/crates/store/src/db/schema_hash.rs b/crates/store/src/db/schema_hash.rs index 28e480fc0..bcb417ce9 100644 --- a/crates/store/src/db/schema_hash.rs +++ b/crates/store/src/db/schema_hash.rs @@ -107,10 +107,20 @@ pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificati // Log specific differences at debug level for obj in &missing { - tracing::debug!(target: COMPONENT, name = %obj.name, "Missing or modified: {}", obj.sql); + tracing::debug!( + target: COMPONENT, + name = %obj.name, + sql = %obj.sql, + "Missing or modified" + ); } for obj in &extra { - tracing::debug!(target: COMPONENT, name = %obj.name, "Extra or modified: {}", obj.sql); + tracing::debug!( + target: COMPONENT, + name = %obj.name, + sql = %obj.sql, + "Extra or modified" + ); } return Err(SchemaVerificationError::Mismatch { diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index e7abe8b58..345253291 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -158,7 +158,7 @@ impl GenesisConfig { for (index, WalletConfig { has_updatable_code, storage_mode, assets }) in wallet_configs.into_iter().enumerate() { - tracing::debug!("Adding wallet account {index} with {assets:?}"); + tracing::debug!(index, assets = ?assets, "Adding wallet account"); let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index 63e0f5675..dbea1e2e3 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -176,7 +176,13 @@ 
where } #[allow(clippy::result_large_err)] -#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(nullifiers = nullifiers.len()), + err +)] pub fn validate_nullifiers(nullifiers: &[proto::primitives::Digest]) -> Result, E> where E: From + std::fmt::Display, @@ -190,7 +196,13 @@ where } #[allow(clippy::result_large_err)] -#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(notes = notes.len()), + err +)] pub fn validate_note_commitments(notes: &[proto::primitives::Digest]) -> Result, Status> { notes .iter() @@ -199,7 +211,12 @@ pub fn validate_note_commitments(notes: &[proto::primitives::Digest]) -> Result< .map_err(|_| invalid_argument("Digest field is not in the modulus range")) } -#[instrument(level = "debug",target = COMPONENT, skip_all)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(block_numbers = block_numbers.len()) +)] pub fn read_block_numbers(block_numbers: &[u32]) -> BTreeSet { BTreeSet::from_iter(block_numbers.iter().map(|raw_number| BlockNumber::from(*raw_number))) } diff --git a/crates/utils/src/logging.rs b/crates/utils/src/logging.rs index 6593943f4..589365030 100644 --- a/crates/utils/src/logging.rs +++ b/crates/utils/src/logging.rs @@ -10,6 +10,8 @@ use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::layer::{Filter, SubscriberExt}; use tracing_subscriber::{Layer, Registry}; +use crate::tracing::OpenTelemetrySpanExt; + /// Global tracer provider for flushing traces on panic. /// /// This is necessary because the panic hook needs access to the tracer provider to flush @@ -89,7 +91,12 @@ pub fn setup_tracing(otel: OpenTelemetry) -> anyhow::Result> { // This chains with the default panic hook to preserve backtrace printing. 
let default_hook = std::panic::take_hook(); std::panic::set_hook(Box::new(move |info| { - tracing::error!(panic = true, "{info}"); + tracing::error!(panic = true, info = %info, "panic"); + + // Mark the current span as failed for OpenTelemetry. + let info_str = info.to_string(); + let wrapped = anyhow::Error::msg(info_str); + tracing::Span::current().set_error(wrapped.as_ref()); // Flush traces before the program terminates. // This ensures the panic trace is exported even though the OtelGuard won't be dropped. diff --git a/crates/utils/src/panic.rs b/crates/utils/src/panic.rs index f524db7cd..c330fe362 100644 --- a/crates/utils/src/panic.rs +++ b/crates/utils/src/panic.rs @@ -4,6 +4,8 @@ use http::{Response, StatusCode, header}; use http_body_util::Full; pub use tower_http::catch_panic::CatchPanicLayer; +use crate::tracing::OpenTelemetrySpanExt; + /// Custom callback that is used by Tower to fulfill the /// [`tower_http::catch_panic::ResponseForPanic`] trait. /// @@ -12,7 +14,11 @@ pub use tower_http::catch_panic::CatchPanicLayer; pub fn catch_panic_layer_fn(err: Box) -> Response> { // Log the panic error details. let err = stringify_panic_error(err); - tracing::error!(panic = true, "{err}"); + tracing::error!(panic = true, error = %err, "panic"); + + // Mark the current span as failed for OpenTelemetry. + let wrapped = anyhow::Error::msg(err.clone()); + tracing::Span::current().set_error(wrapped.as_ref()); // Return generic error response. 
Response::builder() From 2987a8de1d77e1e2ee38b8cadef2b9c9a7f119dd Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 10 Feb 2026 15:26:28 +0100 Subject: [PATCH 26/77] chore/deps: `proto` and `protox` bump and better comment on `=x.y.z` need (#1658) --- Cargo.lock | 50 ++++++++++++++++++-------------------------------- Cargo.toml | 9 +++++---- 2 files changed, 23 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 65360401b..85179d0f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,17 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.17", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.12" @@ -1334,7 +1323,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -1679,9 +1668,6 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.8", -] [[package]] name = "hashbrown" @@ -2112,7 +2098,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -3479,7 +3465,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3866,7 +3852,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "blake2", "bytes", @@ -3902,7 +3888,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "brotli", "bytes", @@ -3992,7 +3978,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash 0.8.12", + "ahash", ] [[package]] @@ -4024,7 +4010,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.12.3", + "hashbrown 0.16.1", "parking_lot", "rand 0.8.5", ] @@ -4351,9 +4337,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ "bytes", "prost-derive", @@ -4366,7 +4352,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ "heck 0.5.0", - "itertools 0.10.5", + "itertools 0.14.0", "log", "multimap", "once_cell", @@ -4388,7 +4374,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.10.5", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.114", @@ -4443,9 +4429,9 @@ dependencies = [ [[package]] name = "protox" -version = "0.9.0" +version = "0.9.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8555716f64c546306ddf3383065dc40d4232609e79e0a4c50e94e87d54f30fb4" +checksum = "4f25a07a73c6717f0b9bbbd685918f5df9815f7efba450b83d9c9dea41f0e3a1" dependencies = [ "bytes", "miette", @@ -4841,7 +4827,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4854,7 +4840,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -5486,7 +5472,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -5495,7 +5481,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -6459,7 +6445,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index caccabc5d..d8563722f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,10 +78,11 @@ indexmap = { version = "2.12" } itertools = { version = "0.14" } lru = { default-features = false, version = "0.16" } pretty_assertions = { version = "1.4" } -# breaking change `DecodeError::new` is not exposed anymore -# but is assumed public by some internal dependency -prost = { default-features = false, version = "=0.14.1" } -protox = { version = "=0.9.0" } +# prost and protox are from different authors and are _not_ released in +# lockstep, nor are they adhering to semver semantics. We keep this +# to avoid future breakage. 
+prost = { default-features = false, version = "=0.14.3" } +protox = { version = "=0.9.1" } rand = { version = "0.9" } rand_chacha = { version = "0.9" } rstest = { version = "0.26" } From 2f45a9cc555cb23081a7eaaf0dbe267ae9419891 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 10 Feb 2026 16:40:14 +0100 Subject: [PATCH 27/77] chore/rocksdb: static lib linkage for one more `miden-crypto` use (#1659) --- Cargo.lock | 1 + crates/proto/Cargo.toml | 9 ++++---- crates/proto/build.rs | 2 ++ crates/rocksdb-cxx-linkage-fix/src/lib.rs | 25 +++++++++++++++++------ 4 files changed, 27 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 85179d0f9..15164a5d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2836,6 +2836,7 @@ dependencies = [ "http", "miden-node-grpc-error-macro", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index 6d3589ca3..2e9767f88 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -33,7 +33,8 @@ assert_matches = { workspace = true } proptest = { version = "1.7" } [build-dependencies] -fs-err = { workspace = true } -miden-node-proto-build = { features = ["internal"], workspace = true } -miette = { version = "7.6" } -tonic-prost-build = { workspace = true } +fs-err = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miette = { version = "7.6" } +tonic-prost-build = { workspace = true } diff --git a/crates/proto/build.rs b/crates/proto/build.rs index b0ac773a7..5a39c1d54 100644 --- a/crates/proto/build.rs +++ b/crates/proto/build.rs @@ -22,6 +22,8 @@ fn main() -> miette::Result<()> { println!("cargo::rerun-if-changed=../../proto/proto"); println!("cargo::rerun-if-env-changed=BUILD_PROTO"); + miden_node_rocksdb_cxx_linkage_fix::configure(); + // Skip this build script in 
BUILD_PROTO environment variable is not set to `1`. if env::var("BUILD_PROTO").unwrap_or("0".to_string()) == "0" { return Ok(()); diff --git a/crates/rocksdb-cxx-linkage-fix/src/lib.rs b/crates/rocksdb-cxx-linkage-fix/src/lib.rs index 9eaae82fd..35bc05d00 100644 --- a/crates/rocksdb-cxx-linkage-fix/src/lib.rs +++ b/crates/rocksdb-cxx-linkage-fix/src/lib.rs @@ -15,16 +15,29 @@ pub fn configure() { } } -fn should_link_cpp_stdlib() -> bool { - let rocksdb_compile = env::var("ROCKSDB_COMPILE").unwrap_or_default(); - let rocksdb_compile_disabled = matches!(rocksdb_compile.as_str(), "0" | "false" | "FALSE"); - let rocksdb_static = env::var("ROCKSDB_STATIC").is_ok(); - let rocksdb_lib_dir_set = env::var("ROCKSDB_LIB_DIR").is_ok(); +fn should_compile() -> bool { + // in sync with + if let Ok(v) = env::var("ROCKSDB_COMPILE") { + if v.to_lowercase() == "true" || v == "1" { + return true; + } + } + false +} - rocksdb_lib_dir_set || (rocksdb_static && rocksdb_compile_disabled) +fn should_link_cpp_stdlib() -> bool { + if should_compile() { + return false; + } + // the value doesn't matter + // + env::var("ROCKSDB_STATIC").is_ok() + // `ROCKSDB_LIB_DIR` is not really discriminative, it only adds extra lookup dirs for the linker } fn link_cpp_stdlib(target: &str) { + // aligned with + // if let Ok(stdlib) = env::var("CXXSTDLIB") { println!("cargo:rustc-link-lib=dylib={stdlib}"); } else if target.contains("apple") || target.contains("freebsd") || target.contains("openbsd") { From 4659bea2999af8cc5e3c0425474751a3da7111c6 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Wed, 11 Feb 2026 03:46:28 -0300 Subject: [PATCH 28/77] feat: add ntx-builder database (#1654) --- Cargo.lock | 5 + Cargo.toml | 6 + crates/ntx-builder/Cargo.toml | 7 +- crates/ntx-builder/build.rs | 11 + crates/ntx-builder/diesel.toml | 5 + crates/ntx-builder/src/db/errors.rs | 69 +++++++ crates/ntx-builder/src/db/manager.rs | 86 ++++++++ 
crates/ntx-builder/src/db/migrations.rs | 29 +++ .../migrations/2026020900000_setup/down.sql | 1 + .../db/migrations/2026020900000_setup/up.sql | 59 ++++++ crates/ntx-builder/src/db/mod.rs | 121 +++++++++++ crates/ntx-builder/src/db/schema.rs | 32 +++ crates/ntx-builder/src/db/schema_hash.rs | 190 ++++++++++++++++++ crates/ntx-builder/src/lib.rs | 2 + crates/store/Cargo.toml | 10 +- 15 files changed, 627 insertions(+), 6 deletions(-) create mode 100644 crates/ntx-builder/build.rs create mode 100644 crates/ntx-builder/diesel.toml create mode 100644 crates/ntx-builder/src/db/errors.rs create mode 100644 crates/ntx-builder/src/db/manager.rs create mode 100644 crates/ntx-builder/src/db/migrations.rs create mode 100644 crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql create mode 100644 crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql create mode 100644 crates/ntx-builder/src/db/mod.rs create mode 100644 crates/ntx-builder/src/db/schema.rs create mode 100644 crates/ntx-builder/src/db/schema_hash.rs diff --git a/Cargo.lock b/Cargo.lock index 15164a5d0..52c342f35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2806,6 +2806,11 @@ name = "miden-node-ntx-builder" version = "0.14.0" dependencies = [ "anyhow", + "deadpool", + "deadpool-diesel", + "deadpool-sync", + "diesel", + "diesel_migrations", "futures", "indexmap 2.13.0", "miden-node-proto", diff --git a/Cargo.toml b/Cargo.toml index d8563722f..d481c1df0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,6 +69,11 @@ anyhow = { version = "1.0" } assert_matches = { version = "1.5" } async-trait = { version = "0.1" } clap = { features = ["derive"], version = "4.5" } +deadpool = { default-features = false, version = "0.12" } +deadpool-diesel = { version = "0.6" } +deadpool-sync = { default-features = false, version = "0.1" } +diesel = { version = "2.3" } +diesel_migrations = { version = "2.3" } fs-err = { version = "3" } futures = { version = "0.3" } hex = { version = "0.4" } @@ -90,6 +95,7 @@ serde = 
{ features = ["derive"], version = "1" } thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } +tokio-util = { version = "0.7" } toml = { version = "0.9" } tonic = { default-features = false, version = "0.14" } tonic-prost = { version = "0.14" } diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 06ed8eb3b..e1d6dab84 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -15,6 +15,11 @@ workspace = true [dependencies] anyhow = { workspace = true } +deadpool = { features = ["managed", "rt_tokio_1"], workspace = true } +deadpool-diesel = { features = ["sqlite"], workspace = true } +deadpool-sync = { features = ["tracing"], workspace = true } +diesel = { features = ["numeric", "sqlite"], workspace = true } +diesel_migrations = { features = ["sqlite"], workspace = true } futures = { workspace = true } indexmap = { workspace = true } miden-node-proto = { workspace = true } @@ -25,7 +30,7 @@ miden-tx = { default-features = true, workspace = true } thiserror = { workspace = true } tokio = { features = ["rt-multi-thread"], workspace = true } tokio-stream = { workspace = true } -tokio-util = { version = "0.7" } +tokio-util = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } url = { workspace = true } diff --git a/crates/ntx-builder/build.rs b/crates/ntx-builder/build.rs new file mode 100644 index 000000000..881be3168 --- /dev/null +++ b/crates/ntx-builder/build.rs @@ -0,0 +1,11 @@ +// This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in +// `src/db/migrations.rs` to include the latest version of the migrations into the binary, see +// . + +fn main() { + println!("cargo:rerun-if-changed=./src/db/migrations"); + // If we do one re-write, the default rules are disabled, + // hence we need to trigger explicitly on `Cargo.toml`. 
+ // + println!("cargo:rerun-if-changed=Cargo.toml"); +} diff --git a/crates/ntx-builder/diesel.toml b/crates/ntx-builder/diesel.toml new file mode 100644 index 000000000..71215dbf7 --- /dev/null +++ b/crates/ntx-builder/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/db/schema.rs" diff --git a/crates/ntx-builder/src/db/errors.rs b/crates/ntx-builder/src/db/errors.rs new file mode 100644 index 000000000..1ea43e382 --- /dev/null +++ b/crates/ntx-builder/src/db/errors.rs @@ -0,0 +1,69 @@ +use deadpool_sync::InteractError; + +use crate::db::manager::ConnectionManagerError; + +// DATABASE ERRORS +// ================================================================================================ + +#[derive(Debug, thiserror::Error)] +pub enum DatabaseError { + #[error("setup deadpool connection pool failed")] + ConnectionPoolObtainError(#[from] Box), + #[error(transparent)] + Diesel(#[from] diesel::result::Error), + #[error("SQLite pool interaction failed: {0}")] + InteractError(String), + #[error("schema verification failed")] + SchemaVerification(#[from] SchemaVerificationError), + #[error("connection manager error")] + ConnectionManager(#[source] ConnectionManagerError), +} + +impl DatabaseError { + /// Converts from `InteractError`. + /// + /// Required since `InteractError` has at least one enum variant that is _not_ `Send + + /// Sync` and hence prevents the `Sync` auto implementation. This does an internal + /// conversion to string while maintaining convenience. 
+ pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { + let msg = msg.to_string(); + Self::InteractError(format!("{msg} failed: {e:?}")) + } +} + +// DATABASE SETUP ERRORS +// ================================================================================================ + +#[derive(Debug, thiserror::Error)] +pub enum DatabaseSetupError { + #[error("I/O error")] + Io(#[from] std::io::Error), + #[error("database error")] + Database(#[from] DatabaseError), + #[error("pool build error")] + PoolBuild(#[source] deadpool::managed::BuildError), +} + +// SCHEMA VERIFICATION ERRORS +// ================================================================================================ + +/// Errors that can occur during schema verification. +#[derive(Debug, thiserror::Error)] +pub enum SchemaVerificationError { + #[error("failed to create in-memory reference database")] + InMemoryDbCreation(#[source] diesel::ConnectionError), + #[error("failed to apply migrations to reference database")] + MigrationApplication(#[source] Box), + #[error("failed to extract schema from database")] + SchemaExtraction(#[source] diesel::result::Error), + #[error( + "schema mismatch: expected {expected_count} objects, found {actual_count} \ + ({missing_count} missing, {extra_count} unexpected)" + )] + Mismatch { + expected_count: usize, + actual_count: usize, + missing_count: usize, + extra_count: usize, + }, +} diff --git a/crates/ntx-builder/src/db/manager.rs b/crates/ntx-builder/src/db/manager.rs new file mode 100644 index 000000000..4234e09dd --- /dev/null +++ b/crates/ntx-builder/src/db/manager.rs @@ -0,0 +1,86 @@ +//! A minimal connection manager wrapper. +//! +//! Only required to setup connection parameters, specifically `WAL`. 
+ +use deadpool_sync::InteractError; +use diesel::{RunQueryDsl, SqliteConnection}; + +#[derive(thiserror::Error, Debug)] +pub enum ConnectionManagerError { + #[error("failed to apply connection parameter")] + ConnectionParamSetup(#[source] diesel::result::Error), + #[error("SQLite pool interaction failed: {0}")] + InteractError(String), + #[error("failed to create a new connection")] + ConnectionCreate(#[source] deadpool_diesel::Error), + #[error("failed to recycle connection")] + PoolRecycle(#[source] deadpool::managed::RecycleError), +} + +impl ConnectionManagerError { + /// Converts from `InteractError`. + /// + /// Required since `InteractError` has at least one enum variant that is _not_ `Send + + /// Sync` and hence prevents the `Sync` auto implementation. + pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { + let msg = msg.to_string(); + Self::InteractError(format!("{msg} failed: {e:?}")) + } +} + +/// Create a connection manager with per-connection setup. +/// +/// Particularly, `foreign_key` checks are enabled and using a write-append-log for journaling. 
+pub(crate) struct ConnectionManager { + pub(crate) manager: deadpool_diesel::sqlite::Manager, +} + +impl ConnectionManager { + pub(crate) fn new(database_path: &str) -> Self { + let manager = deadpool_diesel::sqlite::Manager::new( + database_path.to_owned(), + deadpool_diesel::sqlite::Runtime::Tokio1, + ); + Self { manager } + } +} + +impl deadpool::managed::Manager for ConnectionManager { + type Type = deadpool_sync::SyncWrapper; + type Error = ConnectionManagerError; + + async fn create(&self) -> Result { + let conn = self.manager.create().await.map_err(ConnectionManagerError::ConnectionCreate)?; + + conn.interact(configure_connection_on_creation) + .await + .map_err(|e| ConnectionManagerError::interact("Connection setup", &e))??; + Ok(conn) + } + + async fn recycle( + &self, + conn: &mut Self::Type, + metrics: &deadpool_diesel::Metrics, + ) -> deadpool::managed::RecycleResult { + self.manager.recycle(conn, metrics).await.map_err(|err| { + deadpool::managed::RecycleError::Backend(ConnectionManagerError::PoolRecycle(err)) + })?; + Ok(()) + } +} + +pub(crate) fn configure_connection_on_creation( + conn: &mut SqliteConnection, +) -> Result<(), ConnectionManagerError> { + // Enable the WAL mode. This allows concurrent reads while a write is in progress. + diesel::sql_query("PRAGMA journal_mode=WAL") + .execute(conn) + .map_err(ConnectionManagerError::ConnectionParamSetup)?; + + // Enable foreign key checks. 
+ diesel::sql_query("PRAGMA foreign_keys=ON") + .execute(conn) + .map_err(ConnectionManagerError::ConnectionParamSetup)?; + Ok(()) +} diff --git a/crates/ntx-builder/src/db/migrations.rs b/crates/ntx-builder/src/db/migrations.rs new file mode 100644 index 000000000..069bdd411 --- /dev/null +++ b/crates/ntx-builder/src/db/migrations.rs @@ -0,0 +1,29 @@ +use diesel::SqliteConnection; +use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::errors::DatabaseError; +use crate::db::schema_hash::verify_schema; + +// The rebuild is automatically triggered by `build.rs` as described in +// . +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations"); + +#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +pub fn apply_migrations(conn: &mut SqliteConnection) -> Result<(), DatabaseError> { + let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); + tracing::info!(target: COMPONENT, migrations = migrations.len(), "Applying pending migrations"); + + let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { + // Migrations applied successfully, verify schema hash. + verify_schema(conn)?; + return Ok(()); + }; + tracing::warn!(target: COMPONENT, "Failed to apply migration: {e:?}"); + // Something went wrong; revert the last migration. 
+ conn.revert_last_migration(MIGRATIONS) + .expect("Duality is maintained by the developer"); + + Ok(()) +} diff --git a/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql b/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql @@ -0,0 +1 @@ + diff --git a/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql new file mode 100644 index 000000000..2588a85bd --- /dev/null +++ b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql @@ -0,0 +1,59 @@ +-- Singleton row storing the chain tip header. +-- The chain MMR is reconstructed on startup from the store and maintained in memory. +CREATE TABLE chain_state ( + -- Singleton constraint: only one row allowed. + id INTEGER PRIMARY KEY CHECK (id = 0), + -- Block number of the chain tip. + block_num INTEGER NOT NULL, + -- Serialized BlockHeader. + block_header BLOB NOT NULL, + + CONSTRAINT chain_state_block_num_is_u32 CHECK (block_num BETWEEN 0 AND 0xFFFFFFFF) +); + +-- Account states: both committed and inflight. +-- Committed rows have transaction_id = NULL. Inflight rows have transaction_id set. +-- The auto-incrementing order_id preserves insertion order (VecDeque semantics). +CREATE TABLE accounts ( + -- Auto-incrementing ID preserves insertion order. + order_id INTEGER PRIMARY KEY AUTOINCREMENT, + -- AccountId serialized bytes (8 bytes). + account_id BLOB NOT NULL, + -- Serialized Account state. + account_data BLOB NOT NULL, + -- NULL if this is the committed state; transaction ID if inflight. + transaction_id BLOB +); + +-- At most one committed row per account. 
+CREATE UNIQUE INDEX idx_accounts_committed ON accounts(account_id) WHERE transaction_id IS NULL; +CREATE INDEX idx_accounts_account ON accounts(account_id); +CREATE INDEX idx_accounts_tx ON accounts(transaction_id) WHERE transaction_id IS NOT NULL; + +-- Notes: committed, inflight, and nullified — all in one table. +-- created_by = NULL means committed note; non-NULL means created by inflight tx. +-- consumed_by = NULL means unconsumed; non-NULL means consumed by inflight tx. +-- Row is deleted once consumption is committed. +CREATE TABLE notes ( + -- Nullifier bytes (32 bytes). Primary key. + nullifier BLOB PRIMARY KEY, + -- Target account ID. + account_id BLOB NOT NULL, + -- Serialized SingleTargetNetworkNote. + note_data BLOB NOT NULL, + -- Backoff tracking: number of failed execution attempts. + attempt_count INTEGER NOT NULL DEFAULT 0, + -- Backoff tracking: block number of the last failed attempt. NULL if never attempted. + last_attempt INTEGER, + -- NULL if the note came from a committed block; transaction ID if created by inflight tx. + created_by BLOB, + -- NULL if unconsumed; transaction ID of the consuming inflight tx. 
+ consumed_by BLOB, + + CONSTRAINT notes_attempt_count_non_negative CHECK (attempt_count >= 0), + CONSTRAINT notes_last_attempt_is_u32 CHECK (last_attempt BETWEEN 0 AND 0xFFFFFFFF) +) WITHOUT ROWID; + +CREATE INDEX idx_notes_account ON notes(account_id); +CREATE INDEX idx_notes_created_by ON notes(created_by) WHERE created_by IS NOT NULL; +CREATE INDEX idx_notes_consumed_by ON notes(consumed_by) WHERE consumed_by IS NOT NULL; diff --git a/crates/ntx-builder/src/db/mod.rs b/crates/ntx-builder/src/db/mod.rs new file mode 100644 index 000000000..febd14f1b --- /dev/null +++ b/crates/ntx-builder/src/db/mod.rs @@ -0,0 +1,121 @@ +use std::path::PathBuf; + +use anyhow::Context; +use diesel::{Connection, SqliteConnection}; +use tracing::{info, instrument}; + +use crate::COMPONENT; +use crate::db::errors::{DatabaseError, DatabaseSetupError}; +use crate::db::manager::{ConnectionManager, configure_connection_on_creation}; +use crate::db::migrations::apply_migrations; + +pub mod errors; +pub(crate) mod manager; + +mod migrations; +mod schema_hash; + +/// [diesel](https://diesel.rs) generated schema. +pub(crate) mod schema; + +pub type Result = std::result::Result; + +pub struct Db { + pool: deadpool_diesel::Pool>, +} + +impl Db { + /// Creates a new database file, configures it, and applies migrations. + /// + /// This is a synchronous one-shot setup used during node initialization. + /// For runtime access with a connection pool, use [`Db::load`]. + #[instrument( + target = COMPONENT, + name = "ntx_builder.database.bootstrap", + skip_all, + fields(path=%database_filepath.display()), + err, + )] + pub fn bootstrap(database_filepath: PathBuf) -> anyhow::Result<()> { + let mut conn: SqliteConnection = diesel::sqlite::SqliteConnection::establish( + database_filepath.to_str().context("database filepath is invalid")?, + ) + .context("failed to open a database connection")?; + + configure_connection_on_creation(&mut conn)?; + + // Run migrations. 
+        apply_migrations(&mut conn).context("failed to apply database migrations")?;
+
+        Ok(())
+    }
+
+    /// Create and commit a transaction with the queries added in the provided closure.
+    #[allow(dead_code)]
+    pub(crate) async fn transact<Q, R, M, E>(&self, msg: M, query: Q) -> std::result::Result<R, E>
+    where
+        Q: Send
+            + for<'a, 't> FnOnce(&'a mut SqliteConnection) -> std::result::Result<R, E>
+            + 'static,
+        R: Send + 'static,
+        M: Send + ToString,
+        E: From<DatabaseError>,
+        E: From<diesel::result::Error>,
+        E: std::error::Error + Send + Sync + 'static,
+    {
+        let conn = self
+            .pool
+            .get()
+            .await
+            .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?;
+
+        conn.interact(|conn| <_ as diesel::Connection>::transaction::<R, E, _>(conn, query))
+            .await
+            .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))?
+    }
+
+    /// Run the query _without_ a transaction.
+    pub(crate) async fn query<Q, R, M, E>(&self, msg: M, query: Q) -> std::result::Result<R, E>
+    where
+        Q: Send + FnOnce(&mut SqliteConnection) -> std::result::Result<R, E> + 'static,
+        R: Send + 'static,
+        M: Send + ToString,
+        E: From<DatabaseError>,
+        E: std::error::Error + Send + Sync + 'static,
+    {
+        let conn = self
+            .pool
+            .get()
+            .await
+            .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?;
+
+        conn.interact(move |conn| {
+            let r = query(conn)?;
+            Ok(r)
+        })
+        .await
+        .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))?
+    }
+
+    /// Opens a connection pool to an existing database and re-applies pending migrations.
+    ///
+    /// Use [`Db::bootstrap`] first to create and initialize the database file.
+    #[instrument(target = COMPONENT, skip_all)]
+    pub async fn load(database_filepath: PathBuf) -> Result<Self> {
+        let manager = ConnectionManager::new(database_filepath.to_str().unwrap());
+        let pool = deadpool_diesel::Pool::builder(manager)
+            .max_size(16)
+            .build()
+            .map_err(DatabaseSetupError::PoolBuild)?;
+
+        info!(
+            target: COMPONENT,
+            sqlite = %database_filepath.display(),
+            "Connected to the database"
+        );
+
+        let me = Db { pool };
+        me.query("migrations", apply_migrations).await?;
+        Ok(me)
+    }
+}
diff --git a/crates/ntx-builder/src/db/schema.rs b/crates/ntx-builder/src/db/schema.rs
new file mode 100644
index 000000000..74ee8d462
--- /dev/null
+++ b/crates/ntx-builder/src/db/schema.rs
@@ -0,0 +1,32 @@
+// @generated automatically by Diesel CLI.
+
+diesel::table! {
+    accounts (order_id) {
+        order_id -> Nullable<Integer>,
+        account_id -> Binary,
+        account_data -> Binary,
+        transaction_id -> Nullable<Binary>,
+    }
+}
+
+diesel::table! {
+    chain_state (id) {
+        id -> Nullable<Integer>,
+        block_num -> Integer,
+        block_header -> Binary,
+    }
+}
+
+diesel::table! {
+    notes (nullifier) {
+        nullifier -> Binary,
+        account_id -> Binary,
+        note_data -> Binary,
+        attempt_count -> Integer,
+        last_attempt -> Nullable<Integer>,
+        created_by -> Nullable<Binary>,
+        consumed_by -> Nullable<Binary>,
+    }
+}
+
+diesel::allow_tables_to_appear_in_same_query!(accounts, chain_state, notes,);
diff --git a/crates/ntx-builder/src/db/schema_hash.rs b/crates/ntx-builder/src/db/schema_hash.rs
new file mode 100644
index 000000000..21ebb0c7b
--- /dev/null
+++ b/crates/ntx-builder/src/db/schema_hash.rs
@@ -0,0 +1,190 @@
+//! Schema verification to detect database schema changes.
+//!
+//! Detects:
+//!
+//! - Direct modifications to the database schema outside of migrations
+//! - Running a node against a database created with a different set of migrations
+//! - Forgetting to reset the database after schema changes, e.g. to a specific migration
+//!
+//! The verification works by creating an in-memory reference database, applying all
+//! 
migrations to it, and comparing its schema against the actual database schema. + +use diesel::{Connection, RunQueryDsl, SqliteConnection}; +use diesel_migrations::MigrationHarness; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::errors::SchemaVerificationError; +use crate::db::migrations::MIGRATIONS; + +/// Represents a schema object for comparison. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +struct SchemaObject { + object_type: String, + name: String, + sql: String, +} + +/// Represents a row from the `sqlite_schema` table. +#[derive(diesel::QueryableByName, Debug)] +struct SqliteSchemaRow { + #[diesel(sql_type = diesel::sql_types::Text)] + schema_type: String, + #[diesel(sql_type = diesel::sql_types::Text)] + name: String, + #[diesel(sql_type = diesel::sql_types::Nullable)] + sql: Option, +} + +/// Extracts all schema objects from a database connection. +fn extract_schema( + conn: &mut SqliteConnection, +) -> Result, SchemaVerificationError> { + let rows: Vec = diesel::sql_query( + "SELECT type as schema_type, name, sql FROM sqlite_schema \ + WHERE type IN ('table', 'index') \ + AND name NOT LIKE 'sqlite_%' \ + AND name NOT LIKE '__diesel_%' \ + ORDER BY type, name", + ) + .load(conn) + .map_err(SchemaVerificationError::SchemaExtraction)?; + + let mut objects: Vec = rows + .into_iter() + .filter_map(|row| { + row.sql.map(|sql| SchemaObject { + object_type: row.schema_type, + name: row.name, + sql, + }) + }) + .collect(); + + objects.sort(); + Ok(objects) +} + +/// Computes the expected schema by applying migrations to an in-memory database. +fn compute_expected_schema() -> Result, SchemaVerificationError> { + let mut conn = SqliteConnection::establish(":memory:") + .map_err(SchemaVerificationError::InMemoryDbCreation)?; + + conn.run_pending_migrations(MIGRATIONS) + .map_err(SchemaVerificationError::MigrationApplication)?; + + extract_schema(&mut conn) +} + +/// Verifies that the database schema matches the expected schema. 
+/// +/// Creates an in-memory database, applies all migrations, and compares schemas. +/// +/// # Errors +/// +/// Returns `SchemaVerificationError::Mismatch` if schemas differ. +#[instrument(level = "info", target = COMPONENT, skip_all, err)] +pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificationError> { + let expected = compute_expected_schema()?; + let actual = extract_schema(conn)?; + + if actual != expected { + let expected_names: Vec<_> = expected.iter().map(|o| &o.name).collect(); + let actual_names: Vec<_> = actual.iter().map(|o| &o.name).collect(); + + // Find differences for better error messages. + let missing: Vec<_> = expected.iter().filter(|e| !actual.contains(e)).collect(); + let extra: Vec<_> = actual.iter().filter(|a| !expected.contains(a)).collect(); + + tracing::error!( + target: COMPONENT, + ?expected_names, + ?actual_names, + missing_count = missing.len(), + extra_count = extra.len(), + "Database schema mismatch detected" + ); + + // Log specific differences at debug level. 
+ for obj in &missing { + tracing::debug!(target: COMPONENT, name = %obj.name, "Missing or modified: {}", obj.sql); + } + for obj in &extra { + tracing::debug!(target: COMPONENT, name = %obj.name, "Extra or modified: {}", obj.sql); + } + + return Err(SchemaVerificationError::Mismatch { + expected_count: expected.len(), + actual_count: actual.len(), + missing_count: missing.len(), + extra_count: extra.len(), + }); + } + + tracing::info!( + target: COMPONENT, + objects = expected.len(), + "Database schema verification passed" + ); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::db::errors::DatabaseError; + use crate::db::migrations::apply_migrations; + + #[test] + fn verify_schema_passes_for_correct_schema() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + verify_schema(&mut conn).expect("Should pass for correct schema"); + } + + #[test] + fn verify_schema_fails_for_added_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE TABLE rogue_table (id INTEGER PRIMARY KEY)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. }) + )); + } + + #[test] + fn verify_schema_fails_for_removed_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("DROP TABLE notes").execute(&mut conn).unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. 
}) + )); + } + + #[test] + fn apply_migrations_succeeds_on_fresh_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + apply_migrations(&mut conn).expect("Should succeed on fresh database"); + } + + #[test] + fn apply_migrations_fails_on_tampered_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE TABLE tampered (id INTEGER)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!(apply_migrations(&mut conn), Err(DatabaseError::SchemaVerification(_)))); + } +} diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index fe32f850f..d77a8dd7d 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -16,6 +16,8 @@ mod actor; mod block_producer; mod builder; mod coordinator; +#[expect(dead_code, reason = "will be used as part of follow-up work")] +pub(crate) mod db; mod store; pub use builder::NetworkTransactionBuilder; diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index bbdc9ef41..8850a4c4e 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -16,11 +16,11 @@ workspace = true [dependencies] anyhow = { workspace = true } -deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } -deadpool-diesel = { features = ["sqlite"], version = "0.6" } -deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.3" } -diesel_migrations = { features = ["sqlite"], version = "2.3" } +deadpool = { features = ["managed", "rt_tokio_1"], workspace = true } +deadpool-diesel = { features = ["sqlite"], workspace = true } +deadpool-sync = { features = ["tracing"], workspace = true } +diesel = { features = ["numeric", "sqlite"], workspace = true } +diesel_migrations = { features = ["sqlite"], workspace = true } fs-err = { workspace = true } futures = { 
workspace = true } hex = { version = "0.4" } From 58cc57d6ef9c1a082bd8fb073f036df98ae97753 Mon Sep 17 00:00:00 2001 From: Himess <95512809+Himess@users.noreply.github.com> Date: Wed, 11 Feb 2026 10:27:22 +0300 Subject: [PATCH 29/77] feat(store): add typed error codes for GetAccount endpoint (#1646) --- CHANGELOG.md | 1 + crates/store/src/errors.rs | 97 ++++++++++++++++++++++++++ crates/store/src/server/ntx_builder.rs | 9 ++- crates/store/src/server/rpc_api.rs | 3 +- crates/store/src/state/mod.rs | 57 ++++++--------- docs/external/src/rpc.md | 13 ++++ 6 files changed, 143 insertions(+), 37 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a67edba7b..02b443a8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ ### Changes +- [BREAKING] Added typed `GetAccountError` for `GetAccount` endpoint, splitting `BlockNotAvailable` into `UnknownBlock` and `BlockPruned`. `AccountNotFound` and `AccountNotPublic` now return `InvalidArgument` gRPC status instead of `NotFound`; clients should parse the error details discriminant rather than branching on status codes ([#1646](https://github.com/0xMiden/miden-node/pull/1646)). - Changed `note_type` field in proto `NoteMetadata` from `uint32` to a `NoteType` enum ([#1594](https://github.com/0xMiden/miden-node/pull/1594)). - Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). - Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). 
diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 0267a42e7..99f658eab 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -489,6 +489,26 @@ pub enum GetBlockByNumberError { DeserializationFailed(#[from] DeserializationError), } +// GET ACCOUNT ERRORS +// ================================================================================================ + +#[derive(Debug, Error, GrpcError)] +pub enum GetAccountError { + #[error("database error")] + #[grpc(internal)] + DatabaseError(#[from] DatabaseError), + #[error("malformed request")] + DeserializationFailed(#[from] ConversionError), + #[error("account {0} not found at block {1}")] + AccountNotFound(AccountId, BlockNumber), + #[error("account {0} is not public")] + AccountNotPublic(AccountId), + #[error("block {0} is unknown")] + UnknownBlock(BlockNumber), + #[error("block {0} has been pruned")] + BlockPruned(BlockNumber), +} + // GET NOTES BY ID ERRORS // ================================================================================================ @@ -581,6 +601,83 @@ pub enum SchemaVerificationError { }, } +#[cfg(test)] +mod get_account_error_tests { + use miden_protocol::account::AccountId; + use miden_protocol::block::BlockNumber; + use miden_protocol::testing::account_id::AccountIdBuilder; + use tonic::Status; + + use super::GetAccountError; + + fn test_account_id() -> AccountId { + AccountIdBuilder::new().build_with_seed([1; 32]) + } + + #[test] + fn unknown_block_returns_invalid_argument() { + let block = BlockNumber::from(999); + let err = GetAccountError::UnknownBlock(block); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + assert!(!status.metadata().is_empty() || !status.details().is_empty()); + } + + #[test] + fn block_pruned_returns_invalid_argument() { + let block = BlockNumber::from(1); + let err = GetAccountError::BlockPruned(block); + let status: Status = err.into(); + assert_eq!(status.code(), 
tonic::Code::InvalidArgument); + } + + #[test] + fn account_not_public_returns_invalid_argument() { + let err = GetAccountError::AccountNotPublic(test_account_id()); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn account_not_found_returns_invalid_argument_with_block_context() { + let account_id = test_account_id(); + let block = BlockNumber::from(5); + let err = GetAccountError::AccountNotFound(account_id, block); + let msg = err.to_string(); + assert!(msg.contains("not found"), "error message should mention 'not found'"); + assert!(msg.contains("block"), "error message should include block context"); + + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn each_variant_has_unique_discriminant() { + let account_id = test_account_id(); + let block = BlockNumber::from(1); + + let errors = [ + GetAccountError::AccountNotFound(account_id, block), + GetAccountError::AccountNotPublic(account_id), + GetAccountError::UnknownBlock(block), + GetAccountError::BlockPruned(block), + ]; + + let codes: Vec = errors.iter().map(|e| e.api_error().api_code()).collect(); + + // All non-internal variants should have unique, non-zero discriminants + for &code in &codes { + assert_ne!(code, 0, "non-internal variants should not map to Internal (0)"); + } + + // Check uniqueness + let mut sorted = codes.clone(); + sorted.sort_unstable(); + sorted.dedup(); + assert_eq!(sorted.len(), codes.len(), "all error variants should have unique codes"); + } +} + // Do not scope for `cfg(test)` - if it the traitbounds don't suffice the issue will already appear // in the compilation of the library or binary, which would prevent getting to compiling the // following code. 
diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index a0fefa0e7..6a61b4daf 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -16,7 +16,12 @@ use tracing::debug; use crate::COMPONENT; use crate::db::models::Page; -use crate::errors::{GetNetworkAccountIdsError, GetNoteScriptByRootError, GetWitnessesError}; +use crate::errors::{ + GetAccountError, + GetNetworkAccountIdsError, + GetNoteScriptByRootError, + GetWitnessesError, +}; use crate::server::api::{ StoreApi, internal_error, @@ -167,7 +172,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); - let account_request = request.try_into()?; + let account_request = request.try_into().map_err(GetAccountError::DeserializationFailed)?; let proof = self.state.get_account(account_request).await?; diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index fb3924da6..6c78e1ebf 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -17,6 +17,7 @@ use tracing::{debug, info}; use crate::COMPONENT; use crate::errors::{ CheckNullifiersError, + GetAccountError, GetBlockByNumberError, GetNoteScriptByRootError, GetNotesByIdError, @@ -250,7 +251,7 @@ impl rpc_server::Rpc for StoreApi { ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); - let account_request = request.try_into()?; + let account_request = request.try_into().map_err(GetAccountError::DeserializationFailed)?; let account_data = self.state.get_account(account_request).await?; diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 55b3204ee..40f6f29e6 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -42,6 +42,7 @@ use crate::db::{Db, NoteRecord, NullifierInfo}; use crate::errors::{ ApplyBlockError, DatabaseError, + 
GetAccountError, GetBatchInputsError, GetBlockHeaderError, GetBlockInputsError, @@ -609,11 +610,11 @@ impl State { pub async fn get_account( &self, account_request: AccountRequest, - ) -> Result { + ) -> Result { let AccountRequest { block_num, account_id, details } = account_request; if details.is_some() && !account_id.has_public_state() { - return Err(DatabaseError::AccountNotPublic(account_id)); + return Err(GetAccountError::AccountNotPublic(account_id)); } let (block_num, witness) = self.get_account_witness(block_num, account_id).await?; @@ -635,19 +636,20 @@ impl State { &self, block_num: Option, account_id: AccountId, - ) -> Result<(BlockNumber, AccountWitness), DatabaseError> { + ) -> Result<(BlockNumber, AccountWitness), GetAccountError> { let inner_state = self.inner.read().await; // Determine which block to query let (block_num, witness) = if let Some(requested_block) = block_num { // Historical query: use the account tree with history - let witness = inner_state - .account_tree - .open_at(account_id, requested_block) - .ok_or_else(|| DatabaseError::HistoricalBlockNotAvailable { - block_num: requested_block, - reason: "Block is either in the future or has been pruned from history" - .to_string(), + let witness = + inner_state.account_tree.open_at(account_id, requested_block).ok_or_else(|| { + let latest_block = inner_state.account_tree.block_number_latest(); + if requested_block > latest_block { + GetAccountError::UnknownBlock(requested_block) + } else { + GetAccountError::BlockPruned(requested_block) + } })?; (requested_block, witness) } else { @@ -674,7 +676,7 @@ impl State { account_id: AccountId, block_num: BlockNumber, detail_request: AccountDetailRequest, - ) -> Result { + ) -> Result { let AccountDetailRequest { code_commitment, asset_vault_commitment, @@ -682,18 +684,25 @@ impl State { } = detail_request; if !account_id.has_public_state() { - return Err(DatabaseError::AccountNotPublic(account_id)); + return 
Err(GetAccountError::AccountNotPublic(account_id)); } // Validate block exists in the blockchain before querying the database - self.validate_block_exists(block_num).await?; + { + let inner = self.inner.read().await; + let latest_block_num = inner.latest_block_num(); + + if block_num > latest_block_num { + return Err(GetAccountError::UnknownBlock(block_num)); + } + } // Query account header and storage header together in a single DB call let (account_header, storage_header) = self .db .select_account_header_with_storage_header_at_block(account_id, block_num) .await? - .ok_or(DatabaseError::AccountAtBlockHeightNotFoundInDb(account_id, block_num))?; + .ok_or(GetAccountError::AccountNotFound(account_id, block_num))?; let account_code = match code_commitment { Some(commitment) if commitment == account_header.code_commitment() => None, @@ -772,26 +781,6 @@ impl State { self.inner.read().await.latest_block_num() } - /// Validates that a block exists in the blockchain - /// - /// # Attention - /// - /// Acquires a *read lock** on `self.inner`. - /// - /// # Errors - /// - /// Returns `DatabaseError::BlockNotFound` if the block doesn't exist in the blockchain. - async fn validate_block_exists(&self, block_num: BlockNumber) -> Result<(), DatabaseError> { - let inner = self.inner.read().await; - let latest_block_num = inner.latest_block_num(); - - if block_num > latest_block_num { - return Err(DatabaseError::BlockNotFound(block_num)); - } - - Ok(()) - } - /// Emits metrics for each database table's size. pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { self.db.analyze_table_sizes().await diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md index b26e88131..e25bbd54d 100644 --- a/docs/external/src/rpc.md +++ b/docs/external/src/rpc.md @@ -107,6 +107,19 @@ The witness proves the account's state commitment in the account tree. If detail If `block_num` is provided, returns the state at that historical block; otherwise, returns the latest state. 
+#### Error Codes + +When the request fails, detailed error information is provided through gRPC status details. The following error codes may be returned: + +| Error Code | Value | gRPC Status | Description | +|---------------------------|-------|--------------------|------------------------------------------------------| +| `INTERNAL_ERROR` | 0 | `INTERNAL` | Internal server error occurred | +| `DESERIALIZATION_FAILED` | 1 | `INVALID_ARGUMENT` | Request could not be deserialized | +| `ACCOUNT_NOT_FOUND` | 2 | `INVALID_ARGUMENT` | Account not found at the requested block | +| `ACCOUNT_NOT_PUBLIC` | 3 | `INVALID_ARGUMENT` | Account details requested for a non-public account | +| `UNKNOWN_BLOCK` | 4 | `INVALID_ARGUMENT` | Requested block number is unknown | +| `BLOCK_PRUNED` | 5 | `INVALID_ARGUMENT` | Requested block has been pruned | + ### GetBlockByNumber Request the raw data for a specific block. From e354490f81385e2266630c65059070a54922aceb Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 11 Feb 2026 16:40:56 +0100 Subject: [PATCH 30/77] chore/makefile: add `stress-test-smoke` target (#1665) --- .gitignore | 1 + Makefile | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/.gitignore b/.gitignore index 0a086d3d0..a4d92ce8e 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ # will have compiled files and executables debug/ target/ +miden-node-stress-test-* # Generated by protox `file_descriptor_set.bin` *.bin diff --git a/Makefile b/Makefile index 64aa55bf4..fd1408f70 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,7 @@ help: WARNINGS=RUSTDOCFLAGS="-D warnings" BUILD_PROTO=BUILD_PROTO=1 CONTAINER_RUNTIME ?= docker +STRESS_TEST_DATA_DIR ?= stress-test-store-$(shell date +%Y%m%d-%H%M%S) # -- linting -------------------------------------------------------------------------------------- @@ -108,6 +109,15 @@ install-node: ## Installs node install-remote-prover: ## Install remote prover's CLI $(BUILD_PROTO) cargo install --path 
bin/remote-prover --bin miden-remote-prover --features concurrent --locked
+
+.PHONY: stress-test-smoke
+stress-test-smoke: ## Runs stress-test benchmarks
+	${BUILD_PROTO} cargo build --release --locked -p miden-node-stress-test
+	@mkdir -p $(STRESS_TEST_DATA_DIR)
+	./target/release/miden-node-stress-test seed-store --data-directory $(STRESS_TEST_DATA_DIR) --num-accounts 500 --public-accounts-percentage 50
+	./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-state
+	./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-notes
+	./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-nullifiers --prefixes 10
+
 .PHONY: install-stress-test
 install-stress-test: ## Installs stress-test binary
 	cargo install --path bin/stress-test --locked

From 2e32e206d9e3502368a06686b9ffe4f7cc22397b Mon Sep 17 00:00:00 2001
From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com>
Date: Fri, 13 Feb 2026 14:41:22 +0200
Subject: [PATCH 31/77] chore(lint): prefer `#[expect]` over `#[allow]` (#1668)

---
 Cargo.toml                                     |  1 +
 bin/network-monitor/src/counter.rs             |  2 +-
 bin/network-monitor/src/faucet.rs              |  4 ++--
 bin/node/src/commands/bundled.rs               |  2 +-
 bin/node/src/commands/store.rs                 |  2 +-
 bin/remote-prover/src/api/prover.rs            |  3 ---
 bin/remote-prover/src/generated/mod.rs         |  1 +
 bin/remote-prover/src/proxy/mod.rs             |  2 --
 bin/remote-prover/src/proxy/worker.rs          |  1 -
 bin/stress-test/src/seeding/metrics.rs         |  4 ++--
 bin/stress-test/src/seeding/mod.rs             |  2 +-
 bin/stress-test/src/store/metrics.rs           |  2 +-
 bin/stress-test/src/store/mod.rs               | 10 ++++-----
 .../block-producer/src/domain/transaction.rs   | 10 ++++-----
 crates/block-producer/src/lib.rs               |  2 +-
 crates/block-producer/src/server/mod.rs        |  1 -
 crates/ntx-builder/src/actor/mod.rs            |  2 +-
 crates/ntx-builder/src/db/mod.rs               |  2 +-
crates/proto/build.rs | 22 ++++++++++--------- crates/proto/src/generated/mod.rs | 8 +++++-- crates/remote-prover-client/src/lib.rs | 2 +- .../src/remote_prover/generated/mod.rs | 1 + crates/rpc/src/server/api.rs | 4 +--- crates/store/src/accounts/mod.rs | 2 +- crates/store/src/accounts/tests.rs | 8 +++---- crates/store/src/db/models/conv.rs | 12 +++++----- .../store/src/db/models/queries/accounts.rs | 6 ++--- .../src/db/models/queries/block_headers.rs | 2 +- crates/store/src/db/models/queries/mod.rs | 2 +- crates/store/src/db/models/queries/notes.rs | 22 +++++-------------- .../store/src/db/models/queries/nullifiers.rs | 3 +-- .../src/db/models/queries/transactions.rs | 5 ++--- crates/store/src/db/models/utils.rs | 9 ++++---- crates/store/src/db/tests.rs | 3 --- crates/store/src/errors.rs | 2 +- crates/store/src/genesis/config/errors.rs | 1 - crates/store/src/genesis/config/mod.rs | 2 +- crates/store/src/server/api.rs | 2 -- crates/store/src/state/apply_block.rs | 2 +- crates/utils/src/config.rs | 2 +- crates/utils/src/limiter.rs | 2 +- 41 files changed, 78 insertions(+), 99 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c0a25b5d5..02978e521 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,6 +116,7 @@ url = { features = ["serde"], version = "2.5" } # Pedantic lints are set to a lower priority which allows lints in the group to be selectively enabled. pedantic = { level = "warn", priority = -1 } +allow_attributes = "deny" cast_possible_truncation = "allow" # Overly many instances especially regarding indices. collapsible-if = "allow" # Too new to enforce. 
from_iter_instead_of_collect = "allow" # at times `FromIter` is much more readable diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index 0b5638e53..c2b9d0835 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -751,7 +751,7 @@ fn load_counter_account(file_path: &Path) -> Result { } /// Create and submit a network note that targets the counter account. -#[allow(clippy::too_many_arguments)] +#[expect(clippy::too_many_arguments)] #[instrument( parent = None, target = COMPONENT, diff --git a/bin/network-monitor/src/faucet.rs b/bin/network-monitor/src/faucet.rs index 370d7bb10..caeafe055 100644 --- a/bin/network-monitor/src/faucet.rs +++ b/bin/network-monitor/src/faucet.rs @@ -47,7 +47,7 @@ pub struct FaucetTestDetails { struct PowChallengeResponse { challenge: String, target: u64, - #[allow(dead_code)] // Timestamp is part of API response but not used + #[expect(dead_code)] // Timestamp is part of API response but not used timestamp: u64, } @@ -55,7 +55,7 @@ struct PowChallengeResponse { #[derive(Debug, Deserialize)] struct GetTokensResponse { tx_id: String, - #[allow(dead_code)] // Note ID is part of API response but not used in monitoring + #[expect(dead_code)] // Note ID is part of API response but not used in monitoring note_id: String, } diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 9cfc654b1..8bc38fd07 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -157,7 +157,7 @@ impl BundledCommand { } } - #[allow(clippy::too_many_lines)] + #[expect(clippy::too_many_lines)] async fn start( rpc_url: Url, block_prover_url: Option, diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index a78655cd9..bde1cf774 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -25,7 +25,7 @@ use crate::commands::{ duration_to_human_readable_string, }; 
-#[allow(clippy::large_enum_variant, reason = "single use enum")] +#[expect(clippy::large_enum_variant, reason = "single use enum")] #[derive(clap::Subcommand)] pub enum StoreCommand { /// Bootstraps the blockchain database with the genesis block. diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs index d9d8e8c06..9af8f8eb3 100644 --- a/bin/remote-prover/src/api/prover.rs +++ b/bin/remote-prover/src/api/prover.rs @@ -88,7 +88,6 @@ impl ProverRpcApi { Self { prover } } - #[allow(clippy::result_large_err)] #[instrument( target = COMPONENT, name = "remote_prover.prove_tx", @@ -123,7 +122,6 @@ impl ProverRpcApi { Ok(Response::new(proto::remote_prover::Proof { payload: proof.to_bytes() })) } - #[allow(clippy::result_large_err)] #[instrument( target = COMPONENT, name = "remote_prover.prove_batch", @@ -154,7 +152,6 @@ impl ProverRpcApi { Ok(Response::new(proto::remote_prover::Proof { payload: proven_batch.to_bytes() })) } - #[allow(clippy::result_large_err)] #[instrument( target = COMPONENT, name = "remote_prover.prove_block", diff --git a/bin/remote-prover/src/generated/mod.rs b/bin/remote-prover/src/generated/mod.rs index eb7d89309..830c3a508 100644 --- a/bin/remote-prover/src/generated/mod.rs +++ b/bin/remote-prover/src/generated/mod.rs @@ -1,4 +1,5 @@ #![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] +#![allow(clippy::allow_attributes, reason = "generated by build.rs and tonic")] #[rustfmt::skip] pub mod remote_prover; diff --git a/bin/remote-prover/src/proxy/mod.rs b/bin/remote-prover/src/proxy/mod.rs index 81290d73a..e543022ac 100644 --- a/bin/remote-prover/src/proxy/mod.rs +++ b/bin/remote-prover/src/proxy/mod.rs @@ -252,14 +252,12 @@ pub struct RequestQueue { impl RequestQueue { /// Create a new empty request queue - #[allow(clippy::new_without_default)] pub fn new() -> Self { QUEUE_SIZE.set(0); Self { queue: RwLock::new(VecDeque::new()) } } /// Get the length of the queue - 
#[allow(clippy::len_without_is_empty)] pub async fn len(&self) -> usize { self.queue.read().await.len() } diff --git a/bin/remote-prover/src/proxy/worker.rs b/bin/remote-prover/src/proxy/worker.rs index aa418e8cb..ffa8f708e 100644 --- a/bin/remote-prover/src/proxy/worker.rs +++ b/bin/remote-prover/src/proxy/worker.rs @@ -159,7 +159,6 @@ impl Worker { /// Returns `Ok(())` if the worker is healthy and compatible, or `Err(reason)` if there's an /// issue. The caller should use `update_status` to apply the result to the worker's health /// status. - #[allow(clippy::too_many_lines)] #[tracing::instrument(target = COMPONENT, name = "worker.check_status")] pub async fn check_status(&mut self, supported_proof_type: ProofType) -> Result<(), String> { if !self.should_do_health_check() { diff --git a/bin/stress-test/src/seeding/metrics.rs b/bin/stress-test/src/seeding/metrics.rs index cdf32965a..56e89e4a9 100644 --- a/bin/stress-test/src/seeding/metrics.rs +++ b/bin/stress-test/src/seeding/metrics.rs @@ -76,7 +76,7 @@ impl SeedingMetrics { } /// Prints the block metrics table. 
- #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] fn print_block_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "\nBlock metrics:")?; writeln!(f, "Note: Each block contains 256 transactions (16 batches * 16 transactions).")?; @@ -189,7 +189,7 @@ impl SeedingMetrics { } impl Display for SeedingMetrics { - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!( f, diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index fa751e1a2..3b80481bb 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -145,7 +145,7 @@ async fn generate_blocks( let mut consume_notes_txs = vec![]; let consumes_per_block = TRANSACTIONS_PER_BATCH * BATCHES_PER_BLOCK - 1; - #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] + #[expect(clippy::cast_sign_loss, clippy::cast_precision_loss)] let num_public_accounts = (consumes_per_block as f64 * (f64::from(public_accounts_percentage) / 100.0)) .round() as usize; diff --git a/bin/stress-test/src/store/metrics.rs b/bin/stress-test/src/store/metrics.rs index 95f8ce0ff..b56f36264 100644 --- a/bin/stress-test/src/store/metrics.rs +++ b/bin/stress-test/src/store/metrics.rs @@ -18,7 +18,7 @@ pub fn print_summary(timers_accumulator: &[Duration]) { } /// Computes a percentile from a list of durations. 
-#[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] +#[expect(clippy::cast_sign_loss, clippy::cast_precision_loss)] fn compute_percentile(times: &[Duration], percentile: f64) -> Duration { if times.is_empty() { return Duration::ZERO; diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index fa39303ae..7e83b0ae5 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -76,7 +76,7 @@ pub async fn bench_sync_state(data_directory: PathBuf, iterations: usize, concur print_summary(&timers_accumulator); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let average_notes_per_response = responses.iter().map(|r| r.notes.len()).sum::() as f64 / responses.len() as f64; println!("Average notes per response: {average_notes_per_response}"); @@ -270,7 +270,7 @@ pub async fn bench_sync_nullifiers( print_summary(&timers_accumulator); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let average_nullifiers_per_response = responses.iter().map(|r| r.nullifiers.len()).sum::() as f64 / responses.len() as f64; println!("Average nullifiers per response: {average_nullifiers_per_response}"); @@ -364,7 +364,7 @@ pub async fn bench_sync_transactions( print_summary(&timers_accumulator); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let average_transactions_per_response = if responses.is_empty() { 0.0 } else { @@ -376,13 +376,13 @@ pub async fn bench_sync_transactions( // Calculate pagination statistics let total_runs = results.len(); let paginated_runs = results.iter().filter(|r| r.pages > 1).count(); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let pagination_rate = if total_runs > 0 { (paginated_runs as f64 / total_runs as f64) * 100.0 } else { 0.0 }; - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let avg_pages = if total_runs > 0 { results.iter().map(|r| 
r.pages as f64).sum::() / total_runs as f64 } else { diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index 5b2ab30b3..f581ca95e 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -1,5 +1,3 @@ -#![allow(dead_code, reason = "WIP: mempoool refactor")] - use std::collections::HashSet; use std::sync::Arc; @@ -127,10 +125,6 @@ impl AuthenticatedTransaction { Arc::clone(&self.inner) } - pub fn raw_proven_transaction(&self) -> &ProvenTransaction { - &self.inner - } - pub fn expires_at(&self) -> BlockNumber { self.inner.expiration_block_num() } @@ -177,4 +171,8 @@ impl AuthenticatedTransaction { self.store_account_state = None; self } + + pub fn raw_proven_transaction(&self) -> &ProvenTransaction { + &self.inner + } } diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index 36ab9b53d..955aa2356 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -60,7 +60,7 @@ pub const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(1); /// /// The value is selected such that all transactions should approximately be processed within one /// minutes with a block time of 5s. -#[allow(clippy::cast_sign_loss, reason = "Both durations are positive")] +#[expect(clippy::cast_sign_loss, reason = "Both durations are positive")] pub const DEFAULT_MEMPOOL_TX_CAPACITY: NonZeroUsize = NonZeroUsize::new( DEFAULT_MAX_BATCHES_PER_BLOCK * DEFAULT_MAX_TXS_PER_BATCH diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index fb6963efd..d7ea49db0 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -83,7 +83,6 @@ impl BlockProducer { /// /// Executes in place (i.e. not spawned) and will run indefinitely until a fatal error is /// encountered. 
- #[allow(clippy::too_many_lines)] pub async fn serve(self) -> anyhow::Result<()> { info!(target: COMPONENT, endpoint=?self.block_producer_address, store=%self.store_url, "Initializing server"); let store = StoreClient::new(self.store_url.clone()); diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index dd15c8e0e..c5ecc2ccd 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -348,7 +348,7 @@ impl AccountActor { /// - After 10 attempts, the backoff period is 12 blocks. /// - After 20 attempts, the backoff period is 148 blocks. /// - etc... -#[allow(clippy::cast_precision_loss, clippy::cast_sign_loss)] +#[expect(clippy::cast_precision_loss, clippy::cast_sign_loss)] fn has_backoff_passed( chain_tip: BlockNumber, last_attempt: Option, diff --git a/crates/ntx-builder/src/db/mod.rs b/crates/ntx-builder/src/db/mod.rs index febd14f1b..488673b91 100644 --- a/crates/ntx-builder/src/db/mod.rs +++ b/crates/ntx-builder/src/db/mod.rs @@ -51,7 +51,7 @@ impl Db { } /// Create and commit a transaction with the queries added in the provided closure. 
- #[allow(dead_code)] + #[expect(dead_code)] pub(crate) async fn transact(&self, msg: M, query: Q) -> std::result::Result where Q: Send diff --git a/crates/proto/build.rs b/crates/proto/build.rs index 5a39c1d54..4f64f4e9d 100644 --- a/crates/proto/build.rs +++ b/crates/proto/build.rs @@ -1,4 +1,5 @@ use std::env; +use std::fmt::Write; use std::path::{Path, PathBuf}; use fs_err as fs; @@ -92,16 +93,17 @@ fn generate_mod_rs(directory: impl AsRef) -> std::io::Result<()> { submodules.sort(); - let contents = submodules.iter().map(|f| format!("pub mod {f};\n")); - let contents = std::iter::once( - "#![allow(clippy::pedantic, reason = \"generated by build.rs and tonic\")]\n".to_string(), - ) - .chain(std::iter::once( - "#![allow(clippy::large_enum_variant, reason = \"generated by build.rs and tonic\")]\n\n" - .to_string(), - )) - .chain(contents) - .collect::(); + // Lints we need to allow for the generated code. + let lints = ["pedantic", "large_enum_variant", "allow_attributes"]; + let lints = lints.into_iter().fold(String::new(), |mut s, lint| { + writeln!(s, " clippy::{lint},").unwrap(); + s + }); + let lints = + format!("#![expect(\n{lints} reason = \"generated by build.rs and tonic\"\n)]\n\n"); + + let modules = submodules.iter().map(|f| format!("pub mod {f};\n")); + let contents = std::iter::once(lints).chain(modules).collect::(); fs::write(mod_filepath, contents) } diff --git a/crates/proto/src/generated/mod.rs b/crates/proto/src/generated/mod.rs index 61e3a5379..4ec0ae408 100644 --- a/crates/proto/src/generated/mod.rs +++ b/crates/proto/src/generated/mod.rs @@ -1,5 +1,9 @@ -#![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] -#![allow(clippy::large_enum_variant, reason = "generated by build.rs and tonic")] +#![expect( + clippy::pedantic, + clippy::large_enum_variant, + clippy::allow_attributes, + reason = "generated by build.rs and tonic" +)] pub mod account; pub mod block_producer; diff --git a/crates/remote-prover-client/src/lib.rs 
b/crates/remote-prover-client/src/lib.rs index 27b6fa049..a319793d9 100644 --- a/crates/remote-prover-client/src/lib.rs +++ b/crates/remote-prover-client/src/lib.rs @@ -2,7 +2,7 @@ // We allow unused imports here in order because this `macro_use` only makes sense for code // generated by prost under certain circumstances (when `tx-prover` is enabled and the environment // is not wasm) -#![allow(unused_imports)] +#![expect(unused_imports)] #[macro_use] extern crate alloc; diff --git a/crates/remote-prover-client/src/remote_prover/generated/mod.rs b/crates/remote-prover-client/src/remote_prover/generated/mod.rs index 806afe903..2cd709029 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/mod.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/mod.rs @@ -1,4 +1,5 @@ #![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] +#![allow(clippy::allow_attributes, reason = "generated by build.rs and tonic")] #[cfg(all(feature = "std", target_arch = "wasm32"))] compile_error!("The `std` feature cannot be used when targeting `wasm32`."); diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index d29ee7166..f5e3c2b82 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -407,8 +407,7 @@ impl api_server::Api for RpcService { request: Request, ) -> Result, Status> { use proto::rpc::account_request::account_detail_request::storage_map_detail_request::{ - SlotData::MapKeys as ProtoMapKeys, - SlotData::AllEntries as ProtoMapAllEntries + SlotData::AllEntries as ProtoMapAllEntries, SlotData::MapKeys as ProtoMapKeys, }; let request = request.into_inner(); @@ -505,7 +504,6 @@ fn out_of_range_error(err: E) -> Status { } /// Check, but don't repeat ourselves mapping the error -#[allow(clippy::result_large_err)] fn check(n: usize) -> Result<(), Status> { ::check(n).map_err(out_of_range_error) } diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index 
d015408ad..2508c9d2d 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -37,7 +37,7 @@ pub type PersistentAccountTree = AccountTree Result { - #[allow(clippy::cast_sign_loss)] + #[expect(clippy::cast_sign_loss)] Ok(NoteTag::new(raw as u32)) } @@ -189,7 +189,7 @@ pub(crate) fn fungible_delta_to_raw_sql(delta: i64) -> i64 { } #[inline(always)] -#[allow(clippy::cast_sign_loss)] +#[expect(clippy::cast_sign_loss)] pub(crate) fn raw_sql_to_note_type(raw: i32) -> u8 { raw as u8 } diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 85bead244..0a252b550 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -292,7 +292,7 @@ pub(crate) fn select_account_commitments_paged( use miden_protocol::utils::Serializable; // Fetch one extra to determine if there are more results - #[allow(clippy::cast_possible_wrap)] + #[expect(clippy::cast_possible_wrap)] let limit = (page_size.get() + 1) as i64; let mut query = SelectDsl::select( @@ -367,7 +367,7 @@ pub(crate) fn select_public_account_ids_paged( ) -> Result { use miden_protocol::utils::Serializable; - #[allow(clippy::cast_possible_wrap)] + #[expect(clippy::cast_possible_wrap)] let limit = (page_size.get() + 1) as i64; let mut query = SelectDsl::select(schema::accounts::table, schema::accounts::account_id) @@ -985,7 +985,7 @@ pub(crate) fn insert_account_storage_map_value( } /// Attention: Assumes the account details are NOT null! The schema explicitly allows this though! 
-#[allow(clippy::too_many_lines)] +#[expect(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs index 2b42b40a3..553430ddb 100644 --- a/crates/store/src/db/models/queries/block_headers.rs +++ b/crates/store/src/db/models/queries/block_headers.rs @@ -129,7 +129,7 @@ pub fn select_all_block_headers( #[diesel(table_name = schema::block_headers)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] pub struct BlockHeaderRawRow { - #[allow(dead_code)] + #[expect(dead_code)] pub block_num: i64, pub block_header: Vec, pub signature: Vec, diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs index 6de1b6ee1..2cec3523e 100644 --- a/crates/store/src/db/models/queries/mod.rs +++ b/crates/store/src/db/models/queries/mod.rs @@ -25,7 +25,7 @@ //! transaction, any nesting of further `transaction(conn, || {})` has no effect and should be //! considered unnecessary boilerplate by default. 
-#![allow( +#![expect( clippy::needless_pass_by_value, reason = "The parent scope does own it, passing by value avoids additional boilerplate" )] diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index ef93f0ffe..083cb15aa 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -1,4 +1,4 @@ -#![allow( +#![expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize cause issues" )] @@ -441,14 +441,7 @@ pub(crate) fn select_note_script_by_root( /// ORDER BY notes.rowid ASC /// LIMIT ?4 /// ``` -#[allow( - clippy::cast_sign_loss, - reason = "We need custom SQL statements which has given types that we need to convert" -)] -#[allow( - clippy::too_many_lines, - reason = "Lines will be reduced when schema is updated to simplify logic" -)] +#[expect(clippy::cast_sign_loss, reason = "row_id is a positive integer")] pub(crate) fn select_unconsumed_network_notes_by_account_id( conn: &mut SqliteConnection, account_id: AccountId, @@ -460,7 +453,7 @@ pub(crate) fn select_unconsumed_network_notes_by_account_id( diesel::dsl::sql::("notes.rowid >= ") .bind::(page.token.unwrap_or_default() as i64); - #[allow( + #[expect( clippy::items_after_statements, reason = "It's only relevant for a single call function" )] @@ -470,7 +463,7 @@ pub(crate) fn select_unconsumed_network_notes_by_account_id( i64, // rowid (from sql::("notes.rowid")) ); - #[allow( + #[expect( clippy::items_after_statements, reason = "It's only relevant for a single call function" )] @@ -550,7 +543,6 @@ pub struct NoteSyncRecordRawRow { pub inclusion_path: Vec, // SparseMerklePath } -#[allow(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] impl TryInto for NoteSyncRecordRawRow { type Error = DatabaseError; fn try_into(self) -> Result { @@ -746,7 +738,7 @@ pub struct NoteMetadataRawRow { attachment: Vec, } -#[allow(clippy::cast_sign_loss)] 
+#[expect(clippy::cast_sign_loss)] impl TryInto for NoteMetadataRawRow { type Error = DatabaseError; fn try_into(self) -> Result { @@ -767,7 +759,7 @@ pub struct BlockNoteIndexRawRow { pub note_index: i32, // index within batch } -#[allow(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] +#[expect(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] impl TryInto for BlockNoteIndexRawRow { type Error = DatabaseError; fn try_into(self) -> Result { @@ -791,7 +783,6 @@ impl TryInto for BlockNoteIndexRawRow { /// /// The [`SqliteConnection`] object is not consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -822,7 +813,6 @@ pub(crate) fn insert_notes( /// /// The [`SqliteConnection`] object is not consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, diff --git a/crates/store/src/db/models/queries/nullifiers.rs b/crates/store/src/db/models/queries/nullifiers.rs index a13911388..84e89ebad 100644 --- a/crates/store/src/db/models/queries/nullifiers.rs +++ b/crates/store/src/db/models/queries/nullifiers.rs @@ -173,7 +173,7 @@ pub(crate) fn select_nullifiers_paged( after_nullifier: Option, ) -> Result { // Fetch one extra to determine if there are more results - #[allow(clippy::cast_possible_wrap)] + #[expect(clippy::cast_possible_wrap)] let limit = (page_size.get() + 1) as i64; let mut query = @@ -226,7 +226,6 @@ pub(crate) fn select_nullifiers_paged( /// INSERT INTO nullifiers (nullifier, nullifier_prefix, block_num) /// VALUES (?1, ?2, ?3) /// ``` -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index be132e1a5..1331d7ea5 100644 --- 
a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -150,7 +150,6 @@ impl TryInto for TransactionRecordRaw { /// /// The [`SqliteConnection`] object is not consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -161,7 +160,7 @@ pub(crate) fn insert_transactions( block_num: BlockNumber, transactions: &OrderedTransactionHeaders, ) -> Result { - #[allow(clippy::into_iter_on_ref)] // false positive + #[expect(clippy::into_iter_on_ref)] // false positive let rows: Vec<_> = transactions .as_slice() .into_iter() @@ -187,7 +186,7 @@ pub struct TransactionSummaryRowInsert { } impl TransactionSummaryRowInsert { - #[allow( + #[expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize cause issues" )] diff --git a/crates/store/src/db/models/utils.rs b/crates/store/src/db/models/utils.rs index c472940e4..1ace2abaa 100644 --- a/crates/store/src/db/models/utils.rs +++ b/crates/store/src/db/models/utils.rs @@ -14,7 +14,7 @@ pub(crate) fn vec_raw_try_into>( ) } -#[allow(dead_code)] +#[expect(dead_code)] /// Deserialize an iterable container full of byte blobs `B` to types `T` pub(crate) fn deserialize_raw_vec, T: Deserializable>( raw: impl IntoIterator, @@ -38,7 +38,6 @@ pub fn get_nullifier_prefix(nullifier: &Nullifier) -> u16 { /// Converts a slice of length `N` to an array, returns `None` if invariant /// isn'crates/store/src/db/mod.rs upheld. 
-#[allow(dead_code)] pub fn slice_to_array(bytes: &[u8]) -> Option<[u8; N]> { if bytes.len() != N { return None; @@ -48,7 +47,7 @@ pub fn slice_to_array(bytes: &[u8]) -> Option<[u8; N]> { Some(arr) } -#[allow(dead_code)] +#[expect(dead_code)] #[inline] pub fn from_be_to_u32(bytes: &[u8]) -> Option { slice_to_array::<4>(bytes).map(u32::from_be_bytes) @@ -62,8 +61,8 @@ pub struct PragmaSchemaVersion { } /// Returns the schema version of the database. -#[allow(dead_code)] -#[allow( +#[expect(dead_code)] +#[expect( clippy::cast_sign_loss, reason = "schema version is always positive and we will never reach 0xEFFF_..._FFFF" )] diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index f6cb0c328..57183dee4 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -1,6 +1,3 @@ -#![allow(clippy::similar_names, reason = "naming dummy test values is hard")] -#![allow(clippy::too_many_lines, reason = "test code can be long")] - use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 99f658eab..cbd98af75 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -699,7 +699,7 @@ mod compile_tests { /// Ensure all enum variants remain compat with the desired /// trait bounds. Otherwise one gets very unwieldy errors. 
- #[allow(dead_code)] + #[expect(dead_code)] fn assumed_trait_bounds_upheld() { fn ensure_is_error(_phony: PhantomData) where diff --git a/crates/store/src/genesis/config/errors.rs b/crates/store/src/genesis/config/errors.rs index b39495c87..4d360e925 100644 --- a/crates/store/src/genesis/config/errors.rs +++ b/crates/store/src/genesis/config/errors.rs @@ -12,7 +12,6 @@ use miden_standards::account::wallets::BasicWalletError; use crate::genesis::config::TokenSymbolStr; -#[allow(missing_docs, reason = "Error variants must be descriptive by themselves")] #[derive(Debug, thiserror::Error)] pub enum GenesisConfigError { #[error(transparent)] diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index 345253291..283208182 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -96,7 +96,7 @@ impl GenesisConfig { /// Convert the in memory representation into the new genesis state /// /// Also returns the set of secrets for the generated accounts. - #[allow(clippy::too_many_lines)] + #[expect(clippy::too_many_lines)] pub fn into_state( self, signer: S, diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index dbea1e2e3..56bfcafb4 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -175,7 +175,6 @@ where .map_err(Into::into) } -#[allow(clippy::result_large_err)] #[instrument( level = "debug", target = COMPONENT, @@ -195,7 +194,6 @@ where .map_err(Into::into) } -#[allow(clippy::result_large_err)] #[instrument( level = "debug", target = COMPONENT, diff --git a/crates/store/src/state/apply_block.rs b/crates/store/src/state/apply_block.rs index dfd0583b2..145432c97 100644 --- a/crates/store/src/state/apply_block.rs +++ b/crates/store/src/state/apply_block.rs @@ -39,7 +39,7 @@ impl State { /// - the in-memory structures are updated, including the latest block pointer and the lock is /// released. 
// TODO: This span is logged in a root span, we should connect it to the parent span. - #[allow(clippy::too_many_lines)] + #[expect(clippy::too_many_lines)] #[instrument(target = COMPONENT, skip_all, err)] pub async fn apply_block(&self, signed_block: SignedBlock) -> Result<(), ApplyBlockError> { let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?; diff --git a/crates/utils/src/config.rs b/crates/utils/src/config.rs index e0fc1a0a6..b29c9060f 100644 --- a/crates/utils/src/config.rs +++ b/crates/utils/src/config.rs @@ -15,7 +15,7 @@ pub const DEFAULT_FAUCET_SERVER_PORT: u16 = 8080; /// relative, searches in parent directories all the way to the root as well. /// /// The above configuration options are indented to support easy of packaging and deployment. -#[allow(clippy::result_large_err, reason = "This error crashes the node")] +#[expect(clippy::result_large_err, reason = "This error crashes the node")] pub fn load_config Deserialize<'a>>( config_file: impl AsRef, ) -> figment::Result { diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index 1adf5be41..2b222e23e 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -13,7 +13,7 @@ /// Basic request limit. 
pub const GENERAL_REQUEST_LIMIT: usize = 1000; -#[allow(missing_docs)] +#[expect(missing_docs)] #[derive(Debug, thiserror::Error)] #[error("parameter {which} exceeded limit {limit}: {size}")] pub struct QueryLimitError { From cd24f2de9282106e1ee7ad1f7758d50dcf29b228 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Fri, 13 Feb 2026 15:23:09 +0100 Subject: [PATCH 32/77] fix/sqlite: ensure we're shipping the node binary with sqlite (#1669) --- Cargo.lock | 3 +++ Cargo.toml | 1 + crates/ntx-builder/Cargo.toml | 4 ++++ crates/store/Cargo.toml | 5 +++-- .../src/db/models/queries/accounts/tests.rs | 12 ++++++++++-- crates/store/src/db/tests.rs | 17 ++++++++++++++--- 6 files changed, 35 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 47c301d69..c8c67c56a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2282,6 +2282,7 @@ version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" dependencies = [ + "cc", "pkg-config", "vcpkg", ] @@ -2814,6 +2815,7 @@ dependencies = [ "diesel_migrations", "futures", "indexmap 2.13.0", + "libsqlite3-sys", "miden-node-proto", "miden-node-test-macro", "miden-node-utils", @@ -2918,6 +2920,7 @@ dependencies = [ "futures", "hex", "indexmap 2.13.0", + "libsqlite3-sys", "miden-block-prover", "miden-crypto", "miden-node-proto", diff --git a/Cargo.toml b/Cargo.toml index 02978e521..db02abc0d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -84,6 +84,7 @@ http = { version = "1.3" } humantime = { version = "2.2" } indexmap = { version = "2.12" } itertools = { version = "0.14" } +libsqlite3-sys = { features = ["bundled"], version = "0.35" } lru = { default-features = false, version = "0.16" } pretty_assertions = { version = "1.4" } # prost and protox are from different authors and are _not_ released in diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index e1d6dab84..1d34db128 100644 --- 
a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -22,6 +22,7 @@ diesel = { features = ["numeric", "sqlite"], workspace = tru diesel_migrations = { features = ["sqlite"], workspace = true } futures = { workspace = true } indexmap = { workspace = true } +libsqlite3-sys = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } miden-protocol = { default-features = true, workspace = true } @@ -41,3 +42,6 @@ miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } miden-standards = { workspace = true } rstest = { workspace = true } + +[package.metadata.cargo-machete] +ignored = ["libsqlite3-sys"] diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 8850a4c4e..d2a7b3e69 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -25,6 +25,7 @@ fs-err = { workspace = true } futures = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } +libsqlite3-sys = { workspace = true } miden-block-prover = { workspace = true } miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } miden-node-proto = { workspace = true } @@ -74,7 +75,7 @@ name = "account_tree" required-features = ["rocksdb"] [package.metadata.cargo-machete] -# This is an indirect dependency for which we need to enable optimisations +# This is an indirect dependency for which we need to enable optimisations/features # via feature flags. Because we don't use it directly in code, machete # identifies it as unused. 
-ignored = ["miden-crypto", "miden-node-rocksdb-cxx-linkage-fix"] +ignored = ["libsqlite3-sys", "miden-crypto", "miden-node-rocksdb-cxx-linkage-fix"] diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index 2a4bf4078..fa1e77e85 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -652,7 +652,11 @@ fn test_select_account_vault_at_block_historical_with_updates() { account.commitment(), AccountUpdateDetails::Delta(delta), ); - upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + for block in [block_1, block_2, block_3] { + upsert_accounts(&mut conn, std::slice::from_ref(&account_update), block) + .expect("upsert_accounts failed"); + } // Insert vault asset at block 1: vault_key_1 = 1000 tokens let vault_key_1 = AssetVaultKey::new_unchecked(Word::from([ @@ -754,7 +758,11 @@ fn test_select_account_vault_at_block_with_deletion() { account.commitment(), AccountUpdateDetails::Delta(delta), ); - upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + for block in [block_1, block_2, block_3] { + upsert_accounts(&mut conn, std::slice::from_ref(&account_update), block) + .expect("upsert_accounts failed"); + } // Insert vault asset at block 1 let vault_key = AssetVaultKey::new_unchecked(Word::from([ diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 57183dee4..65e93c283 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -499,9 +499,10 @@ fn sync_account_vault_basic_validation() { create_block(conn, block_mid); create_block(conn, block_to); - // Create accounts - one public for vault assets, one private for testing - queries::upsert_accounts(conn, &[mock_block_account_update(public_account_id, 0)], block_from) - .unwrap(); + for block in [block_from, block_mid, block_to] { + queries::upsert_accounts(conn, 
&[mock_block_account_update(public_account_id, 0)], block) + .unwrap(); + } // Create some test vault assets let vault_key_1 = AssetVaultKey::new_unchecked(num_to_word(100)); @@ -1048,6 +1049,9 @@ fn sql_account_storage_map_values_insertion() { let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + queries::upsert_accounts(conn, &[mock_block_account_update(account_id, 0)], block1).unwrap(); + queries::upsert_accounts(conn, &[mock_block_account_update(account_id, 0)], block2).unwrap(); + let slot_name = StorageSlotName::mock(3); let key1 = Word::from([1u32, 2, 3, 4]); let key2 = Word::from([5u32, 6, 7, 8]); @@ -1119,6 +1123,11 @@ fn select_storage_map_sync_values() { let block2 = BlockNumber::from(2); let block3 = BlockNumber::from(3); + for block in [block1, block2, block3] { + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block) + .unwrap(); + } + // Insert data across multiple blocks using individual inserts // Block 1: key1 -> value1, key2 -> value2 queries::insert_account_storage_map_value( @@ -2088,6 +2097,8 @@ fn db_roundtrip_storage_map_values() { create_block(&mut conn, block_num); let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num) + .unwrap(); let slot_name = StorageSlotName::mock(5); let key = num_to_word(12345); let value = num_to_word(67890); From 3ef46f21e18e7435c9a7b49b0a533723091fbc01 Mon Sep 17 00:00:00 2001 From: Philipp Date: Sat, 14 Feb 2026 10:18:42 +0400 Subject: [PATCH 33/77] docs: add macOS build prerequisites to installation page (#1672) --- docs/external/src/operator/installation.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/external/src/operator/installation.md b/docs/external/src/operator/installation.md index 1f27c639d..662d76851 100644 --- a/docs/external/src/operator/installation.md +++ 
b/docs/external/src/operator/installation.md @@ -39,6 +39,18 @@ command ensures that all required libraries are installed. sudo apt install llvm clang bindgen pkg-config libssl-dev libsqlite3-dev ``` +On macOS, ensure the Xcode Command Line Tools are installed: + +```sh +xcode-select --install +``` + +If you still see `'cstdint' file not found` errors after installing the Command Line Tools (common after a macOS upgrade), try setting the SDK root explicitly: + +```sh +export SDKROOT="$(xcrun --sdk macosx --show-sdk-path)" +``` + Install the latest node binary: ```sh From 791041319632353e9bbb41aef0d790d164e413d5 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 16 Feb 2026 11:13:00 +0200 Subject: [PATCH 34/77] ci: workflow cleanup (#1678) --- .github/workflows/cleanup-workflows.yml | 142 ++++++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 .github/workflows/cleanup-workflows.yml diff --git a/.github/workflows/cleanup-workflows.yml b/.github/workflows/cleanup-workflows.yml new file mode 100644 index 000000000..a10133f1f --- /dev/null +++ b/.github/workflows/cleanup-workflows.yml @@ -0,0 +1,142 @@ +# Manual workflow to clean up deleted workflow runs. +# +# GitHub keeps workflow runs around even if the workflow is deleted. +# This has the side effect that these still display in the UI which gets cluttered. +# Once the runs of a workflow are deleted, they also get removed from the UI. 
+name: Cleanup Workflow + +on: + workflow_dispatch: + inputs: + mode: + description: "Choose 'dry run' to preview or 'execute' to delete runs" + required: true + default: "dry run" + type: choice + options: + - "dry run" + - "execute" + +jobs: + cleanup: + name: Cleanup deleted workflows + runs-on: ubuntu-latest + permissions: + actions: write # required for deleting workflow runs + contents: read + + steps: + - name: Checkout repo + uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - name: Workflows on main + id: main + run: | + git fetch origin main + WORKFLOWS=$(git ls-tree -r origin/main --name-only | grep '^.github/workflows/') + echo $WORKFLOWS + echo "workflows=$WORKFLOWS" >> "$GITHUB_OUTPUT" + + - name: Workflows on next + id: next + run: | + git fetch origin next + WORKFLOWS=$(git ls-tree -r origin/next --name-only | grep '^.github/workflows/') + echo $WORKFLOWS + echo "workflows=$WORKFLOWS" >> "$GITHUB_OUTPUT" + + - name: Workflows on github + id: github + run: | + # Note that we filter by `.github` path prefix to ensure we only get locally defined workflows. + # + # Examples of non-local workflows are `dependabot` and `copilot` which have paths: + # - dynamic/dependabot/dependabot-updates + # - dynamic/copilot-pull-request-reviewer/copilot-pull-request-reviewer + WORKFLOWS=$(gh workflow list \ + --all \ + --json path \ + --jq '.[] | select(.path | startswith(".github")) | .path' \ + ) + echo $WORKFLOWS + echo "workflows=$WORKFLOWS" >> "$GITHUB_OUTPUT" + + - name: Filter for deleted workflows + id: deleted + run: | + # Union of `main` and `next` workflows. + EXISTING_FILES=$( \ + printf "%s\n%s\n" \ + "${{ steps.main.outputs.workflows }}" \ + "${{ steps.next.outputs.workflows }}" \ + ) + EXISTING_FILES=$(echo "$EXISTING_FILES" | sort -u) + echo $EXISTING_FILES + + # Find deleted workflows as the items in `WORKFLOWS` but not in the union of main and next. + # This assumes that _all_ items in main and next are present in `WORKFLOWS`. 
+ DELETED_FILES=$( \ + printf "%s\n%s\n" \ + "$EXISTING_FILES" \ + "${{ steps.github.outputs.workflows }}" \ + ) + DELETED_FILES=$(echo "$DELETED_FILES" | sort | uniq -u) + echo $DELETED_FILES + echo "workflows=$DELETED_FILES" >> "$GITHUB_OUTPUT" + + - name: Delete runs from deleted workflows + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + MODE: ${{ inputs.mode }} + DELETED_WORKFLOWS: ${{ steps.deleted.outputs.workflows }} + run: | + set -euo pipefail + + TOTAL_AFFECTED=0 + + echo "" + echo "=== Workflow Cleanup Summary ===" + echo "" + + while IFS= read -r workflow; do + [ -z "$workflow" ] && continue + + WF_COUNT=0 + + while true; do + RUN_IDS=$(gh run list \ + --workflow "$workflow" \ + --limit 100 \ + --json databaseId \ + --jq '.[].databaseId') + + if [ -z "$RUN_IDS" ]; then + break + fi + + BATCH_COUNT=$(echo "$RUN_IDS" | wc -l | tr -d ' ') + WF_COUNT=$((WF_COUNT + BATCH_COUNT)) + + if [ "$MODE" = "execute" ]; then + for RUN_ID in $RUN_IDS; do + gh run delete "$RUN_ID" --yes >/dev/null + done + fi + done + + echo "$workflow → $WF_COUNT runs" + TOTAL_AFFECTED=$((TOTAL_AFFECTED + WF_COUNT)) + + done <<< "$DELETED_WORKFLOWS" + + echo "" + echo "--------------------------------------" + echo "Total runs affected: $TOTAL_AFFECTED" + + if [ "$MODE" = "dry run" ]; then + echo "Dry run complete. No runs were deleted." + else + echo "Cleanup complete." 
+ fi From dea85f739746f8da70834f77d379854dc9f8a2d8 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 16 Feb 2026 12:58:37 +0100 Subject: [PATCH 35/77] feat/rpc: initial `SyncChainMmr` impl (#1636) --- .github/workflows/ci.yml | 11 +- CHANGELOG.md | 4 + bin/stress-test/README.md | 32 ++- bin/stress-test/src/main.rs | 16 +- bin/stress-test/src/store/mod.rs | 268 +++++++++++------- crates/proto/src/generated/rpc.rs | 120 +++----- crates/proto/src/generated/store.rs | 66 ++--- crates/rpc/Cargo.toml | 2 +- crates/rpc/README.md | 20 -- crates/rpc/src/server/api.rs | 26 +- crates/rpc/src/tests.rs | 61 +++- crates/store/README.md | 18 -- crates/store/src/db/mod.rs | 32 +-- .../store/src/db/models/queries/accounts.rs | 52 +--- crates/store/src/db/models/queries/mod.rs | 60 +--- .../src/db/models/queries/transactions.rs | 73 +---- crates/store/src/db/tests.rs | 144 ---------- crates/store/src/errors.rs | 13 + crates/store/src/server/rpc_api.rs | 104 +++---- crates/store/src/state/loader.rs | 2 +- crates/store/src/state/sync_state.rs | 94 +++--- crates/utils/src/limiter.rs | 31 +- docs/external/src/rpc.md | 17 +- proto/proto/internal/store.proto | 18 +- proto/proto/rpc.proto | 73 ++--- 25 files changed, 471 insertions(+), 886 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 016aeba77..f89d38d2f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -167,11 +167,12 @@ jobs: cargo run --bin miden-node-stress-test seed-store \ --data-directory ${{ env.DATA_DIR }} \ --num-accounts 500 --public-accounts-percentage 50 - - name: Benchmark state sync - run: | - cargo run --bin miden-node-stress-test benchmark-store \ - --data-directory ${{ env.DATA_DIR }} \ - --iterations 10 --concurrency 1 sync-state + # TODO re-introduce + # - name: Benchmark state sync + # run: | + # cargo run --bin miden-node-stress-test benchmark-store \ + # --data-directory ${{ env.DATA_DIR }} \ + # --iterations 10 --concurrency 1 sync-state - name: 
Benchmark notes sync run: | cargo run --bin miden-node-stress-test benchmark-store \ diff --git a/CHANGELOG.md b/CHANGELOG.md index 9faf9bd88..22797cadf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,9 +6,13 @@ - [BREAKING] Move block proving from Blocker Producer to the Store ([#1579](https://github.com/0xMiden/miden-node/pull/1579)). - [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/miden-node/pull/1595)). +- [BREAKING] Remove `SyncState` and introduce `SyncChainMmr` ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). +- Introduce `SyncChainMmr` RPC endpoint to sync chain MMR deltas within specified block ranges ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). ### Changes +- [BREAKING] Removed obsolete `SyncState` RPC endpoint; clients should use `SyncNotes`, `SyncNullifiers`, `SyncAccountVault`, `SyncAccountStorageMaps`, `SyncTransactions`, or `SyncChainMmr` instead ([#1636](https://github.com/0xMiden/miden-node/pull/1636)). +- Added account ID limits for `SyncTransactions`, `SyncAccountVault`, and `SyncAccountStorageMaps` to `GetLimits` responses ([#1636](https://github.com/0xMiden/miden-node/pull/1636)). - [BREAKING] Added typed `GetAccountError` for `GetAccount` endpoint, splitting `BlockNotAvailable` into `UnknownBlock` and `BlockPruned`. `AccountNotFound` and `AccountNotPublic` now return `InvalidArgument` gRPC status instead of `NotFound`; clients should parse the error details discriminant rather than branching on status codes ([#1646](https://github.com/0xMiden/miden-node/pull/1646)). - Changed `note_type` field in proto `NoteMetadata` from `uint32` to a `NoteType` enum ([#1594](https://github.com/0xMiden/miden-node/pull/1594)). 
- Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). diff --git a/bin/stress-test/README.md b/bin/stress-test/README.md index 4d8c283c6..d60a61190 100644 --- a/bin/stress-test/README.md +++ b/bin/stress-test/README.md @@ -20,14 +20,14 @@ This command allows to run stress tests against the Store component. These tests The endpoints that you can test are: - `load_state` -- `sync_state` - `sync_notes` - `sync_nullifiers` - `sync_transactions` +- `sync_chain_mmr` Most benchmarks accept options to control the number of iterations and concurrency level. The `load_state` endpoint is different - it simply measures the one-time startup cost of loading the state from disk. -**Note on Concurrency**: For the endpoints that support it (`sync_state`, `sync_notes`, `sync_nullifiers`), the concurrency parameter controls how many requests are sent in parallel to the store. Since these benchmarks run against a local store (no network overhead), higher concurrency values can help identify bottlenecks in the store's internal processing. The latency measurements exclude network time and represent pure store processing time. +**Note on Concurrency**: For the endpoints that support it (`sync_notes`, `sync_nullifiers`), the concurrency parameter controls how many requests are sent in parallel to the store. Since these benchmarks run against a local store (no network overhead), higher concurrency values can help identify bottlenecks in the store's internal processing. The latency measurements exclude network time and represent pure store processing time. Example usage: @@ -119,18 +119,6 @@ Database contains 99961 accounts and 99960 nullifiers **Performance Note**: The load-state benchmark shows that account tree loading (~21.3s) and nullifier tree loading (~21.5s) are the primary bottlenecks, while MMR loading and database connection are negligible (<3ms each). 
-- sync-state -``` bash -$ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-state - -Average request latency: 1.120061ms -P50 request latency: 1.106042ms -P95 request latency: 1.530708ms -P99 request latency: 1.919209ms -P99.9 request latency: 5.795125ms -Average notes per response: 1.3159 -``` - - sync-notes ``` bash $ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-notes @@ -171,5 +159,21 @@ Pagination statistics: Average pages per run: 2.00 ``` +- sync-chain-mmr +``` bash +$ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-chain-mmr --block-range 1000 + +Average request latency: 1.021ms +P50 request latency: 0.981ms +P95 request latency: 1.412ms +P99 request latency: 1.822ms +P99.9 request latency: 3.174ms +Pagination statistics: + Total runs: 10000 + Runs triggering pagination: 1 + Pagination rate: 0.01% + Average pages per run: 1.00 +``` + ## License This project is [MIT licensed](../../LICENSE). 
diff --git a/bin/stress-test/src/main.rs b/bin/stress-test/src/main.rs index 095b04caf..a5cc82f9f 100644 --- a/bin/stress-test/src/main.rs +++ b/bin/stress-test/src/main.rs @@ -4,9 +4,9 @@ use clap::{Parser, Subcommand}; use miden_node_utils::logging::OpenTelemetry; use seeding::seed_store; use store::{ + bench_sync_chain_mmr, bench_sync_notes, bench_sync_nullifiers, - bench_sync_state, bench_sync_transactions, load_state, }; @@ -70,8 +70,6 @@ pub enum Endpoint { #[arg(short, long, value_name = "PREFIXES", default_value = "10")] prefixes: usize, }, - #[command(name = "sync-state")] - SyncState, #[command(name = "sync-notes")] SyncNotes, #[command(name = "sync-transactions")] @@ -83,6 +81,12 @@ pub enum Endpoint { #[arg(short, long, value_name = "BLOCK_RANGE", default_value = "100")] block_range: u32, }, + #[command(name = "sync-chain-mmr")] + SyncChainMmr { + /// Block range size for each request (number of blocks to query). + #[arg(short, long, value_name = "BLOCK_RANGE", default_value = "1000")] + block_range: u32, + }, #[command(name = "load-state")] LoadState, } @@ -111,9 +115,6 @@ async fn main() { Endpoint::SyncNullifiers { prefixes } => { bench_sync_nullifiers(data_directory, iterations, concurrency, prefixes).await; }, - Endpoint::SyncState => { - bench_sync_state(data_directory, iterations, concurrency).await; - }, Endpoint::SyncNotes => { bench_sync_notes(data_directory, iterations, concurrency).await; }, @@ -127,6 +128,9 @@ async fn main() { ) .await; }, + Endpoint::SyncChainMmr { block_range } => { + bench_sync_chain_mmr(data_directory, iterations, concurrency, block_range).await; + }, Endpoint::LoadState => { load_state(&data_directory).await; }, diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index 7e83b0ae5..3b9811d6e 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -24,9 +24,6 @@ mod metrics; // CONSTANTS // 
================================================================================================ -/// Number of accounts used in each `sync_state` call. -const ACCOUNTS_PER_SYNC_STATE: usize = 5; - /// Number of accounts used in each `sync_notes` call. const ACCOUNTS_PER_SYNC_NOTES: usize = 15; @@ -36,77 +33,6 @@ const NOTE_IDS_PER_NULLIFIERS_CHECK: usize = 20; /// Number of attempts the benchmark will make to reach the store before proceeding. const STORE_STATUS_RETRIES: usize = 10; -// SYNC STATE -// ================================================================================================ - -/// Sends multiple `sync_state` requests to the store and prints the performance. -/// -/// Arguments: -/// - `data_directory`: directory that contains the database dump file and the accounts ids dump -/// file. -/// - `iterations`: number of requests to send. -/// - `concurrency`: number of requests to send in parallel. -pub async fn bench_sync_state(data_directory: PathBuf, iterations: usize, concurrency: usize) { - // load accounts from the dump file - let accounts_file = data_directory.join(ACCOUNTS_FILENAME); - let accounts = fs::read_to_string(&accounts_file) - .await - .unwrap_or_else(|e| panic!("missing file {}: {e:?}", accounts_file.display())); - let mut account_ids = accounts.lines().map(|a| AccountId::from_hex(a).unwrap()).cycle(); - - let (store_client, _) = start_store(data_directory).await; - - wait_for_store(&store_client).await.unwrap(); - - // each request will have 5 account ids, 5 note tags and will be sent with block number 0 - let request = |_| { - let mut client = store_client.clone(); - let account_batch: Vec = - account_ids.by_ref().take(ACCOUNTS_PER_SYNC_STATE).collect(); - tokio::spawn(async move { sync_state(&mut client, account_batch, 0).await }) - }; - - // create a stream of tasks to send sync_notes requests - let (timers_accumulator, responses) = stream::iter(0..iterations) - .map(request) - .buffer_unordered(concurrency) - .map(|res| 
res.unwrap()) - .collect::<(Vec<_>, Vec<_>)>() - .await; - - print_summary(&timers_accumulator); - - #[expect(clippy::cast_precision_loss)] - let average_notes_per_response = - responses.iter().map(|r| r.notes.len()).sum::() as f64 / responses.len() as f64; - println!("Average notes per response: {average_notes_per_response}"); -} - -/// Sends a single `sync_state` request to the store and returns a tuple with: -/// - the elapsed time. -/// - the response. -pub async fn sync_state( - api_client: &mut RpcClient>, - account_ids: Vec, - block_num: u32, -) -> (Duration, proto::rpc::SyncStateResponse) { - let note_tags = account_ids - .iter() - .map(|id| u32::from(NoteTag::with_account_target(*id))) - .collect::>(); - - let account_ids = account_ids - .iter() - .map(|id| proto::account::AccountId { id: id.to_bytes() }) - .collect::>(); - - let sync_request = proto::rpc::SyncStateRequest { block_num, note_tags, account_ids }; - - let start = Instant::now(); - let response = api_client.sync_state(sync_request).await.unwrap(); - (start.elapsed(), response.into_inner()) -} - // SYNC NOTES // ================================================================================================ @@ -197,61 +123,68 @@ pub async fn bench_sync_nullifiers( .unwrap_or_else(|e| panic!("missing file {}: {e:?}", accounts_file.display())); let account_ids: Vec = accounts .lines() - .take(ACCOUNTS_PER_SYNC_STATE) + .take(ACCOUNTS_PER_SYNC_NOTES) .map(|a| AccountId::from_hex(a).unwrap()) .collect(); - // get all nullifier prefixes from the store + // Get all nullifier prefixes from the store using sync_notes let mut nullifier_prefixes: Vec = vec![]; let mut current_block_num = 0; loop { - // get the accounts notes - let (_, response) = - sync_state(&mut store_client, account_ids.clone(), current_block_num).await; + // Get the accounts notes using sync_notes + let note_tags: Vec = account_ids + .iter() + .map(|id| u32::from(NoteTag::with_account_target(*id))) + .collect(); + let sync_request = 
proto::rpc::SyncNotesRequest { + block_range: Some(proto::rpc::BlockRange { + block_from: current_block_num, + block_to: None, + }), + note_tags, + }; + let response = store_client.sync_notes(sync_request).await.unwrap().into_inner(); + let note_ids = response .notes .iter() .map(|n| n.note_id.unwrap()) .collect::>(); - // get the notes nullifiers, limiting to 20 notes maximum + // Get the notes nullifiers, limiting to 20 notes maximum let note_ids_to_fetch = note_ids.iter().take(NOTE_IDS_PER_NULLIFIERS_CHECK).copied().collect::>(); - let notes = store_client - .get_notes_by_id(proto::note::NoteIdList { ids: note_ids_to_fetch }) - .await - .unwrap() - .into_inner() - .notes; - - nullifier_prefixes.extend( - notes - .iter() - .filter_map(|n| { - // private notes are filtered out because `n.details` is None - let details_bytes = n.note.as_ref()?.details.as_ref()?; - let details = NoteDetails::read_from_bytes(details_bytes).unwrap(); - Some(u32::from(details.nullifier().prefix())) - }) - .collect::>(), - ); + if !note_ids_to_fetch.is_empty() { + let notes = store_client + .get_notes_by_id(proto::note::NoteIdList { ids: note_ids_to_fetch }) + .await + .unwrap() + .into_inner() + .notes; + + nullifier_prefixes.extend( + notes + .iter() + .filter_map(|n| { + // Private notes are filtered out because `n.details` is None + let details_bytes = n.note.as_ref()?.details.as_ref()?; + let details = NoteDetails::read_from_bytes(details_bytes).unwrap(); + Some(u32::from(details.nullifier().prefix())) + }) + .collect::>(), + ); + } - // Use the response from the first chunk to update block number - // (all chunks should return the same block header for the same block_num) - let (_, first_response) = sync_state( - &mut store_client, - account_ids[..1000.min(account_ids.len())].to_vec(), - current_block_num, - ) - .await; - current_block_num = first_response.block_header.unwrap().block_num; - if first_response.chain_tip == current_block_num { + // Update block number from pagination 
info + let pagination_info = response.pagination_info.expect("pagination_info should exist"); + current_block_num = pagination_info.block_num; + if pagination_info.chain_tip == current_block_num { break; } } let mut nullifiers = nullifier_prefixes.into_iter().cycle(); - // each request will have `prefixes_per_request` prefixes and block number 0 + // Each request will have `prefixes_per_request` prefixes and block number 0 let request = |_| { let mut client = store_client.clone(); @@ -260,7 +193,7 @@ pub async fn bench_sync_nullifiers( tokio::spawn(async move { sync_nullifiers(&mut client, nullifiers_batch).await }) }; - // create a stream of tasks to send the requests + // Create a stream of tasks to send the requests let (timers_accumulator, responses) = stream::iter(0..iterations) .map(request) .buffer_unordered(concurrency) @@ -481,6 +414,121 @@ async fn sync_transactions_paginated( } } +// SYNC CHAIN MMR +// ================================================================================================ + +/// Sends multiple `sync_chain_mmr` requests to the store and prints the performance. +/// +/// Arguments: +/// - `data_directory`: directory that contains the database dump file. +/// - `iterations`: number of requests to send. +/// - `concurrency`: number of requests to send in parallel. +/// - `block_range_size`: number of blocks to include per request. 
+pub async fn bench_sync_chain_mmr( + data_directory: PathBuf, + iterations: usize, + concurrency: usize, + block_range_size: u32, +) { + let (store_client, _) = start_store(data_directory).await; + + wait_for_store(&store_client).await.unwrap(); + + let chain_tip = store_client.clone().status(()).await.unwrap().into_inner().chain_tip; + let block_range_size = block_range_size.max(1); + + let request = |_| { + let mut client = store_client.clone(); + tokio::spawn(async move { + sync_chain_mmr_paginated(&mut client, chain_tip, block_range_size).await + }) + }; + + let results = stream::iter(0..iterations) + .map(request) + .buffer_unordered(concurrency) + .map(|res| res.unwrap()) + .collect::>() + .await; + + let timers_accumulator: Vec = results.iter().map(|r| r.duration).collect(); + + print_summary(&timers_accumulator); + + let total_runs = results.len(); + let paginated_runs = results.iter().filter(|r| r.pages > 1).count(); + #[expect(clippy::cast_precision_loss)] + let pagination_rate = if total_runs > 0 { + (paginated_runs as f64 / total_runs as f64) * 100.0 + } else { + 0.0 + }; + #[expect(clippy::cast_precision_loss)] + let avg_pages = if total_runs > 0 { + results.iter().map(|r| r.pages as f64).sum::() / total_runs as f64 + } else { + 0.0 + }; + + println!("Pagination statistics:"); + println!(" Total runs: {total_runs}"); + println!(" Runs triggering pagination: {paginated_runs}"); + println!(" Pagination rate: {pagination_rate:.2}%"); + println!(" Average pages per run: {avg_pages:.2}"); +} + +/// Sends a single `sync_chain_mmr` request to the store and returns a tuple with: +/// - the elapsed time. +/// - the response. 
+pub async fn sync_chain_mmr( + api_client: &mut RpcClient>, + block_from: u32, + block_to: u32, +) -> (Duration, proto::rpc::SyncChainMmrResponse) { + let sync_request = proto::rpc::SyncChainMmrRequest { + block_range: Some(proto::rpc::BlockRange { block_from, block_to: Some(block_to) }), + }; + + let start = Instant::now(); + let response = api_client.sync_chain_mmr(sync_request).await.unwrap(); + (start.elapsed(), response.into_inner()) +} + +#[derive(Clone)] +struct SyncChainMmrRun { + duration: Duration, + pages: usize, +} + +async fn sync_chain_mmr_paginated( + api_client: &mut RpcClient>, + chain_tip: u32, + block_range_size: u32, +) -> SyncChainMmrRun { + let mut total_duration = Duration::default(); + let mut pages = 0usize; + let mut next_block_from = 0u32; + + loop { + let target_block_to = next_block_from.saturating_add(block_range_size).min(chain_tip); + let (elapsed, response) = + sync_chain_mmr(api_client, next_block_from, target_block_to).await; + total_duration += elapsed; + pages += 1; + + let pagination_info = response.pagination_info.expect("pagination_info should exist"); + let _mmr_delta = response.mmr_delta.expect("mmr_delta should exist"); + + if pagination_info.block_num >= pagination_info.chain_tip { + break; + } + + next_block_from = pagination_info.block_num; + } + + SyncChainMmrRun { duration: total_duration, pages } +} + // LOAD STATE // ================================================================================================ diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index 0f436386a..cc3273e14 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -428,51 +428,27 @@ pub struct SyncNotesResponse { #[prost(message, repeated, tag = "4")] pub notes: ::prost::alloc::vec::Vec, } -/// State synchronization request. -/// -/// Specifies state updates the requester is interested in. 
The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -/// `account_ids` for that block range. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateRequest { - /// Last block known by the requester. The response will contain data starting from the next block, - /// until the first block which contains a note of matching the requested tag, or the chain tip - /// if there are no notes. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Accounts' commitment to include in the response. +/// Chain MMR synchronization request. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct SyncChainMmrRequest { + /// Block range from which to synchronize the chain MMR. /// - /// An account commitment will be included if-and-only-if it is the latest update. Meaning it is - /// possible there was an update to the account for the given range, but if it is not the latest, - /// it won't be included in the response. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "3")] - pub note_tags: ::prost::alloc::vec::Vec, + /// The response will contain MMR delta starting after `block_range.block_from` up to + /// `block_range.block_to` or the chain tip (whichever is lower). Set `block_from` to the last + /// block already present in the caller's MMR so the delta begins at the next block. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, } -/// Represents the result of syncing state request. +/// Represents the result of syncing chain MMR. #[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateResponse { - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// Block header of the block with the first note matching the specified criteria. 
+pub struct SyncChainMmrResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// Data needed to update the partial MMR from `request.block_range.block_from + 1` to + /// `pagination_info.block_num`. #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - #[prost(message, optional, tag = "3")] pub mmr_delta: ::core::option::Option, - /// List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - #[prost(message, repeated, tag = "5")] - pub accounts: ::prost::alloc::vec::Vec, - /// List of transactions executed against requested accounts between `request.block_num + 1` and - /// `response.block_header.block_num`. - #[prost(message, repeated, tag = "6")] - pub transactions: ::prost::alloc::vec::Vec, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. - #[prost(message, repeated, tag = "7")] - pub notes: ::prost::alloc::vec::Vec, } /// Storage map synchronization request. /// @@ -585,7 +561,7 @@ pub struct TransactionRecord { #[derive(Clone, PartialEq, ::prost::Message)] pub struct RpcLimits { /// Maps RPC endpoint names to their parameter limits. - /// Key: endpoint name (e.g., "CheckNullifiers", "SyncState") + /// Key: endpoint name (e.g., "CheckNullifiers") /// Value: map of parameter names to their limit values #[prost(map = "string, message", tag = "1")] pub endpoints: ::std::collections::HashMap< @@ -1076,26 +1052,11 @@ pub mod api_client { .insert(GrpcMethod::new("rpc.Api", "SyncAccountStorageMaps")); self.inner.unary(req, path, codec).await } - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects (accounts and notes) the client is interested in. 
- /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. Client is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the client is fully synchronized with the chain. - /// - /// Each update response also contains info about new notes, accounts etc. created. It also - /// returns Chain MMR delta that can be used to update the state of Chain MMR. This includes - /// both chain MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags contain only high - /// part of hashes. Thus, returned data contains excessive notes, client can make - /// additional filtering of that data on its side. - pub async fn sync_state( + pub async fn sync_chain_mmr( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -1107,9 +1068,9 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncState"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncChainMmr"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncState")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncChainMmr")); self.inner.unary(req, path, codec).await } } @@ -1275,26 +1236,11 @@ pub mod api_server { tonic::Response, tonic::Status, >; - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects (accounts and notes) the client is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. 
Client is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the client is fully synchronized with the chain. - /// - /// Each update response also contains info about new notes, accounts etc. created. It also - /// returns Chain MMR delta that can be used to update the state of Chain MMR. This includes - /// both chain MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags contain only high - /// part of hashes. Thus, returned data contains excessive notes, client can make - /// additional filtering of that data on its side. - async fn sync_state( + async fn sync_chain_mmr( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; } @@ -2041,23 +1987,23 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncState" => { + "/rpc.Api/SyncChainMmr" => { #[allow(non_camel_case_types)] - struct SyncStateSvc(pub Arc); - impl tonic::server::UnaryService - for SyncStateSvc { - type Response = super::SyncStateResponse; + struct SyncChainMmrSvc(pub Arc); + impl tonic::server::UnaryService + for SyncChainMmrSvc { + type Response = super::SyncChainMmrResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_state(&inner, request).await + ::sync_chain_mmr(&inner, request).await }; Box::pin(fut) } @@ -2068,7 +2014,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncStateSvc(inner); + let method = SyncChainMmrSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( diff --git a/crates/proto/src/generated/store.rs 
b/crates/proto/src/generated/store.rs index 5fad016e1..49081b933 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -639,26 +639,12 @@ pub mod rpc_client { req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNotes")); self.inner.unary(req, path, codec).await } - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. - /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. - pub async fn sync_state( + /// Returns chain MMR updates within a block range. 
+ pub async fn sync_chain_mmr( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -670,9 +656,9 @@ pub mod rpc_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncState"); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncChainMmr"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncState")); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncChainMmr")); self.inner.unary(req, path, codec).await } /// Returns account vault updates for specified account within a block range. @@ -862,26 +848,12 @@ pub mod rpc_server { tonic::Response, tonic::Status, >; - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. - /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. - async fn sync_state( + /// Returns chain MMR updates within a block range. 
+ async fn sync_chain_mmr( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns account vault updates for specified account within a block range. @@ -1394,25 +1366,27 @@ pub mod rpc_server { }; Box::pin(fut) } - "/store.Rpc/SyncState" => { + "/store.Rpc/SyncChainMmr" => { #[allow(non_camel_case_types)] - struct SyncStateSvc(pub Arc); + struct SyncChainMmrSvc(pub Arc); impl< T: Rpc, - > tonic::server::UnaryService - for SyncStateSvc { - type Response = super::super::rpc::SyncStateResponse; + > tonic::server::UnaryService + for SyncChainMmrSvc { + type Response = super::super::rpc::SyncChainMmrResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request< + super::super::rpc::SyncChainMmrRequest, + >, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_state(&inner, request).await + ::sync_chain_mmr(&inner, request).await }; Box::pin(fut) } @@ -1423,7 +1397,7 @@ pub mod rpc_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncStateSvc(inner); + let method = SyncChainMmrSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 30ec4dcb8..926fe0ee8 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -38,7 +38,7 @@ url = { workspace = true } [dev-dependencies] miden-air = { features = ["testing"], workspace = true } -miden-node-store = { workspace = true } +miden-node-store = { features = ["rocksdb"], workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } miden-standards = { 
workspace = true } diff --git a/crates/rpc/README.md b/crates/rpc/README.md index 4d3cf9387..13c8debce 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -24,7 +24,6 @@ The full gRPC method definitions can be found in the [proto](../proto/README.md) - [SubmitProvenTransaction](#submitproventransaction) - [SyncAccountVault](#SyncAccountVault) - [SyncNotes](#syncnotes) -- [SyncState](#syncstate) - [SyncAccountStorageMaps](#syncaccountstoragemaps) - [SyncTransactions](#synctransactions) @@ -215,25 +214,6 @@ When note synchronization fails, detailed error information is provided through --- -### SyncState - -Returns info which can be used by the client to sync up to the latest state of the chain for the objects (accounts and -notes) the client is interested in. - -**Limits:** `account_id` (1000), `note_tag` (1000) - -This request returns the next block containing requested data. It also returns `chain_tip` which is the latest block -number in the chain. Client is expected to repeat these requests in a loop until -`response.block_header.block_num == response.chain_tip`, at which point the client is fully synchronized with the chain. - -Each request also returns info about new notes, accounts, etc. created. It also returns Chain MMR delta that can be -used to update the state of Chain MMR. This includes both chain MMR peaks and chain MMR nodes. - -For preserving some degree of privacy, note tags contain only high part of hashes. Thus, returned data contains excessive -notes, client can make additional filtering of that data on its side. - ---- - ### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. 
diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index f5e3c2b82..96836add9 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -192,16 +192,13 @@ impl api_server::Api for RpcService { self.store.clone().get_block_header_by_number(request).await } - async fn sync_state( + async fn sync_chain_mmr( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); - check::(request.get_ref().account_ids.len())?; - check::(request.get_ref().note_tags.len())?; - - self.store.clone().sync_state(request).await + self.store.clone().sync_chain_mmr(request).await } async fn sync_account_storage_maps( @@ -536,11 +533,16 @@ static RPC_LIMITS: LazyLock = LazyLock::new(|| { endpoint_limits(&[(Nullifier::PARAM_NAME, Nullifier::LIMIT)]), ), ( - "SyncState".into(), - endpoint_limits(&[ - (AccountId::PARAM_NAME, AccountId::LIMIT), - (NoteTag::PARAM_NAME, NoteTag::LIMIT), - ]), + "SyncTransactions".into(), + endpoint_limits(&[(AccountId::PARAM_NAME, AccountId::LIMIT)]), + ), + ( + "SyncAccountVault".into(), + endpoint_limits(&[(AccountId::PARAM_NAME, AccountId::LIMIT)]), + ), + ( + "SyncAccountStorageMaps".into(), + endpoint_limits(&[(AccountId::PARAM_NAME, AccountId::LIMIT)]), ), ("SyncNotes".into(), endpoint_limits(&[(NoteTag::PARAM_NAME, NoteTag::LIMIT)])), ("GetNotesById".into(), endpoint_limits(&[(NoteId::PARAM_NAME, NoteId::LIMIT)])), diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index a0b7854e5..472e62daf 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -13,7 +13,6 @@ use miden_node_utils::limiter::{ QueryParamAccountIdLimit, QueryParamLimiter, QueryParamNoteIdLimit, - QueryParamNoteTagLimit, QueryParamNullifierLimit, }; use miden_protocol::Word; @@ -496,27 +495,43 @@ async fn get_limits_endpoint() { limits.endpoints.get("CheckNullifiers").expect("CheckNullifiers should exist"); assert_eq!( - 
check_nullifiers.parameters.get("nullifier"), + check_nullifiers.parameters.get(QueryParamNullifierLimit::PARAM_NAME), Some(&(QueryParamNullifierLimit::LIMIT as u32)), - "CheckNullifiers nullifier limit should be {}", + "CheckNullifiers {} limit should be {}", + QueryParamNullifierLimit::PARAM_NAME, QueryParamNullifierLimit::LIMIT ); - // Verify SyncState endpoint has multiple parameters - let sync_state = limits.endpoints.get("SyncState").expect("SyncState should exist"); + let sync_transactions = + limits.endpoints.get("SyncTransactions").expect("SyncTransactions should exist"); assert_eq!( - sync_state.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), + sync_transactions.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), Some(&(QueryParamAccountIdLimit::LIMIT as u32)), - "SyncState {} limit should be {}", + "SyncTransactions {} limit should be {}", QueryParamAccountIdLimit::PARAM_NAME, QueryParamAccountIdLimit::LIMIT ); + + let sync_account_vault = + limits.endpoints.get("SyncAccountVault").expect("SyncAccountVault should exist"); assert_eq!( - sync_state.parameters.get(QueryParamNoteTagLimit::PARAM_NAME), - Some(&(QueryParamNoteTagLimit::LIMIT as u32)), - "SyncState {} limit should be {}", - QueryParamNoteTagLimit::PARAM_NAME, - QueryParamNoteTagLimit::LIMIT + sync_account_vault.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), + Some(&(QueryParamAccountIdLimit::LIMIT as u32)), + "SyncAccountVault {} limit should be {}", + QueryParamAccountIdLimit::PARAM_NAME, + QueryParamAccountIdLimit::LIMIT + ); + + let sync_account_storage_maps = limits + .endpoints + .get("SyncAccountStorageMaps") + .expect("SyncAccountStorageMaps should exist"); + assert_eq!( + sync_account_storage_maps.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), + Some(&(QueryParamAccountIdLimit::LIMIT as u32)), + "SyncAccountStorageMaps {} limit should be {}", + QueryParamAccountIdLimit::PARAM_NAME, + QueryParamAccountIdLimit::LIMIT ); // Verify GetNotesById endpoint @@ -532,3 
+547,25 @@ async fn get_limits_endpoint() { // Shutdown to avoid runtime drop error. shutdown_store(store_runtime).await; } + +#[tokio::test] +async fn sync_chain_mmr_returns_delta() { + let (mut rpc_client, _rpc_addr, store_addr) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + + let request = proto::rpc::SyncChainMmrRequest { + block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None }), + }; + let response = rpc_client.sync_chain_mmr(request).await.expect("sync_chain_mmr should succeed"); + let response = response.into_inner(); + + let pagination_info = response.pagination_info.expect("pagination_info should exist"); + assert_eq!(pagination_info.chain_tip, 0); + assert_eq!(pagination_info.block_num, 0); + + let mmr_delta = response.mmr_delta.expect("mmr_delta should exist"); + assert_eq!(mmr_delta.forest, 0); + assert!(mmr_delta.data.is_empty()); + + shutdown_store(store_runtime).await; +} diff --git a/crates/store/README.md b/crates/store/README.md index ea44889d0..3ca7e19aa 100644 --- a/crates/store/README.md +++ b/crates/store/README.md @@ -54,7 +54,6 @@ The full gRPC API can be found [here](../../proto/proto/store.proto). - [SyncNullifiers](#syncnullifiers) - [SyncAccountVault](#syncaccountvault) - [SyncNotes](#syncnotes) -- [SyncState](#syncstate) - [SyncAccountStorageMaps](#syncaccountstoragemaps) - [SyncTransactions](#synctransactions) @@ -228,23 +227,6 @@ When note synchronization fails, detailed error information is provided through --- -### SyncState - -Returns info which can be used by the client to sync up to the latest state of the chain for the objects (accounts, -notes, nullifiers) the client is interested in. - -This request returns the next block containing requested data. It also returns `chain_tip` which is the latest block -number in the chain. 
Client is expected to repeat these requests in a loop until -`response.block_header.block_num == response.chain_tip`, at which point the client is fully synchronized with the chain. - -Each request also returns info about new notes, nullifiers etc. created. It also returns Chain MMR delta that can be -used to update the state of Chain MMR. This includes both chain MMR peaks and chain MMR nodes. - -For preserving some degree of privacy, note tags and nullifiers filters contain only high part of hashes. Thus, returned -data contains excessive notes and nullifiers, client can make additional filtering of that data on its side. - ---- - ### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index a9b77eb9b..54bf22501 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -4,7 +4,7 @@ use std::path::PathBuf; use anyhow::Context; use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection}; -use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; +use miden_node_proto::domain::account::AccountInfo; use miden_node_proto::generated as proto; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; @@ -36,7 +36,7 @@ pub use crate::db::models::queries::{ PublicAccountIdsPage, }; use crate::db::models::{Page, queries}; -use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError}; +use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError}; use crate::genesis::GenesisBlock; pub(crate) mod manager; @@ -93,13 +93,6 @@ impl PartialEq<(Nullifier, BlockNumber)> for NullifierInfo { } } -#[derive(Debug, PartialEq)] -pub struct TransactionSummary { - pub account_id: AccountId, - pub block_num: 
BlockNumber, - pub transaction_id: TransactionId, -} - #[derive(Debug, PartialEq)] pub struct TransactionRecord { pub block_num: BlockNumber, @@ -177,14 +170,6 @@ impl From for proto::note::NoteSyncRecord { } } -#[derive(Debug, PartialEq)] -pub struct StateSyncUpdate { - pub notes: Vec, - pub block_header: BlockHeader, - pub account_updates: Vec, - pub transactions: Vec, -} - #[derive(Debug, PartialEq)] pub struct NoteSyncUpdate { pub notes: Vec, @@ -521,19 +506,6 @@ impl Db { .await } - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn get_state_sync( - &self, - block_number: BlockNumber, - account_ids: Vec, - note_tags: Vec, - ) -> Result { - self.transact::("state sync", move |conn| { - queries::get_state_sync(conn, block_number, account_ids, note_tags) - }) - .await - } - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn get_note_sync( &self, diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 0a252b550..9e01c15c1 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -18,11 +18,7 @@ use diesel::{ SqliteConnection, }; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; -use miden_node_utils::limiter::{ - MAX_RESPONSE_PAYLOAD_BYTES, - QueryParamAccountIdLimit, - QueryParamLimiter, -}; +use miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES; use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ @@ -45,7 +41,8 @@ use miden_protocol::utils::{Deserializable, Serializable}; use crate::COMPONENT; use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, raw_sql_to_nonce}; -use crate::db::models::{serialize_vec, vec_raw_try_into}; +#[cfg(test)] +use crate::db::models::vec_raw_try_into; use crate::db::{AccountVaultValue, schema}; use 
crate::errors::DatabaseError; @@ -484,49 +481,6 @@ pub(crate) fn select_account_vault_assets( Ok((last_block_included, values)) } -/// Select [`AccountSummary`] from the DB using the given [`SqliteConnection`], given that the -/// account update was in the given block range (inclusive). -/// -/// # Returns -/// -/// The vector of [`AccountSummary`] with the matching accounts. -/// -/// # Raw SQL -/// -/// ```sql -/// SELECT -/// account_id, -/// account_commitment, -/// block_num -/// FROM -/// accounts -/// WHERE -/// block_num > ?1 AND -/// block_num <= ?2 AND -/// account_id IN (?3) -/// ORDER BY -/// block_num ASC -/// ``` -pub fn select_accounts_by_block_range( - conn: &mut SqliteConnection, - account_ids: &[AccountId], - block_range: RangeInclusive, -) -> Result, DatabaseError> { - QueryParamAccountIdLimit::check(account_ids.len())?; - - let desired_account_ids = serialize_vec(account_ids); - let raw: Vec = - SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) - .filter(schema::accounts::block_num.gt(block_range.start().to_raw_sql())) - .filter(schema::accounts::block_num.le(block_range.end().to_raw_sql())) - .filter(schema::accounts::account_id.eq_any(desired_account_ids)) - .order(schema::accounts::block_num.asc()) - .load::(conn)?; - // SAFETY `From` implies `TryFrom `AccountSummary` - Ok(vec_raw_try_into(raw).unwrap()) -} - /// Select all accounts from the DB using the given [`SqliteConnection`]. /// /// # Returns diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs index 2cec3523e..35c38c5ad 100644 --- a/crates/store/src/db/models/queries/mod.rs +++ b/crates/store/src/db/models/queries/mod.rs @@ -25,21 +25,14 @@ //! transaction, any nesting of further `transaction(conn, || {})` has no effect and should be //! considered unnecessary boilerplate by default. 
-#![expect( - clippy::needless_pass_by_value, - reason = "The parent scope does own it, passing by value avoids additional boilerplate" -)] - use diesel::SqliteConnection; use miden_crypto::dsa::ecdsa_k256_keccak::Signature; -use miden_protocol::account::AccountId; -use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_protocol::block::{BlockAccountUpdate, BlockHeader}; use miden_protocol::note::Nullifier; use miden_protocol::transaction::OrderedTransactionHeaders; use super::DatabaseError; -use crate::db::{NoteRecord, StateSyncUpdate}; -use crate::errors::StateSyncError; +use crate::db::NoteRecord; mod transactions; pub use transactions::*; @@ -77,52 +70,3 @@ pub(crate) fn apply_block( count += insert_nullifiers_for_block(conn, nullifiers, block_header.block_num())?; Ok(count) } - -/// Loads the state necessary for a state sync -/// -/// The state sync covers from `from_start_block` until the last block that has a note matching the -/// given `note_tags`. -pub(crate) fn get_state_sync( - conn: &mut SqliteConnection, - from_start_block: BlockNumber, - account_ids: Vec, - note_tags: Vec, -) -> Result { - let chain_tip = select_block_header_by_block_num(conn, None)? - .expect("Chain tip is not found") - .block_num(); - - // Sync notes from the starting block to the latest in the chain. - let block_range = from_start_block..=chain_tip; - - // select notes since block by tag and sender - let (notes, _) = select_notes_since_block_by_tag_and_sender( - conn, - &account_ids[..], - ¬e_tags[..], - block_range, - )?; - - // select block header by block num - let maybe_note_block_num = notes.first().map(|note| note.block_num); - let block_header: BlockHeader = select_block_header_by_block_num(conn, maybe_note_block_num)? 
- .ok_or_else(|| StateSyncError::EmptyBlockHeadersTable)?; - - // select accounts by block range - let to_end_block = block_header.block_num(); - let account_updates = - select_accounts_by_block_range(conn, &account_ids, from_start_block..=to_end_block)?; - - // select transactions by accounts and block range - let transactions = select_transactions_by_accounts_and_block_range( - conn, - &account_ids, - from_start_block..=to_end_block, - )?; - Ok(StateSyncUpdate { - notes, - block_header, - account_updates, - transactions, - }) -} diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index 1331d7ea5..3e7e30df2 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -27,67 +27,7 @@ use super::DatabaseError; use crate::COMPONENT; use crate::db::models::conv::SqlTypeConvert; use crate::db::models::{serialize_vec, vec_raw_try_into}; -use crate::db::{TransactionSummary, schema}; - -/// Select transactions for given accounts in a specified block range -/// -/// # Parameters -/// * `account_ids`: List of account IDs to filter by -/// - Limit: 0 <= size <= 1000 -/// * `block_range`: Range of blocks to include inclusive -/// -/// # Returns -/// -/// A vector of [`TransactionSummary`] types or an error. 
-/// -/// # Raw SQL -/// ```sql -/// SELECT -/// account_id, -/// block_num, -/// transaction_id -/// FROM -/// transactions -/// WHERE -/// block_num > ?1 AND -/// block_num <= ?2 AND -/// account_id IN (?3) -/// ORDER BY -/// transaction_id ASC -/// ``` -pub fn select_transactions_by_accounts_and_block_range( - conn: &mut SqliteConnection, - account_ids: &[AccountId], - block_range: RangeInclusive, -) -> Result, DatabaseError> { - QueryParamAccountIdLimit::check(account_ids.len())?; - - let desired_account_ids = serialize_vec(account_ids); - let raw = SelectDsl::select( - schema::transactions::table, - ( - schema::transactions::account_id, - schema::transactions::block_num, - schema::transactions::transaction_id, - ), - ) - .filter(schema::transactions::block_num.gt(block_range.start().to_raw_sql())) - .filter(schema::transactions::block_num.le(block_range.end().to_raw_sql())) - .filter(schema::transactions::account_id.eq_any(desired_account_ids)) - .order(schema::transactions::transaction_id.asc()) - .load::(conn) - .map_err(DatabaseError::from)?; - vec_raw_try_into(raw) -} - -#[derive(Debug, Clone, PartialEq, Queryable, Selectable, QueryableByName)] -#[diesel(table_name = schema::transactions)] -#[diesel(check_for_backend(diesel::sqlite::Sqlite))] -pub struct TransactionSummaryRaw { - account_id: Vec, - block_num: i64, - transaction_id: Vec, -} +use crate::db::schema; #[derive(Debug, Clone, PartialEq, Queryable, Selectable, QueryableByName)] #[diesel(table_name = schema::transactions)] @@ -103,17 +43,6 @@ pub struct TransactionRecordRaw { size_in_bytes: i64, } -impl TryInto for TransactionSummaryRaw { - type Error = DatabaseError; - fn try_into(self) -> Result { - Ok(crate::db::TransactionSummary { - account_id: AccountId::read_from_bytes(&self.account_id[..])?, - block_num: BlockNumber::from_raw_sql(self.block_num)?, - transaction_id: TransactionId::read_from_bytes(&self.transaction_id[..])?, - }) - } -} - impl TryInto for TransactionRecordRaw { type Error = 
DatabaseError; fn try_into(self) -> Result { diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 65e93c283..8266b8739 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -49,7 +49,6 @@ use miden_protocol::note::{ use miden_protocol::testing::account_id::{ ACCOUNT_ID_PRIVATE_SENDER, ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, - ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; @@ -70,7 +69,6 @@ use pretty_assertions::assert_eq; use rand::Rng; use super::{AccountInfo, NoteRecord, NullifierInfo}; -use crate::db::TransactionSummary; use crate::db::migrations::apply_migrations; use crate::db::models::queries::{StorageMapValue, insert_account_storage_map_value}; use crate::db::models::{Page, queries, utils}; @@ -160,33 +158,6 @@ fn sql_insert_transactions() { assert_eq!(count, 2, "Two elements must have been inserted"); } -#[test] -#[miden_node_test_macro::enable_logging] -fn sql_select_transactions() { - fn query_transactions(conn: &mut SqliteConnection) -> Vec { - queries::select_transactions_by_accounts_and_block_range( - conn, - &[AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap()], - BlockNumber::GENESIS..=BlockNumber::from(2), - ) - .unwrap() - } - - let mut conn = create_db(); - let conn = &mut conn; - let transactions = query_transactions(conn); - - assert!(transactions.is_empty(), "No elements must be initially in the DB"); - - let count = insert_transactions(conn); - - assert_eq!(count, 2, "Two elements must have been inserted"); - - let transactions = query_transactions(conn); - - assert_eq!(transactions.len(), 2, "Two elements must be in the DB"); -} - #[test] #[miden_node_test_macro::enable_logging] fn sql_select_nullifiers() { @@ -808,80 +779,6 @@ fn db_block_header() { assert_eq!(res, [block_header, block_header2]); } -#[test] -#[miden_node_test_macro::enable_logging] -fn db_account() { - let mut conn 
= create_db(); - let conn = &mut conn; - let block_num: BlockNumber = 1.into(); - create_block(conn, block_num); - - // test empty table - let account_ids: Vec = - [ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, 1, 2, 3, 4, 5] - .iter() - .map(|acc_id| (*acc_id).try_into().unwrap()) - .collect(); - let res = queries::select_accounts_by_block_range( - conn, - &account_ids, - BlockNumber::GENESIS..=u32::MAX.into(), - ) - .unwrap(); - assert!(res.is_empty()); - - // test insertion - let account_id = ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE; - let account_commitment = num_to_word(0); - - let row_count = queries::upsert_accounts( - conn, - &[BlockAccountUpdate::new( - account_id.try_into().unwrap(), - account_commitment, - AccountUpdateDetails::Private, - )], - block_num, - ) - .unwrap(); - - assert_eq!(row_count, 1); - - // test successful query - let res = queries::select_accounts_by_block_range( - conn, - &account_ids, - BlockNumber::GENESIS..=u32::MAX.into(), - ) - .unwrap(); - assert_eq!( - res, - vec![AccountSummary { - account_id: account_id.try_into().unwrap(), - account_commitment, - block_num, - }] - ); - - // test query for update outside the block range - let res = queries::select_accounts_by_block_range( - conn, - &account_ids, - (block_num.as_u32() + 1).into()..=u32::MAX.into(), - ) - .unwrap(); - assert!(res.is_empty()); - - // test query with unknown accounts - let res = queries::select_accounts_by_block_range( - conn, - &[6.try_into().unwrap(), 7.try_into().unwrap(), 8.try_into().unwrap()], - (block_num + 1)..=u32::MAX.into(), - ) - .unwrap(); - assert!(res.is_empty()); -} - #[test] #[miden_node_test_macro::enable_logging] fn notes() { @@ -2010,47 +1907,6 @@ fn db_roundtrip_notes() { ); } -#[test] -#[miden_node_test_macro::enable_logging] -fn db_roundtrip_transactions() { - let mut conn = create_db(); - let block_num = BlockNumber::from(1); - create_block(&mut conn, block_num); - - let account_id = 
AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); - queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block_num) - .unwrap(); - - let tx = mock_block_transaction(account_id, 1); - let ordered_tx = OrderedTransactionHeaders::new_unchecked(vec![tx.clone()]); - - // Insert - queries::insert_transactions(&mut conn, block_num, &ordered_tx).unwrap(); - - // Retrieve - let retrieved = queries::select_transactions_by_accounts_and_block_range( - &mut conn, - &[account_id], - BlockNumber::GENESIS..=BlockNumber::from(2), - ) - .unwrap(); - - assert_eq!(retrieved.len(), 1, "Should have one transaction"); - let retrieved_tx = &retrieved[0]; - - assert_eq!( - tx.account_id(), - retrieved_tx.account_id, - "AccountId DB roundtrip must be symmetric" - ); - assert_eq!( - tx.id(), - retrieved_tx.transaction_id, - "TransactionId DB roundtrip must be symmetric" - ); - assert_eq!(block_num, retrieved_tx.block_num, "Block number must match"); -} - #[test] #[miden_node_test_macro::enable_logging] fn db_roundtrip_vault_assets() { diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index cbd98af75..947a0bcfc 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -359,6 +359,19 @@ pub enum StateSyncError { FailedToBuildMmrDelta(#[from] MmrError), } +#[derive(Error, Debug, GrpcError)] +pub enum SyncChainMmrError { + #[error("invalid block range")] + InvalidBlockRange(#[source] InvalidBlockRange), + #[error("start block is not known")] + FutureBlock { + chain_tip: BlockNumber, + block_from: BlockNumber, + }, + #[error("malformed block number")] + DeserializationFailed(#[source] ConversionError), +} + impl From for StateSyncError { fn from(value: diesel::result::Error) -> Self { Self::DatabaseError(DatabaseError::from(value)) diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 6c78e1ebf..f5d12d6b4 100644 --- a/crates/store/src/server/rpc_api.rs +++ 
b/crates/store/src/server/rpc_api.rs @@ -1,4 +1,6 @@ use miden_node_proto::convert; +use miden_node_proto::domain::block::InvalidBlockRange; +use miden_node_proto::errors::MissingFieldHelper; use miden_node_proto::generated::store::rpc_server; use miden_node_proto::generated::{self as proto}; use miden_node_utils::limiter::{ @@ -10,6 +12,7 @@ use miden_node_utils::limiter::{ }; use miden_protocol::Word; use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; use miden_protocol::note::NoteId; use tonic::{Request, Response, Status}; use tracing::{debug, info}; @@ -24,6 +27,7 @@ use crate::errors::{ NoteSyncError, SyncAccountStorageMapsError, SyncAccountVaultError, + SyncChainMmrError, SyncNullifiersError, SyncTransactionsError, }; @@ -118,54 +122,6 @@ impl rpc_server::Rpc for StoreApi { })) } - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects the client is interested in. - async fn sync_state( - &self, - request: Request, - ) -> Result, Status> { - let request = request.into_inner(); - - let account_ids: Vec = read_account_ids::(&request.account_ids)?; - - let (state, delta) = self - .state - .sync_state(request.block_num.into(), account_ids, request.note_tags) - .await - .map_err(internal_error)?; - - let accounts = state - .account_updates - .into_iter() - .map(|account_info| proto::account::AccountSummary { - account_id: Some(account_info.account_id.into()), - account_commitment: Some(account_info.account_commitment.into()), - block_num: account_info.block_num.as_u32(), - }) - .collect(); - - let transactions = state - .transactions - .into_iter() - .map(|transaction_summary| proto::transaction::TransactionSummary { - account_id: Some(transaction_summary.account_id.into()), - block_num: transaction_summary.block_num.as_u32(), - transaction_id: Some(transaction_summary.transaction_id.into()), - }) - .collect(); - - let notes = 
state.notes.into_iter().map(Into::into).collect(); - - Ok(Response::new(proto::rpc::SyncStateResponse { - chain_tip: self.state.latest_block_num().await.as_u32(), - block_header: Some(state.block_header.into()), - mmr_delta: Some(delta.into()), - accounts, - transactions, - notes, - })) - } - /// Returns info which can be used by the client to sync note state. async fn sync_notes( &self, @@ -197,6 +153,58 @@ impl rpc_server::Rpc for StoreApi { })) } + /// Returns chain MMR updates within a block range. + async fn sync_chain_mmr( + &self, + request: Request, + ) -> Result, Status> { + // TODO find a reasonable upper boundary + const MAX_BLOCKS: u32 = 1 << 20; + + let request = request.into_inner(); + let chain_tip = self.state.latest_block_num().await; + + let block_range = request + .block_range + .ok_or_else(|| proto::rpc::SyncChainMmrRequest::missing_field(stringify!(block_range))) + .map_err(SyncChainMmrError::DeserializationFailed)?; + + let block_from = BlockNumber::from(block_range.block_from); + if block_from > chain_tip { + Err(SyncChainMmrError::FutureBlock { chain_tip, block_from })?; + } + + let block_to = block_range.block_to.map_or(chain_tip, BlockNumber::from).min(chain_tip); + + if block_from > block_to { + Err(SyncChainMmrError::InvalidBlockRange(InvalidBlockRange::StartGreaterThanEnd { + start: block_from, + end: block_to, + }))?; + } + let block_range = block_from..=block_to; + let len = 1 + block_range.end().as_u32() - block_range.start().as_u32(); + let trimmed_block_range = if len > MAX_BLOCKS { + block_from..=BlockNumber::from(block_from.as_u32() + MAX_BLOCKS) + } else { + block_range + }; + + let mmr_delta = self + .state + .sync_chain_mmr(trimmed_block_range.clone()) + .await + .map_err(internal_error)?; + + Ok(Response::new(proto::rpc::SyncChainMmrResponse { + pagination_info: Some(proto::rpc::PaginationInfo { + chain_tip: chain_tip.as_u32(), + block_num: trimmed_block_range.end().as_u32(), + }), + mmr_delta: Some(mmr_delta.into()), + })) + 
} + /// Returns a list of [`Note`]s for the specified [`NoteId`]s. /// /// If the list is empty or no [`Note`] matched the requested [`NoteId`] and empty list is diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 66c5efb44..d237716f3 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -346,7 +346,7 @@ pub async fn load_mmr(db: &mut Db) -> Result, + ) -> Result { + let inner = self.inner.read().await; + + let block_from = *block_range.start(); + let block_to = *block_range.end(); + + if block_from == block_to { + return Ok(MmrDelta { + forest: Forest::new(block_from.as_usize()), + data: vec![], + }); + } + + // Important notes about the boundary conditions: + // + // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. The Mmr root + // contained in the block header always lag behind by one block, this is because the Mmr + // leaves are hashes of block headers, and we can't have self-referential hashes. These + // two points cancel out and don't require adjusting. + // - Mmr::get_delta is inclusive, whereas the sync request block_from is defined to be the + // last block already present in the caller's MMR. The delta should therefore start at the + // next block, so the from_forest has to be adjusted with a +1. + let from_forest = (block_from + 1).as_usize(); + let to_forest = block_to.as_usize(); + + inner + .blockchain + .as_mmr() + .get_delta(Forest::new(from_forest), Forest::new(to_forest)) + .map_err(StateSyncError::FailedToBuildMmrDelta) + } + /// Loads data to synchronize a client's notes. /// /// The client's request contains a list of tags, this method will return the first @@ -83,59 +120,4 @@ impl State { ) -> Result { self.db.select_storage_map_sync_values(account_id, block_range).await } - - // FULL STATE SYNCHRONIZATION - // -------------------------------------------------------------------------------------------- - - /// Loads data to synchronize a client. 
- /// - /// The client's request contains a list of note tags, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filtered based on this - /// block range. - /// - /// # Arguments - /// - /// - `block_num`: The last block *known* by the client, updates start from the next block. - /// - `account_ids`: Include the account's commitment if their _last change_ was in the result's - /// block range. - /// - `note_tags`: The tags the client is interested in, result is restricted to the first block - /// with any matches tags. - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn sync_state( - &self, - block_num: BlockNumber, - account_ids: Vec, - note_tags: Vec, - ) -> Result<(StateSyncUpdate, MmrDelta), StateSyncError> { - let inner = self.inner.read().await; - - let state_sync = self.db.get_state_sync(block_num, account_ids, note_tags).await?; - - let delta = if block_num == state_sync.block_header.block_num() { - // The client is in sync with the chain tip. - MmrDelta { - forest: Forest::new(block_num.as_usize()), - data: vec![], - } - } else { - // Important notes about the boundary conditions: - // - // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. The Mmr root - // contained in the block header always lag behind by one block, this is because the Mmr - // leaves are hashes of block headers, and we can't have self-referential hashes. These - // two points cancel out and don't require adjusting. - // - Mmr::get_delta is inclusive, whereas the sync_state request block_num is defined to - // be - // exclusive, so the from_forest has to be adjusted with a +1 - let from_forest = (block_num + 1).as_usize(); - let to_forest = state_sync.block_header.block_num().as_usize(); - inner - .blockchain - .as_mmr() - .get_delta(Forest::new(from_forest), Forest::new(to_forest)) - .map_err(StateSyncError::FailedToBuildMmrDelta)? 
- }; - - Ok((state_sync, delta)) - } } diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index 2b222e23e..821b6755c 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -46,21 +46,21 @@ pub trait QueryParamLimiter { /// store. pub const MAX_RESPONSE_PAYLOAD_BYTES: usize = 4 * 1024 * 1024; -/// Used for the following RPC endpoints -/// * `state_sync` +/// Used for the following RPC endpoints: +/// * `sync_transactions` /// /// Capped at 1000 account IDs to keep SQL `IN` clauses bounded and response payloads under the -/// 4 MB budget. +/// 4 MB budget. pub struct QueryParamAccountIdLimit; impl QueryParamLimiter for QueryParamAccountIdLimit { const PARAM_NAME: &str = "account_id"; const LIMIT: usize = GENERAL_REQUEST_LIMIT; } -/// Used for the following RPC endpoints +/// Used for the following RPC endpoints: /// * `select_nullifiers_by_prefix` /// -/// Capped at 1000 prefixes to keep queries and responses comfortably within the 4 MB payload +/// Capped at 1000 prefixes to keep queries and responses comfortably within the 4 MB payload /// budget and to avoid unbounded prefix scans. pub struct QueryParamNullifierPrefixLimit; impl QueryParamLimiter for QueryParamNullifierPrefixLimit { @@ -68,12 +68,11 @@ impl QueryParamLimiter for QueryParamNullifierPrefixLimit { const LIMIT: usize = GENERAL_REQUEST_LIMIT; } -/// Used for the following RPC endpoints +/// Used for the following RPC endpoints: /// * `select_nullifiers_by_prefix` /// * `sync_nullifiers` -/// * `sync_state` /// -/// Capped at 1000 nullifiers to bound `IN` clauses and keep response sizes under the 4 MB budget. +/// Capped at 1000 nullifiers to bound `IN` clauses and keep response sizes under the 4 MB budget. 
pub struct QueryParamNullifierLimit; impl QueryParamLimiter for QueryParamNullifierLimit { const PARAM_NAME: &str = "nullifier"; @@ -83,7 +82,7 @@ impl QueryParamLimiter for QueryParamNullifierLimit { /// Used for the following RPC endpoints /// * `get_note_sync` /// -/// Capped at 1000 tags so note sync responses remain within the 4 MB payload budget. +/// Capped at 1000 tags so note sync responses remain within the 4 MB payload budget. pub struct QueryParamNoteTagLimit; impl QueryParamLimiter for QueryParamNoteTagLimit { const PARAM_NAME: &str = "note_tag"; @@ -103,7 +102,7 @@ impl QueryParamLimiter for QueryParamNoteIdLimit { /// Used for internal queries retrieving note inclusion proofs by commitment. /// -/// Capped at 1000 commitments to keep internal proof lookups bounded and responses under the 4 MB +/// Capped at 1000 commitments to keep internal proof lookups bounded and responses under the 4 MB /// payload cap. pub struct QueryParamNoteCommitmentLimit; impl QueryParamLimiter for QueryParamNoteCommitmentLimit { @@ -114,13 +113,23 @@ impl QueryParamLimiter for QueryParamNoteCommitmentLimit { /// Only used internally, not exposed via public RPC. /// /// Capped at 1000 block headers to bound internal batch operations and keep payloads below the -/// 4 MB limit. +/// 4 MB limit. pub struct QueryParamBlockLimit; impl QueryParamLimiter for QueryParamBlockLimit { const PARAM_NAME: &str = "block_header"; const LIMIT: usize = GENERAL_REQUEST_LIMIT; } +/// Used for the following RPC endpoints: +/// * `sync_chain_mmr` +/// +/// Capped at 1000 blocks to keep MMR deltas within the 4 MB payload budget. 
+pub struct QueryParamBlockRangeLimit; +impl QueryParamLimiter for QueryParamBlockRangeLimit { + const PARAM_NAME: &str = "block_range"; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; +} + /// Used for the following RPC endpoints /// * `get_account` /// diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md index e25bbd54d..08ba2fc3f 100644 --- a/docs/external/src/rpc.md +++ b/docs/external/src/rpc.md @@ -22,7 +22,6 @@ The gRPC service definition can be found in the Miden node's `proto` [directory] - [SyncNullifiers](#syncnullifiers) - [SyncAccountVault](#syncaccountvault) - [SyncNotes](#syncnotes) -- [SyncState](#syncstate) - [SyncAccountStorageMaps](#syncaccountstoragemaps) - [SyncTransactions](#synctransactions) - [Status](#status) @@ -141,7 +140,9 @@ This endpoint allows clients to discover the maximum number of items that can be "endpoints": { "CheckNullifiers": { "parameters": { "nullifier": 1000 } }, "SyncNullifiers": { "parameters": { "nullifier": 1000 } }, - "SyncState": { "parameters": { "account_id": 1000, "note_tag": 1000 } }, + "SyncTransactions": { "parameters": { "account_id": 1000 } }, + "SyncAccountVault": { "parameters": { "account_id": 1000 } }, + "SyncAccountStorageMaps": { "parameters": { "account_id": 1000 } }, "SyncNotes": { "parameters": { "note_tag": 1000 } }, "GetNotesById": { "parameters": { "note_id": 100 } } } @@ -207,18 +208,6 @@ A basic note sync can be implemented by repeatedly requesting the previous respo **Limits:** `note_tag` (1000) -### SyncState - -Iteratively sync data for specific notes and accounts. - -This request returns the next block containing data of interest. Client is expected to repeat these requests in a loop until the response reaches the head of the chain, at which point the data is fully synced. - -Each update response also contains info about new notes, accounts etc. created. It also returns Chain MMR delta that can be used to update the state of Chain MMR. 
This includes both chain MMR peaks and chain MMR nodes. - -The low part of note tags are redacted to preserve some degree of privacy. Returned data therefore contains additional notes which should be filtered out by the client. - -**Limits:** `account_id` (1000), `note_tag` (1000) - ### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index c71e853da..1012476d1 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -63,22 +63,8 @@ service Rpc { // tip of the chain. rpc SyncNotes(rpc.SyncNotesRequest) returns (rpc.SyncNotesResponse) {} - // Returns info which can be used by the requester to sync up to the latest state of the chain - // for the objects (accounts, notes, nullifiers) the requester is interested in. - // - // This request returns the next block containing requested data. It also returns `chain_tip` - // which is the latest block number in the chain. requester is expected to repeat these requests - // in a loop until `response.block_header.block_num == response.chain_tip`, at which point - // the requester is fully synchronized with the chain. - // - // Each request also returns info about new notes, nullifiers etc. created. It also returns - // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - // MMR peaks and chain MMR nodes. - // - // For preserving some degree of privacy, note tags and nullifiers filters contain only high - // part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - // additional filtering of that data on its side. 
- rpc SyncState(rpc.SyncStateRequest) returns (rpc.SyncStateResponse) {} + // Returns chain MMR updates within a block range. + rpc SyncChainMmr(rpc.SyncChainMmrRequest) returns (rpc.SyncChainMmrResponse) {} // Returns account vault updates for specified account within a block range. rpc SyncAccountVault(rpc.SyncAccountVaultRequest) returns (rpc.SyncAccountVaultResponse) {} diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index b120963f2..3a189d6c1 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -103,22 +103,7 @@ service Api { // Returns storage map updates for specified account and storage slots within a block range. rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} - // Returns info which can be used by the client to sync up to the latest state of the chain - // for the objects (accounts and notes) the client is interested in. - // - // This request returns the next block containing requested data. It also returns `chain_tip` - // which is the latest block number in the chain. Client is expected to repeat these requests - // in a loop until `response.block_header.block_num == response.chain_tip`, at which point - // the client is fully synchronized with the chain. - // - // Each update response also contains info about new notes, accounts etc. created. It also - // returns Chain MMR delta that can be used to update the state of Chain MMR. This includes - // both chain MMR peaks and chain MMR nodes. - // - // For preserving some degree of privacy, note tags contain only high - // part of hashes. Thus, returned data contains excessive notes, client can make - // additional filtering of that data on its side. 
- rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} + rpc SyncChainMmr(SyncChainMmrRequest) returns (SyncChainMmrResponse) {} } // RPC STATUS @@ -494,51 +479,27 @@ message SyncNotesResponse { repeated note.NoteSyncRecord notes = 4; } -// SYNC STATE +// SYNC CHAIN MMR // ================================================================================================ -// State synchronization request. -// -// Specifies state updates the requester is interested in. The server will return the first block which -// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -// `account_ids` for that block range. -message SyncStateRequest { - // Last block known by the requester. The response will contain data starting from the next block, - // until the first block which contains a note of matching the requested tag, or the chain tip - // if there are no notes. - fixed32 block_num = 1; - - // Accounts' commitment to include in the response. +// Chain MMR synchronization request. +message SyncChainMmrRequest { + // Block range from which to synchronize the chain MMR. // - // An account commitment will be included if-and-only-if it is the latest update. Meaning it is - // possible there was an update to the account for the given range, but if it is not the latest, - // it won't be included in the response. - repeated account.AccountId account_ids = 2; - - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 3; + // The response will contain MMR delta starting after `block_range.block_from` up to + // `block_range.block_to` or the chain tip (whichever is lower). Set `block_from` to the last + // block already present in the caller's MMR so the delta begins at the next block. + BlockRange block_range = 1; } -// Represents the result of syncing state request. -message SyncStateResponse { - // Number of the latest block in the chain. 
- fixed32 chain_tip = 1; - - // Block header of the block with the first note matching the specified criteria. - blockchain.BlockHeader block_header = 2; - - // Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - primitives.MmrDelta mmr_delta = 3; - - // List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - repeated account.AccountSummary accounts = 5; - - // List of transactions executed against requested accounts between `request.block_num + 1` and - // `response.block_header.block_num`. - repeated transaction.TransactionSummary transactions = 6; +// Represents the result of syncing chain MMR. +message SyncChainMmrResponse { + // Pagination information. + PaginationInfo pagination_info = 1; - // List of all notes together with the Merkle paths from `response.block_header.note_root`. - repeated note.NoteSyncRecord notes = 7; + // Data needed to update the partial MMR from `request.block_range.block_from + 1` to + // `pagination_info.block_num`. + primitives.MmrDelta mmr_delta = 2; } // SYNC ACCOUNT STORAGE MAP @@ -658,7 +619,7 @@ message TransactionRecord { // Represents the query parameter limits for RPC endpoints. message RpcLimits { // Maps RPC endpoint names to their parameter limits. 
- // Key: endpoint name (e.g., "CheckNullifiers", "SyncState") + // Key: endpoint name (e.g., "CheckNullifiers") // Value: map of parameter names to their limit values map endpoints = 1; } From 632f0ba5dbb771020deb90bcdfecfe86a59dfdc9 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 16 Feb 2026 13:45:01 +0100 Subject: [PATCH 36/77] feat/db: cleanup old account state data db entries (#1645) --- CHANGELOG.md | 1 + .../2026020600000_cleanup_indices/down.sql | 4 + .../2026020600000_cleanup_indices/up.sql | 9 + crates/store/src/db/mod.rs | 2 + .../store/src/db/models/queries/accounts.rs | 81 +++++- crates/store/src/db/tests.rs | 250 +++++++++++++++++- crates/store/src/inner_forest/mod.rs | 2 + 7 files changed, 345 insertions(+), 4 deletions(-) create mode 100644 crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql create mode 100644 crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql diff --git a/CHANGELOG.md b/CHANGELOG.md index 22797cadf..e14f06844 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ ### Enhancements +- Cleanup old account data from the database on apply block ([#1304](https://github.com/0xMiden/miden-node/issues/1304)). - Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). - Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). - Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). 
diff --git a/crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql b/crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql new file mode 100644 index 000000000..1195d70bd --- /dev/null +++ b/crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql @@ -0,0 +1,4 @@ +-- Reverse the cleanup indices migration + +DROP INDEX IF EXISTS idx_vault_cleanup; +DROP INDEX IF EXISTS idx_storage_cleanup; diff --git a/crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql b/crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql new file mode 100644 index 000000000..b98f55c6d --- /dev/null +++ b/crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql @@ -0,0 +1,9 @@ +-- Add indices to optimize cleanup queries that delete old non-latest entries. +-- +-- These partial indices only include rows where is_latest = 0, making them: +-- - Smaller (only index rows that will eventually be deleted) +-- - Faster for cleanup operations (direct lookup of old entries) +-- - No overhead for is_latest = 1 rows (which are never deleted) + +CREATE INDEX idx_vault_cleanup ON account_vault_assets(block_num) WHERE is_latest = 0; +CREATE INDEX idx_storage_cleanup ON account_storage_map_values(block_num) WHERE is_latest = 0; diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 54bf22501..0b8f0fd42 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -582,6 +582,8 @@ impl Db { tracing::warn!(target: COMPONENT, "failed to send notification for successful block application, potential deadlock"); } + models::queries::prune_history(conn, signed_block.header().block_num())?; + acquire_done.blocking_recv()?; Ok(()) diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 9e01c15c1..da0d875a9 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -871,11 +871,13 @@ 
pub(crate) fn insert_account_vault_asset( // First, update any existing rows with the same (account_id, vault_key) to set // is_latest=false let vault_key: Word = vault_key.into(); + let vault_key_bytes = vault_key.to_bytes(); + let account_id_bytes = account_id.to_bytes(); let update_count = diesel::update(schema::account_vault_assets::table) .filter( schema::account_vault_assets::account_id - .eq(&account_id.to_bytes()) - .and(schema::account_vault_assets::vault_key.eq(&vault_key.to_bytes())) + .eq(account_id_bytes) + .and(schema::account_vault_assets::vault_key.eq(vault_key_bytes)) .and(schema::account_vault_assets::is_latest.eq(true)), ) .set(schema::account_vault_assets::is_latest.eq(false)) @@ -1206,3 +1208,78 @@ pub(crate) struct AccountStorageMapRowInsert { pub(crate) value: Vec, pub(crate) is_latest: bool, } + +// CLEANUP FUNCTIONS +// ================================================================================================ + +/// Number of historical blocks to retain for vault assets and storage map values. +/// Entries older than `chain_tip - HISTORICAL_BLOCK_RETENTION` will be deleted, +/// except for entries marked with `is_latest=true` which are always retained. +pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; + +/// Clean up old entries for all accounts, deleting entries older than the retention window. +/// +/// Deletes rows where `block_num < chain_tip - HISTORICAL_BLOCK_RETENTION` and `is_latest = false`. +/// This is a simple and efficient approach that doesn't require window functions. 
+/// +/// # Returns +/// A tuple of `(vault_assets_deleted, storage_map_values_deleted)` +#[tracing::instrument( + target = COMPONENT, + skip_all, + err, + fields(cutoff_block), +)] +pub(crate) fn prune_history( + conn: &mut SqliteConnection, + chain_tip: BlockNumber, +) -> Result<(usize, usize), DatabaseError> { + let cutoff_block = i64::from(chain_tip.as_u32().saturating_sub(HISTORICAL_BLOCK_RETENTION)); + tracing::Span::current().record("cutoff_block", cutoff_block); + let vault_deleted = prune_account_vault_assets(conn, cutoff_block)?; + let storage_deleted = prune_account_storage_map_values(conn, cutoff_block)?; + + Ok((vault_deleted, storage_deleted)) +} + +#[tracing::instrument( + target = COMPONENT, + skip_all, + err, + fields(cutoff_block), +)] +fn prune_account_vault_assets( + conn: &mut SqliteConnection, + cutoff_block: i64, +) -> Result { + diesel::delete( + schema::account_vault_assets::table.filter( + schema::account_vault_assets::block_num + .lt(cutoff_block) + .and(schema::account_vault_assets::is_latest.eq(false)), + ), + ) + .execute(conn) + .map_err(DatabaseError::Diesel) +} + +#[tracing::instrument( + target = COMPONENT, + skip_all, + err, + fields(cutoff_block), +)] +fn prune_account_storage_map_values( + conn: &mut SqliteConnection, + cutoff_block: i64, +) -> Result { + diesel::delete( + schema::account_storage_map_values::table.filter( + schema::account_storage_map_values::block_num + .lt(cutoff_block) + .and(schema::account_storage_map_values::is_latest.eq(false)), + ), + ) + .execute(conn) + .map_err(DatabaseError::Diesel) +} diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 8266b8739..2c132c5d8 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -70,7 +70,11 @@ use rand::Rng; use super::{AccountInfo, NoteRecord, NullifierInfo}; use crate::db::migrations::apply_migrations; -use crate::db::models::queries::{StorageMapValue, insert_account_storage_map_value}; +use 
crate::db::models::queries::{ + HISTORICAL_BLOCK_RETENTION, + StorageMapValue, + insert_account_storage_map_value, +}; use crate::db::models::{Page, queries, utils}; use crate::errors::DatabaseError; @@ -2098,7 +2102,7 @@ fn db_roundtrip_account_storage_with_maps() { #[test] #[miden_node_test_macro::enable_logging] -fn test_note_metadata_with_attachment_roundtrip() { +fn db_roundtrip_note_metadata_attachment() { let mut conn = create_db(); let block_num = BlockNumber::from(1); create_block(&mut conn, block_num); @@ -2149,3 +2153,245 @@ fn test_note_metadata_with_attachment_roundtrip() { "NetworkAccountTarget should have the correct target account ID" ); } + +#[test] +#[miden_node_test_macro::enable_logging] +fn test_prune_history() { + let mut conn = create_db(); + let conn = &mut conn; + + let public_account_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // Create blocks around the retention window. + const GENESIS_BLOCK_NUM: u32 = 0; + const OLD_BLOCK_OFFSET: u32 = 1; + const CUTOFF_BLOCK_OFFSET: u32 = 2; + const UPDATE_BLOCK_OFFSET: u32 = 3; + + let block_0: BlockNumber = GENESIS_BLOCK_NUM.into(); + let block_old: BlockNumber = OLD_BLOCK_OFFSET.into(); + let block_cutoff: BlockNumber = CUTOFF_BLOCK_OFFSET.into(); + let block_update: BlockNumber = UPDATE_BLOCK_OFFSET.into(); + let block_tip: BlockNumber = (HISTORICAL_BLOCK_RETENTION + CUTOFF_BLOCK_OFFSET).into(); + + for block in [block_0, block_old, block_cutoff, block_update, block_tip] { + create_block(conn, block); + } + + // Create account + for block in [block_0, block_old, block_cutoff, block_update, block_tip] { + queries::upsert_accounts(conn, &[mock_block_account_update(public_account_id, 0)], block) + .unwrap(); + } + + // Insert vault assets at different blocks + let vault_key_old = AssetVaultKey::new_unchecked(num_to_word(100)); + let vault_key_cutoff = AssetVaultKey::new_unchecked(num_to_word(200)); + let vault_key_recent = AssetVaultKey::new_unchecked(num_to_word(300)); + 
let asset_1 = Asset::Fungible(FungibleAsset::new(public_account_id, 1000).unwrap()); + let asset_2 = Asset::Fungible(FungibleAsset::new(public_account_id, 2000).unwrap()); + let asset_3 = Asset::Fungible(FungibleAsset::new(public_account_id, 3000).unwrap()); + + // Old entry at block_old (should be deleted when cutoff is at block_cutoff for + // chain_tip=block_tip) + queries::insert_account_vault_asset( + conn, + public_account_id, + block_old, + vault_key_old, + Some(asset_1), + ) + .unwrap(); + + // Entry exactly at cutoff (block_cutoff, should be retained) + queries::insert_account_vault_asset( + conn, + public_account_id, + block_cutoff, + vault_key_cutoff, + Some(asset_2), + ) + .unwrap(); + + // Recent entry (should always be retained) + queries::insert_account_vault_asset( + conn, + public_account_id, + block_tip, + vault_key_recent, + Some(asset_3), + ) + .unwrap(); + + // Update an entry to create a non-latest version + let updated_asset = Asset::Fungible(FungibleAsset::new(public_account_id, 1500).unwrap()); + queries::insert_account_vault_asset( + conn, + public_account_id, + block_update, + vault_key_old, + Some(updated_asset), + ) + .unwrap(); + + // Insert storage map values at different blocks + let slot_name = StorageSlotName::mock(5); + let map_key_old = num_to_word(10); + let map_key_cutoff = num_to_word(20); + let map_key_recent = num_to_word(30); + let value_1 = num_to_word(111); + let value_2 = num_to_word(222); + let value_3 = num_to_word(333); + let value_updated = num_to_word(444); + + // Old storage map entry at block_old + insert_account_storage_map_value( + conn, + public_account_id, + block_old, + slot_name.clone(), + map_key_old, + value_1, + ) + .unwrap(); + + // Storage map entry at cutoff boundary (block_cutoff) + insert_account_storage_map_value( + conn, + public_account_id, + block_cutoff, + slot_name.clone(), + map_key_cutoff, + value_2, + ) + .unwrap(); + + // Recent storage map entry + insert_account_storage_map_value( + conn, 
+ public_account_id, + block_tip, + slot_name.clone(), + map_key_recent, + value_3, + ) + .unwrap(); + + // Update map_key_old to create a non-latest entry at block_update + insert_account_storage_map_value( + conn, + public_account_id, + block_update, + slot_name.clone(), + map_key_old, + value_updated, + ) + .unwrap(); + + // Verify initial state - should have 4 vault assets and 4 storage map values + let (_, initial_vault_assets) = + queries::select_account_vault_assets(conn, public_account_id, block_0..=block_tip).unwrap(); + assert_eq!(initial_vault_assets.len(), 4, "should have 4 vault assets before cleanup"); + + let initial_storage_values = + queries::select_account_storage_map_values(conn, public_account_id, block_0..=block_tip) + .unwrap(); + assert_eq!( + initial_storage_values.values.len(), + 4, + "should have 4 storage map values before cleanup" + ); + + // Run cleanup with chain_tip = block_tip, cutoff will be block_tip - HISTORICAL_BLOCK_RETENTION + // = block_cutoff + let (vault_deleted, storage_deleted) = queries::prune_history(conn, block_tip).unwrap(); + + // Verify deletions occurred + assert_eq!(vault_deleted, 1, "should delete 1 old vault asset"); + assert_eq!(storage_deleted, 1, "should delete 1 old storage map value"); + + // Verify remaining vault assets - should have 3 (cutoff, update, tip) + let (_, remaining_vault_assets) = + queries::select_account_vault_assets(conn, public_account_id, block_0..=block_tip).unwrap(); + assert_eq!(remaining_vault_assets.len(), 3, "should have 3 vault assets after cleanup"); + + // Verify no vault asset at block_old remains + assert!( + !remaining_vault_assets.iter().any(|v| v.block_num == block_old), + "block_old vault asset should be deleted" + ); + + // Verify vault assets at block_cutoff, block_update, block_tip remain + assert!( + remaining_vault_assets.iter().any(|v| v.block_num == block_cutoff), + "block_cutoff vault asset should be retained (at cutoff)" + ); + assert!( + 
remaining_vault_assets.iter().any(|v| v.block_num == block_update), + "block_update vault asset should be retained" + ); + assert!( + remaining_vault_assets.iter().any(|v| v.block_num == block_tip), + "block_tip vault asset should be retained" + ); + + // Verify remaining storage map values - should have 3 (cutoff, update, tip) + let remaining_storage_values = + queries::select_account_storage_map_values(conn, public_account_id, block_0..=block_tip) + .unwrap(); + assert_eq!( + remaining_storage_values.values.len(), + 3, + "should have 3 storage map values after cleanup" + ); + + // Verify no storage map value at block_old remains + assert!( + !remaining_storage_values.values.iter().any(|v| v.block_num == block_old), + "block_old storage map value should be deleted" + ); + + // Verify storage map values at block_cutoff, block_update, block_tip remain + assert!( + remaining_storage_values.values.iter().any(|v| v.block_num == block_cutoff), + "block_cutoff storage map value should be retained (at cutoff)" + ); + assert!( + remaining_storage_values.values.iter().any(|v| v.block_num == block_update), + "block_update storage map value should be retained" + ); + assert!( + remaining_storage_values.values.iter().any(|v| v.block_num == block_tip), + "block_tip storage map value should be retained" + ); + + // Test that is_latest=true entries are never deleted, even if old + // Insert an old entry marked as latest + let vault_key_old_latest = AssetVaultKey::new_unchecked(num_to_word(999)); + let asset_old = Asset::Fungible(FungibleAsset::new(public_account_id, 9999).unwrap()); + queries::insert_account_vault_asset( + conn, + public_account_id, + block_0, + vault_key_old_latest, + Some(asset_old), + ) + .unwrap(); + + // This entry at block 0 is marked as is_latest=true by insert_account_vault_asset + // Run cleanup again + let (vault_deleted_2, _) = queries::prune_history(conn, block_tip).unwrap(); + + // The old latest entry should not be deleted (vault_deleted_2 should be 
0) + assert_eq!(vault_deleted_2, 0, "should not delete any is_latest=true entries"); + + // Verify the old latest entry still exists + let (_, vault_assets_with_latest) = + queries::select_account_vault_assets(conn, public_account_id, block_0..=block_tip).unwrap(); + assert!( + vault_assets_with_latest + .iter() + .any(|v| v.block_num == block_0 && v.vault_key == vault_key_old_latest), + "is_latest=true entry should be retained even if old" + ); +} diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 330a63d80..c2b5b495b 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -597,4 +597,6 @@ impl InnerForest { ); } } + + // TODO: tie in-memory forest retention to DB pruning policy once forest queries rely on it. } From ffd3f4d335736e66b53ccd84805c32a2e17e2500 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 17 Feb 2026 10:38:10 +0100 Subject: [PATCH 37/77] fix: compile fix from missed merge (#1683) --- crates/store/Cargo.toml | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index d5f50aafa..315f49761 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -15,21 +15,24 @@ version.workspace = true workspace = true [dependencies] -anyhow = { workspace = true } -deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } -deadpool-diesel = { features = ["sqlite"], version = "0.6" } -deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.3" } -diesel_migrations = { features = ["sqlite"], version = "2.3" } -fs-err = { workspace = true } -hex = { version = "0.4" } -indexmap = { workspace = true } -libsqlite3-sys = { workspace = true } -miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } -miden-node-proto = { 
workspace = true } -miden-node-proto-build = { features = ["internal"], workspace = true } -miden-node-utils = { workspace = true } -miden-standards = { workspace = true } +anyhow = { workspace = true } +deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } +deadpool-diesel = { features = ["sqlite"], version = "0.6" } +deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } +diesel = { features = ["numeric", "sqlite"], version = "2.3" } +diesel_migrations = { features = ["sqlite"], version = "2.3" } +fs-err = { workspace = true } +futures = { workspace = true } +hex = { version = "0.4" } +indexmap = { workspace = true } +libsqlite3-sys = { workspace = true } +miden-block-prover = { workspace = true } +miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } +miden-node-proto = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-utils = { workspace = true } +miden-remote-prover-client = { features = ["block-prover"], workspace = true } +miden-standards = { workspace = true } # TODO remove `testing` from `miden-protocol`, required for `BlockProof::new_dummy` miden-protocol = { features = ["std", "testing"], workspace = true } pretty_assertions = { workspace = true } @@ -47,7 +50,7 @@ tracing = { workspace = true } url = { workspace = true } [build-dependencies] -miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { optional = true, workspace = true } [dev-dependencies] assert_matches = { workspace = true } From 2340d92ccb8c582bf4289aab6c546d36f5f31e80 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Tue, 17 Feb 2026 11:39:30 +0200 Subject: [PATCH 38/77] ci: workflow cleanup follow-up (#1679) --- .github/workflows/cleanup-workflows.yml | 270 ++++++++++++++++++------ 1 file changed, 206 insertions(+), 64 deletions(-) diff --git 
a/.github/workflows/cleanup-workflows.yml b/.github/workflows/cleanup-workflows.yml index a10133f1f..a7a6d2b42 100644 --- a/.github/workflows/cleanup-workflows.yml +++ b/.github/workflows/cleanup-workflows.yml @@ -36,106 +36,248 @@ jobs: run: | git fetch origin main WORKFLOWS=$(git ls-tree -r origin/main --name-only | grep '^.github/workflows/') - echo $WORKFLOWS - echo "workflows=$WORKFLOWS" >> "$GITHUB_OUTPUT" + printf "%s\n" $WORKFLOWS + { + echo "workflows<> "$GITHUB_OUTPUT" - name: Workflows on next id: next run: | git fetch origin next WORKFLOWS=$(git ls-tree -r origin/next --name-only | grep '^.github/workflows/') - echo $WORKFLOWS - echo "workflows=$WORKFLOWS" >> "$GITHUB_OUTPUT" - - - name: Workflows on github - id: github - run: | - # Note that we filter by `.github` path prefix to ensure we only get locally defined workflows. - # - # Examples of non-local workflows are `dependabot` and `copilot` which have paths: - # - dynamic/dependabot/dependabot-updates - # - dynamic/copilot-pull-request-reviewer/copilot-pull-request-reviewer - WORKFLOWS=$(gh workflow list \ - --all \ - --json path \ - --jq '.[] | select(.path | startswith(".github")) | .path' \ - ) - echo $WORKFLOWS - echo "workflows=$WORKFLOWS" >> "$GITHUB_OUTPUT" + printf "%s\n" $WORKFLOWS + { + echo "workflows<> "$GITHUB_OUTPUT" - name: Filter for deleted workflows id: deleted + env: + GH_TOKEN: ${{ github.token }} run: | - # Union of `main` and `next` workflows. - EXISTING_FILES=$( \ - printf "%s\n%s\n" \ + set -euo pipefail + + # Union of `main` and `next` workflows as a JSON array of strings (paths) + EXISTING=$(printf "%s\n%s\n" \ "${{ steps.main.outputs.workflows }}" \ "${{ steps.next.outputs.workflows }}" \ ) - EXISTING_FILES=$(echo "$EXISTING_FILES" | sort -u) - echo $EXISTING_FILES - - # Find deleted workflows as the items in `WORKFLOWS` but not in the union of main and next. - # This assumes that _all_ items in main and next are present in `WORKFLOWS`. 
- DELETED_FILES=$( \ - printf "%s\n%s\n" \ - "$EXISTING_FILES" \ - "${{ steps.github.outputs.workflows }}" \ + EXISTING=$(echo "$EXISTING" | sort -u | jq -R . | jq -s .) + + echo "Existing workflows:" + echo "$EXISTING" + + # Get workflows currently on GitHub as JSON array of objects + GITHUB=$(gh api repos/{owner}/{repo}/actions/workflows \ + --jq '.workflows[] | select(.path | startswith(".github")) | { name, node_id, path }' \ + | jq -s '.') + + echo "Workflows on GitHub:" + echo "$GITHUB" + + # Find deleted workflows: present on GitHub but not in main/next + DELETED=$(echo "$GITHUB" | jq -c \ + --argjson existing "$EXISTING" ' + map(select(.path as $p | $existing | index($p) | not)) + ' ) - DELETED_FILES=$(echo "$DELETED_FILES" | sort | uniq -u) - echo $DELETED_FILES - echo "workflows=$DELETED_FILES" >> "$GITHUB_OUTPUT" + echo "Deleted workflows:" + echo "$DELETED" + + # Output to GitHub Actions + { + echo "workflows<> "$GITHUB_OUTPUT" + + # Performs the actual run deletion. + # + # This contains a lot of code, but the vast majority is just pretty-printing. - name: Delete runs from deleted workflows env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} MODE: ${{ inputs.mode }} - DELETED_WORKFLOWS: ${{ steps.deleted.outputs.workflows }} + WORKFLOWS: ${{ steps.deleted.outputs.workflows }} + OWNER: ${{ github.repository_owner }} + REPO: ${{ github.repository }} + shell: bash --noprofile --norc -euo pipefail {0} run: | - set -euo pipefail + if [ -z "$WORKFLOWS" ]; then + echo "No workflows to delete." + exit 0 + fi - TOTAL_AFFECTED=0 + # ================================================================================================ + # Utility functions + # ================================================================================================ + + # Fetches a page of workflow runs for a given workflow ID and cursor. + # + # We use github's graphql API here which allows us to paginate over workflow runs. 
+ # Unfortunately `gh run list` does not support pagination, so we use the graphql API instead. + gh_workflow_run_page() { + local id="$1" + local cursor="$2" - echo "" - echo "=== Workflow Cleanup Summary ===" - echo "" + gh api graphql -F workflowId="$id" -F after="$cursor" \ + -f query='query($workflowId: ID!, $after: String) { + node(id: $workflowId) { + ... on Workflow { + runs(first: 100, after: $after) { + pageInfo { hasNextPage endCursor } + nodes { databaseId } + } + } + } + }' + } - while IFS= read -r workflow; do - [ -z "$workflow" ] && continue + # ================================================================================================ + # Print helpers for nice progress and table display + # ================================================================================================ - WF_COUNT=0 + # Column widths (table includes three spacers for ' | ' between columns) + widths_index=9 + widths_name=30 + widths_count=14 + widths_total=12 + widths_table=$(( $widths_index + 3 + $widths_name + 3 + $widths_count + 3 + $widths_total )) + # Repeats a character a given number of times. + repeat_char() { + local char=$1 + local count=$2 + printf "%0.s$char" $(seq 1 $count) + } + + # Prints the given header as `====
====` to match the table layout. + print_table_header() { + local header="$1" + local header_len=${#header} + local left_pad=$(( ( $widths_table - header_len - 2) / 2 )) + local right_pad=$(( $widths_table - header_len - 2 - left_pad )) + printf " \n%s %s %s\n" $(repeat_char = $left_pad) "$header" $(repeat_char = $right_pad) + } + + # Prints |---+---+---+---| with appropriate widths to accomodate the table headers. + print_table_separator() { + printf "%s+%s+%s+%s\n" \ + "$(repeat_char - $((widths_index + 1)))" \ + "$(repeat_char - $((widths_name + 2)))" \ + "$(repeat_char - $((widths_count + 2)))" \ + "$(repeat_char - $((widths_total + 1)))" + } + + # Prints a row of the table (index, workflow name, workflow count, global total) + print_table_row() { + local index=$1 + local name=$2 + local count=$3 + local total=$4 + printf "%*s | %-*s | %*s | %*s\n" \ + "$widths_index" "$index" \ + "$widths_name" "$name" \ + "$widths_count" "$count" \ + "$widths_total" "$total" + } + + # Alias for print_table_row() with empty index and total columns. + print_summary_row() { + local name=$1 + local count=$2 + print_table_row "" "$name" "$count" "" + } + + # ================================================================================================ + # Print progress table header + # ================================================================================================ + print_table_header "Workflow Cleanup Progress" + print_table_row "Index" "Workflow" "Workflow Count" "Global Total" + print_table_separator + + # ================================================================================================ + # Core workflow loop, iterate over workflows + # ================================================================================================ + + n_workflows=$(echo "$WORKFLOWS" | jq -r '. 
| length') + total=0 + summary=() + index=0 + + mapfile -t WF_ARRAY < <(echo "$WORKFLOWS" | jq -c '.[]') + for wf in "${WF_ARRAY[@]}"; do + index=$((index + 1)) + name=$(echo "$wf" | jq -r '.name') + count=0 + id=$(echo "$wf" | jq -r '.node_id') + + # Safety checks + if [ -z "$name" ]; then + echo "::error title=Workflow name empty::Resolved workflow name is empty at index $index" + exit 1 + fi + if [ -z "$id" ]; then + echo "::error title=Workflow ID missing::Workflow '$name' has no ID" + exit 1 + fi + + cursor="" + + # Paginate over workflow runs while true; do - RUN_IDS=$(gh run list \ - --workflow "$workflow" \ - --limit 100 \ - --json databaseId \ - --jq '.[].databaseId') - - if [ -z "$RUN_IDS" ]; then - break - fi + response=$(gh_workflow_run_page "$id" "$cursor") - BATCH_COUNT=$(echo "$RUN_IDS" | wc -l | tr -d ' ') - WF_COUNT=$((WF_COUNT + BATCH_COUNT)) + run_ids=$(echo "$response" | jq -r '.data.node.runs.nodes[].databaseId') + has_next=$(echo "$response" | jq -r '.data.node.runs.pageInfo.hasNextPage') + cursor=$(echo "$response" | jq -r '.data.node.runs.pageInfo.endCursor') + + [ -z "$run_ids" ] && break + + deleted=$(echo "$run_ids" | wc -l | tr -d ' ') + count=$((count + deleted)) + total=$((total + deleted)) + + # Print progress + print_table_row "[$index/$n_workflows]" "$name" "$count" "$total" if [ "$MODE" = "execute" ]; then - for RUN_ID in $RUN_IDS; do - gh run delete "$RUN_ID" --yes >/dev/null + for run_id in $run_ids; do + gh run delete "$run_id" >/dev/null done fi + + [ "$has_next" != "true" ] && break done - echo "$workflow → $WF_COUNT runs" - TOTAL_AFFECTED=$((TOTAL_AFFECTED + WF_COUNT)) + summary+=("$name|$count") + done - done <<< "$DELETED_WORKFLOWS" + # ================================================================================================ + # Print a summary table + # ================================================================================================ + print_table_header "Workflow Cleanup Summary" + print_summary_row 
"Workflow" "Runs" + print_table_separator + for entry in "${summary[@]}"; do + wf="${entry%%|*}" + count="${entry##*|}" + print_summary_row "$wf" "$count" + done - echo "" - echo "--------------------------------------" - echo "Total runs affected: $TOTAL_AFFECTED" + # ================================================================================================ + # Print totals as a footer + # ================================================================================================ + print_table_separator + print_summary_row "TOTAL" "$total" - if [ "$MODE" = "dry run" ]; then + if [ "$MODE" != "execute" ]; then echo "Dry run complete. No runs were deleted." else echo "Cleanup complete." From 9bed52eb03b0244b79b23618c7c50abc2b19d433 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Wed, 18 Feb 2026 11:09:32 +1300 Subject: [PATCH 39/77] feat: Validator database (#1614) --- CHANGELOG.md | 1 + Cargo.lock | 4 + bin/node/.env | 2 +- bin/node/src/commands/bundled.rs | 189 +++++++++--------- bin/node/src/commands/mod.rs | 49 ++++- bin/node/src/commands/store.rs | 20 +- bin/node/src/commands/validator.rs | 33 ++- crates/block-producer/src/server/tests.rs | 3 + crates/store/Cargo.toml | 4 +- crates/store/src/accounts/tests.rs | 30 +-- crates/store/src/db/manager.rs | 9 +- crates/store/src/db/mod.rs | 9 +- crates/store/src/db/models/conv.rs | 2 +- crates/store/src/lib.rs | 4 + crates/validator/Cargo.toml | 4 + crates/validator/build.rs | 9 + crates/validator/diesel.toml | 5 + crates/validator/src/block_validation/mod.rs | 58 +++--- crates/validator/src/db/migrations.rs | 25 +++ .../migrations/2025062000000_setup/down.sql | 0 .../db/migrations/2025062000000_setup/up.sql | 10 + crates/validator/src/db/mod.rs | 83 ++++++++ crates/validator/src/db/models.rs | 27 +++ crates/validator/src/db/schema.rs | 8 + crates/validator/src/lib.rs | 1 + crates/validator/src/server/mod.rs | 63 +++--- 
crates/validator/src/tx_validation/mod.rs | 11 +- .../src/tx_validation/validated_tx.rs | 38 ++++ 28 files changed, 497 insertions(+), 204 deletions(-) create mode 100644 crates/validator/build.rs create mode 100644 crates/validator/diesel.toml create mode 100644 crates/validator/src/db/migrations.rs create mode 100644 crates/validator/src/db/migrations/2025062000000_setup/down.sql create mode 100644 crates/validator/src/db/migrations/2025062000000_setup/up.sql create mode 100644 crates/validator/src/db/mod.rs create mode 100644 crates/validator/src/db/models.rs create mode 100644 crates/validator/src/db/schema.rs create mode 100644 crates/validator/src/tx_validation/validated_tx.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 865e3752e..18946d1f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [BREAKING] Move block proving from Blocker Producer to the Store ([#1579](https://github.com/0xMiden/miden-node/pull/1579)). - [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/miden-node/pull/1595)). +- Validator now persists validated transactions ([#1614](https://github.com/0xMiden/miden-node/pull/1614)). - [BREAKING] Remove `SynState` and introduce `SyncChainMmr` ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). - Introduce `SyncChainMmr` RPC endpoint to sync chain MMR deltas within specified block ranges ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). 
diff --git a/Cargo.lock b/Cargo.lock index c8c67c56a..09653daa0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3012,8 +3012,12 @@ name = "miden-node-validator" version = "0.14.0" dependencies = [ "anyhow", + "deadpool-diesel", + "diesel", + "diesel_migrations", "miden-node-proto", "miden-node-proto-build", + "miden-node-store", "miden-node-utils", "miden-protocol", "miden-tx", diff --git a/bin/node/.env b/bin/node/.env index fc4c2793e..6bdfa9a80 100644 --- a/bin/node/.env +++ b/bin/node/.env @@ -10,7 +10,7 @@ MIDEN_NODE_STORE_RPC_URL= MIDEN_NODE_STORE_NTX_BUILDER_URL= MIDEN_NODE_STORE_BLOCK_PRODUCER_URL= MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL= -MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY= +MIDEN_NODE_VALIDATOR_KEY= MIDEN_NODE_RPC_URL=http://0.0.0.0:57291 MIDEN_NODE_DATA_DIRECTORY=./ MIDEN_NODE_ENABLE_OTEL=true diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 8bc38fd07..795cd6fe5 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -8,7 +8,6 @@ use miden_node_rpc::Rpc; use miden_node_store::Store; use miden_node_utils::grpc::UrlExt; use miden_node_validator::Validator; -use miden_protocol::block::BlockSigner; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::Deserializable; use tokio::net::TcpListener; @@ -22,9 +21,10 @@ use crate::commands::{ ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, - ENV_VALIDATOR_INSECURE_SECRET_KEY, + ENV_VALIDATOR_KEY, INSECURE_VALIDATOR_KEY_HEX, NtxBuilderConfig, + ValidatorConfig, duration_to_human_readable_string, }; @@ -51,12 +51,12 @@ pub enum BundledCommand { /// /// If not provided, a predefined key is used. 
#[arg( - long = "validator.insecure.secret-key", - env = ENV_VALIDATOR_INSECURE_SECRET_KEY, - value_name = "VALIDATOR_INSECURE_SECRET_KEY", + long = "validator.key", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX )] - validator_insecure_secret_key: String, + validator_key: String, }, /// Runs all three node components in the same process. @@ -82,6 +82,9 @@ pub enum BundledCommand { #[command(flatten)] ntx_builder: NtxBuilderConfig, + #[command(flatten)] + validator: ValidatorConfig, + /// Enables the exporting of traces for OpenTelemetry. /// /// This can be further configured using environment variables as defined in the official @@ -99,15 +102,6 @@ pub enum BundledCommand { value_name = "DURATION" )] grpc_timeout: Duration, - - /// Insecure, hex-encoded validator secret key for development and testing purposes. - #[arg( - long = "validator.insecure.secret-key", - env = ENV_VALIDATOR_INSECURE_SECRET_KEY, - value_name = "VALIDATOR_INSECURE_SECRET_KEY", - default_value = INSECURE_VALIDATOR_KEY_HEX - )] - validator_insecure_secret_key: String, }, } @@ -118,14 +112,14 @@ impl BundledCommand { data_directory, accounts_directory, genesis_config_file, - validator_insecure_secret_key, + validator_key, } => { // Currently the bundled bootstrap is identical to the store's bootstrap. 
crate::commands::store::StoreCommand::Bootstrap { data_directory, accounts_directory, genesis_config_file, - validator_insecure_secret_key, + validator_key, } .handle() .await @@ -137,20 +131,18 @@ impl BundledCommand { data_directory, block_producer, ntx_builder, + validator, enable_otel: _, grpc_timeout, - validator_insecure_secret_key, } => { - let secret_key_bytes = hex::decode(validator_insecure_secret_key)?; - let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; Self::start( rpc_url, block_prover_url, data_directory, - ntx_builder, block_producer, + ntx_builder, + validator, grpc_timeout, - signer, ) .await }, @@ -162,10 +154,10 @@ impl BundledCommand { rpc_url: Url, block_prover_url: Option, data_directory: PathBuf, - ntx_builder: NtxBuilderConfig, block_producer: BlockProducerConfig, + ntx_builder: NtxBuilderConfig, + validator: ValidatorConfig, grpc_timeout: Duration, - signer: impl BlockSigner + Send + Sync + 'static, ) -> anyhow::Result<()> { // Start listening on all gRPC urls so that inter-component connections can be created // before each component is fully started up. @@ -177,17 +169,19 @@ impl BundledCommand { .await .context("Failed to bind to RPC gRPC endpoint")?; - let block_producer_address = TcpListener::bind("127.0.0.1:0") - .await - .context("Failed to bind to block-producer gRPC endpoint")? - .local_addr() - .context("Failed to retrieve the block-producer's gRPC address")?; + let (block_producer_url, block_producer_address) = { + let socket_addr = TcpListener::bind("127.0.0.1:0") + .await + .context("Failed to bind to block-producer gRPC endpoint")? + .local_addr() + .context("Failed to retrieve the block-producer's gRPC address")?; + let url = Url::parse(&format!("http://{socket_addr}")) + .context("Failed to parse Block Producer URL")?; + (url, socket_addr) + }; - let validator_address = TcpListener::bind("127.0.0.1:0") - .await - .context("Failed to bind to validator gRPC endpoint")? 
- .local_addr() - .context("Failed to retrieve the validator's gRPC address")?; + // Validator URL is either specified remote, or generated local. + let (validator_url, validator_socket_address) = validator.to_addresses().await?; // Store addresses for each exposed API let store_rpc_listener = TcpListener::bind("127.0.0.1:0") @@ -231,74 +225,59 @@ impl BundledCommand { let should_start_ntx_builder = !ntx_builder.disabled; // Start block-producer. The block-producer's endpoint is available after loading completes. - let block_producer_id = join_set - .spawn({ - let store_url = Url::parse(&format!("http://{store_block_producer_address}")) - .context("Failed to parse URL")?; - let validator_url = Url::parse(&format!("http://{validator_address}")) - .context("Failed to parse URL")?; - async move { - BlockProducer { - block_producer_address, - store_url, - validator_url, - batch_prover_url: block_producer.batch_prover_url, - batch_interval: block_producer.batch_interval, - block_interval: block_producer.block_interval, - max_batches_per_block: block_producer.max_batches_per_block, - max_txs_per_batch: block_producer.max_txs_per_batch, - grpc_timeout, - mempool_tx_capacity: block_producer.mempool_tx_capacity, + let block_producer_id = { + let validator_url = validator_url.clone(); + join_set + .spawn({ + let store_url = Url::parse(&format!("http://{store_block_producer_address}")) + .context("Failed to parse URL")?; + async move { + BlockProducer { + block_producer_address, + store_url, + validator_url, + batch_prover_url: block_producer.batch_prover_url, + batch_interval: block_producer.batch_interval, + block_interval: block_producer.block_interval, + max_batches_per_block: block_producer.max_batches_per_block, + max_txs_per_batch: block_producer.max_txs_per_batch, + grpc_timeout, + mempool_tx_capacity: block_producer.mempool_tx_capacity, + } + .serve() + .await + .context("failed while serving block-producer component") } - .serve() - .await - .context("failed while 
serving block-producer component") - } - }) - .id(); + }) + .id() + }; - let validator_id = join_set - .spawn({ - async move { - Validator { - address: validator_address, + // Start RPC component. + let rpc_id = { + let block_producer_url = block_producer_url.clone(); + let validator_url = validator_url.clone(); + join_set + .spawn(async move { + let store_url = Url::parse(&format!("http://{store_rpc_address}")) + .context("Failed to parse URL")?; + Rpc { + listener: grpc_rpc, + store_url, + block_producer_url: Some(block_producer_url), + validator_url, grpc_timeout, - signer, } .serve() .await - .context("failed while serving validator component") - } - }) - .id(); - - // Start RPC component. - let rpc_id = join_set - .spawn(async move { - let store_url = Url::parse(&format!("http://{store_rpc_address}")) - .context("Failed to parse URL")?; - let block_producer_url = Url::parse(&format!("http://{block_producer_address}")) - .context("Failed to parse URL")?; - let validator_url = Url::parse(&format!("http://{validator_address}")) - .context("Failed to parse URL")?; - Rpc { - listener: grpc_rpc, - store_url, - block_producer_url: Some(block_producer_url), - validator_url, - grpc_timeout, - } - .serve() - .await - .context("failed while serving RPC component") - }) - .id(); + .context("failed while serving RPC component") + }) + .id() + }; // Lookup table so we can identify the failed component. 
let mut component_ids = HashMap::from([ (store_id, "store"), (block_producer_id, "block-producer"), - (validator_id, "validator"), (rpc_id, "rpc"), ]); @@ -306,10 +285,8 @@ impl BundledCommand { if should_start_ntx_builder { let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) .context("Failed to parse URL")?; - let validator_url = Url::parse(&format!("http://{validator_address}")) - .context("Failed to parse URL")?; - let block_producer_url = Url::parse(&format!("http://{block_producer_address}")) - .context("Failed to parse URL")?; + let block_producer_url = block_producer_url.clone(); + let validator_url = validator_url.clone(); let builder_config = ntx_builder.into_builder_config( store_ntx_builder_url, @@ -331,6 +308,28 @@ impl BundledCommand { component_ids.insert(id, "ntx-builder"); } + // Start the Validator if we have bound a socket. + if let Some(address) = validator_socket_address { + let secret_key_bytes = hex::decode(validator.validator_key)?; + let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; + let id = join_set + .spawn({ + async move { + Validator { + address, + grpc_timeout, + signer, + data_directory, + } + .serve() + .await + .context("failed while serving validator component") + } + }) + .id(); + component_ids.insert(id, "validator"); + } + // SAFETY: The joinset is definitely not empty. 
let component_result = join_set.join_next_with_id().await.unwrap(); diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 5b1e8e52a..a4c908846 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -1,12 +1,15 @@ +use std::net::SocketAddr; use std::num::NonZeroUsize; use std::time::Duration; +use anyhow::Context; use miden_node_block_producer::{ DEFAULT_BATCH_INTERVAL, DEFAULT_BLOCK_INTERVAL, DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH, }; +use tokio::net::TcpListener; use url::Url; pub mod block_producer; @@ -36,7 +39,7 @@ const ENV_MAX_TXS_PER_BATCH: &str = "MIDEN_MAX_TXS_PER_BATCH"; const ENV_MAX_BATCHES_PER_BLOCK: &str = "MIDEN_MAX_BATCHES_PER_BLOCK"; const ENV_MEMPOOL_TX_CAPACITY: &str = "MIDEN_NODE_MEMPOOL_TX_CAPACITY"; const ENV_NTX_SCRIPT_CACHE_SIZE: &str = "MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE"; -const ENV_VALIDATOR_INSECURE_SECRET_KEY: &str = "MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY"; +const ENV_VALIDATOR_KEY: &str = "MIDEN_NODE_VALIDATOR_KEY"; const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); @@ -47,7 +50,49 @@ fn duration_to_human_readable_string(duration: Duration) -> String { humantime::format_duration(duration).to_string() } -/// Configuration for the Network Transaction Builder component +/// Configuration for the Validator component. +#[derive(clap::Args)] +pub struct ValidatorConfig { + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// Only used when the Validator URL argument is not set. + #[arg( + long = "validator.key", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_key: String, + + /// The remote Validator's gRPC URL. If unset, will default to running a Validator + /// in-process. If set, the insecure key argument is ignored. 
+ #[arg(long = "validator.url", env = ENV_VALIDATOR_URL, value_name = "URL")] + validator_url: Option, +} + +impl ValidatorConfig { + /// Converts the [`ValidatorConfig`] into a URL and an optional [`SocketAddr`]. + /// + /// If the `validator_url` is set, it returns the URL and `None` for the [`SocketAddr`]. + /// + /// If `validator_url` is not set, it binds to a random port on localhost, creates a URL, + /// and returns the URL and the bound [`SocketAddr`]. + async fn to_addresses(&self) -> anyhow::Result<(Url, Option)> { + if let Some(url) = &self.validator_url { + Ok((url.clone(), None)) + } else { + let socket_addr = TcpListener::bind("127.0.0.1:0") + .await + .context("Failed to bind to validator gRPC endpoint")? + .local_addr() + .context("Failed to retrieve the validator's gRPC address")?; + let url = Url::parse(&format!("http://{socket_addr}")) + .context("Failed to parse Validator URL")?; + Ok((url, Some(socket_addr))) + } + } +} + +/// Configuration for the Network Transaction Builder component. #[derive(clap::Args)] pub struct NtxBuilderConfig { /// Disable spawning the network transaction builder. diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index bde1cf774..54c741e4d 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -20,7 +20,7 @@ use crate::commands::{ ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, - ENV_VALIDATOR_INSECURE_SECRET_KEY, + ENV_VALIDATOR_KEY, INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, }; @@ -46,14 +46,16 @@ pub enum StoreCommand { genesis_config_file: Option, /// Insecure, hex-encoded validator secret key for development and testing purposes. /// + /// Used to sign the genesis block in the bootstrap process. + /// /// If not provided, a predefined key is used. 
#[arg( - long = "validator.insecure.secret-key", - env = ENV_VALIDATOR_INSECURE_SECRET_KEY, - value_name = "VALIDATOR_INSECURE_SECRET_KEY", + long = "validator.key", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX )] - validator_insecure_secret_key: String, + validator_key: String, }, /// Starts the store component. @@ -109,12 +111,12 @@ impl StoreCommand { data_directory, accounts_directory, genesis_config_file, - validator_insecure_secret_key, + validator_key, } => Self::bootstrap( &data_directory, &accounts_directory, genesis_config_file.as_ref(), - validator_insecure_secret_key, + validator_key, ), StoreCommand::Start { rpc_url, @@ -192,10 +194,10 @@ impl StoreCommand { data_directory: &Path, accounts_directory: &Path, genesis_config: Option<&PathBuf>, - validator_insecure_secret_key: String, + validator_key: String, ) -> anyhow::Result<()> { // Decode the validator key. - let signer = SecretKey::read_from_bytes(&hex::decode(validator_insecure_secret_key)?)?; + let signer = SecretKey::read_from_bytes(&hex::decode(validator_key)?)?; // Parse genesis config (or default if not given). let config = genesis_config diff --git a/bin/node/src/commands/validator.rs b/bin/node/src/commands/validator.rs index f543be301..461e446c1 100644 --- a/bin/node/src/commands/validator.rs +++ b/bin/node/src/commands/validator.rs @@ -1,3 +1,4 @@ +use std::path::PathBuf; use std::time::Duration; use anyhow::Context; @@ -9,8 +10,9 @@ use url::Url; use crate::commands::{ DEFAULT_TIMEOUT, + ENV_DATA_DIRECTORY, ENV_ENABLE_OTEL, - ENV_VALIDATOR_INSECURE_SECRET_KEY, + ENV_VALIDATOR_KEY, ENV_VALIDATOR_URL, INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, @@ -40,29 +42,42 @@ pub enum ValidatorCommand { )] grpc_timeout: Duration, + /// Directory in which to store the validator's data. 
+ #[arg(long, env = ENV_DATA_DIRECTORY, value_name = "DIR")] + data_directory: PathBuf, + /// Insecure, hex-encoded validator secret key for development and testing purposes. /// /// If not provided, a predefined key is used. - #[arg(long = "insecure.secret-key", env = ENV_VALIDATOR_INSECURE_SECRET_KEY, value_name = "INSECURE_SECRET_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX)] - insecure_secret_key: String, + #[arg(long = "key", env = ENV_VALIDATOR_KEY, value_name = "VALIDATOR_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX)] + validator_key: String, }, } impl ValidatorCommand { pub async fn handle(self) -> anyhow::Result<()> { let Self::Start { - url, grpc_timeout, insecure_secret_key, .. + url, + grpc_timeout, + validator_key, + data_directory, + .. } = self; let address = url.to_socket().context("Failed to extract socket address from validator URL")?; - let signer = SecretKey::read_from_bytes(hex::decode(insecure_secret_key)?.as_ref())?; + let signer = SecretKey::read_from_bytes(hex::decode(validator_key)?.as_ref())?; - Validator { address, grpc_timeout, signer } - .serve() - .await - .context("failed while serving validator component") + Validator { + address, + grpc_timeout, + signer, + data_directory, + } + .serve() + .await + .context("failed while serving validator component") } pub fn is_open_telemetry_enabled(&self) -> bool { diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index c404a2ae9..8c98e9da4 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -44,10 +44,13 @@ async fn block_producer_startup_is_robust_to_network_failures() { // start the validator task::spawn(async move { + let temp_dir = tempfile::tempdir().expect("tempdir should be created"); + let data_directory = temp_dir.path().to_path_buf(); Validator { address: validator_addr, grpc_timeout, signer: SecretKey::random(), + data_directory, } .serve() .await diff --git 
a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 315f49761..5ce4daee7 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -50,7 +50,7 @@ tracing = { workspace = true } url = { workspace = true } [build-dependencies] -miden-node-rocksdb-cxx-linkage-fix = { optional = true, workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } @@ -66,7 +66,7 @@ termtree = { version = "0.5" } [features] default = ["rocksdb"] -rocksdb = ["miden-crypto/rocksdb", "miden-node-rocksdb-cxx-linkage-fix"] +rocksdb = ["miden-crypto/rocksdb"] [[bench]] harness = false diff --git a/crates/store/src/accounts/tests.rs b/crates/store/src/accounts/tests.rs index 4514f2369..9f7b5dcbd 100644 --- a/crates/store/src/accounts/tests.rs +++ b/crates/store/src/accounts/tests.rs @@ -1,7 +1,6 @@ //! Tests for `AccountTreeWithHistory` #[cfg(test)] -#[expect(clippy::similar_names)] #[expect(clippy::needless_range_loop)] #[expect(clippy::uninlined_format_args)] #[expect(clippy::cast_sign_loss)] @@ -152,12 +151,12 @@ mod account_tree_with_history_tests { fn test_many_accounts_sequential_updates() { // Create 50 different account IDs let account_count = 50; - let ids: Vec<_> = (0..account_count) + let account_ids: Vec<_> = (0..account_count) .map(|i| AccountIdBuilder::new().build_with_seed([i as u8; 32])) .collect(); // Create initial state with all accounts having value [i, 0, 0, 0] - let initial_state: Vec<_> = ids + let initial_state: Vec<_> = account_ids .iter() .enumerate() .map(|(i, &id)| (id, Word::from([i as u32, 0, 0, 0]))) @@ -173,7 +172,7 @@ mod account_tree_with_history_tests { .map(|i| { let idx = ((block - 1) * 5 + i) % account_count; let new_value = Word::from([idx as u32 + block as u32 * 100, 0, 0, 0]); - (ids[idx], new_value) + (account_ids[idx], new_value) }) .collect(); hist.compute_and_apply_mutations(updates).unwrap(); @@ -184,7 +183,7 @@ mod account_tree_with_history_tests { // Check 
genesis state for a few accounts for i in 0..4 { - let witness = hist.open_at(ids[i], BlockNumber::GENESIS).unwrap(); + let witness = hist.open_at(account_ids[i], BlockNumber::GENESIS).unwrap(); assert_eq!( witness.state_commitment(), Word::from([i as u32, 0, 0, 0]), @@ -197,7 +196,8 @@ mod account_tree_with_history_tests { for block in 1..=num_blocks { for i in 0..5 { let idx = ((block - 1) * 5 + i) % account_count; - let witness = hist.open_at(ids[idx], BlockNumber::from(block as u32)).unwrap(); + let witness = + hist.open_at(account_ids[idx], BlockNumber::from(block as u32)).unwrap(); let expected = Word::from([idx as u32 + block as u32 * 100, 0, 0, 0]); assert_eq!( witness.state_commitment(), @@ -302,7 +302,7 @@ mod account_tree_with_history_tests { fn test_sparse_updates_many_accounts() { // Create 200 accounts but only update a few at a time let account_count = 200; - let ids: Vec<_> = (0..account_count) + let account_ids: Vec<_> = (0..account_count) .map(|i| { let mut seed = [0u8; 32]; seed[0] = i as u8; @@ -312,7 +312,7 @@ mod account_tree_with_history_tests { .collect(); // Create initial state with first 50 accounts - let initial_state: Vec<_> = ids + let initial_state: Vec<_> = account_ids .iter() .take(50) .enumerate() @@ -323,7 +323,7 @@ mod account_tree_with_history_tests { let mut hist = AccountTreeWithHistory::new(initial_tree, BlockNumber::GENESIS); // Block 1: Add 50 more accounts - let updates1: Vec<_> = ids + let updates1: Vec<_> = account_ids .iter() .skip(50) .take(50) @@ -333,7 +333,7 @@ mod account_tree_with_history_tests { hist.compute_and_apply_mutations(updates1).unwrap(); // Block 2: Update every 10th account - let updates2: Vec<_> = ids + let updates2: Vec<_> = account_ids .iter() .enumerate() .filter(|(i, _)| i % 10 == 0) @@ -343,7 +343,7 @@ mod account_tree_with_history_tests { hist.compute_and_apply_mutations(updates2).unwrap(); // Block 3: Add remaining accounts - let updates3: Vec<_> = ids + let updates3: Vec<_> = account_ids 
.iter() .skip(100) .enumerate() @@ -354,13 +354,13 @@ mod account_tree_with_history_tests { // Verify states at different blocks // Check genesis - first 50 accounts exist, others don't for i in 0..50 { - let witness = hist.open_at(ids[i], BlockNumber::GENESIS).unwrap(); + let witness = hist.open_at(account_ids[i], BlockNumber::GENESIS).unwrap(); assert_eq!(witness.state_commitment(), Word::from([i as u32, 0, 0, 0])); } // Check block 1 - first 100 accounts exist for i in 50..100 { - let witness = hist.open_at(ids[i], BlockNumber::from(1)).unwrap(); + let witness = hist.open_at(account_ids[i], BlockNumber::from(1)).unwrap(); assert_eq!(witness.state_commitment(), Word::from([i as u32, 1, 0, 0])); } @@ -368,14 +368,14 @@ mod account_tree_with_history_tests { for i in 0..10 { let idx = i * 10; if idx < 100 { - let witness = hist.open_at(ids[idx], BlockNumber::from(2)).unwrap(); + let witness = hist.open_at(account_ids[idx], BlockNumber::from(2)).unwrap(); assert_eq!(witness.state_commitment(), Word::from([idx as u32, 2, 0, 0])); } } // Check block 3 - all 200 accounts should be accessible for i in [0, 50, 100, 150, 199] { - let witness = hist.open_at(ids[i], BlockNumber::from(3)); + let witness = hist.open_at(account_ids[i], BlockNumber::from(3)); assert!(witness.is_some(), "Account {} should exist at block 3", i); } } diff --git a/crates/store/src/db/manager.rs b/crates/store/src/db/manager.rs index fca9a33db..5ac72e0ad 100644 --- a/crates/store/src/db/manager.rs +++ b/crates/store/src/db/manager.rs @@ -36,12 +36,12 @@ impl ConnectionManagerError { /// Create a connection manager with per-connection setup /// /// Particularly, `foreign_key` checks are enabled and using a write-append-log for journaling. 
-pub(crate) struct ConnectionManager { +pub struct ConnectionManager { pub(crate) manager: deadpool_diesel::sqlite::Manager, } impl ConnectionManager { - pub(crate) fn new(database_path: &str) -> Self { + pub fn new(database_path: &str) -> Self { let manager = deadpool_diesel::sqlite::Manager::new( database_path.to_owned(), deadpool_diesel::sqlite::Runtime::Tokio1, @@ -78,6 +78,11 @@ impl deadpool::managed::Manager for ConnectionManager { pub(crate) fn configure_connection_on_creation( conn: &mut SqliteConnection, ) -> Result<(), ConnectionManagerError> { + // Wait up to 5 seconds for writer locks before erroring. + diesel::sql_query("PRAGMA busy_timeout=5000") + .execute(conn) + .map_err(ConnectionManagerError::ConnectionParamSetup)?; + // Enable the WAL mode. This allows concurrent reads while the transaction is being written, // this is required for proper synchronization of the servers in-memory and on-disk // representations (see [State::apply_block]) diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 0b8f0fd42..5ab4d55b1 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -209,6 +209,11 @@ impl From for NoteSyncRecord { } impl Db { + /// Creates a new database instance with the provided connection pool. + pub fn new(pool: deadpool_diesel::Pool) -> Self { + Self { pool } + } + /// Creates a new database and inserts the genesis block. 
#[instrument( target = COMPONENT, @@ -251,7 +256,7 @@ impl Db { } /// Create and commit a transaction with the queries added in the provided closure - pub(crate) async fn transact(&self, msg: M, query: Q) -> std::result::Result + pub async fn transact(&self, msg: M, query: Q) -> std::result::Result where Q: Send + for<'a, 't> FnOnce(&'a mut SqliteConnection) -> std::result::Result @@ -276,7 +281,7 @@ impl Db { } /// Run the query _without_ a transaction - pub(crate) async fn query(&self, msg: M, query: Q) -> std::result::Result + pub async fn query(&self, msg: M, query: Q) -> std::result::Result where Q: Send + FnOnce(&mut SqliteConnection) -> std::result::Result + 'static, R: Send + 'static, diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index de6f7e950..3720729b1 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -50,7 +50,7 @@ pub struct DatabaseTypeConversionError { /// Convert from and to it's database representation and back /// /// We do not assume sanity of DB types. 
-pub(crate) trait SqlTypeConvert: Sized { +pub trait SqlTypeConvert: Sized { type Raw: Sized; fn to_raw_sql(self) -> Self::Raw; diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index 1cc028ac3..06bba2fe8 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -10,6 +10,10 @@ pub mod state; #[cfg(feature = "rocksdb")] pub use accounts::PersistentAccountTree; pub use accounts::{AccountTreeWithHistory, HistoricalError, InMemoryAccountTree}; +pub use db::Db; +pub use db::manager::ConnectionManager; +pub use db::models::conv::SqlTypeConvert; +pub use errors::{DatabaseError, DatabaseSetupError}; pub use genesis::GenesisState; pub use server::block_prover_client::BlockProver; pub use server::{DataDirectory, Store}; diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index 6115e7cff..26a76a2b3 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -18,8 +18,12 @@ workspace = true [dependencies] anyhow = { workspace = true } +deadpool-diesel = { workspace = true } +diesel = { workspace = true } +diesel_migrations = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-store = { workspace = true } miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { workspace = true } miden-tx = { workspace = true } diff --git a/crates/validator/build.rs b/crates/validator/build.rs new file mode 100644 index 000000000..b9f947e17 --- /dev/null +++ b/crates/validator/build.rs @@ -0,0 +1,9 @@ +// This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in +// `validator/src/db/migrations.rs` to include the latest version of the migrations into the binary, see . +fn main() { + println!("cargo:rerun-if-changed=./src/db/migrations"); + // If we do one re-write, the default rules are disabled, + // hence we need to trigger explicitly on `Cargo.toml`. 
+ // + println!("cargo:rerun-if-changed=Cargo.toml"); +} diff --git a/crates/validator/diesel.toml b/crates/validator/diesel.toml new file mode 100644 index 000000000..bdce9175f --- /dev/null +++ b/crates/validator/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/db/schema.rs" diff --git a/crates/validator/src/block_validation/mod.rs b/crates/validator/src/block_validation/mod.rs index c1cab190b..143d2dee1 100644 --- a/crates/validator/src/block_validation/mod.rs +++ b/crates/validator/src/block_validation/mod.rs @@ -1,22 +1,24 @@ -use std::sync::Arc; - -use miden_protocol::block::{BlockNumber, BlockSigner, ProposedBlock}; +use miden_node_store::{DatabaseError, Db}; +use miden_protocol::block::{BlockSigner, ProposedBlock}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_protocol::errors::ProposedBlockError; -use miden_protocol::transaction::TransactionId; -use tracing::{Instrument, info_span}; +use miden_protocol::transaction::{TransactionHeader, TransactionId}; +use tracing::{info_span, instrument}; -use crate::server::ValidatedTransactions; +use crate::COMPONENT; +use crate::db::find_unvalidated_transactions; // BLOCK VALIDATION ERROR // ================================================================================================ #[derive(thiserror::Error, Debug)] pub enum BlockValidationError { - #[error("transaction {0} in block {1} has not been validated")] - TransactionNotValidated(TransactionId, BlockNumber), + #[error("block contains unvalidated transactions {0:?}")] + UnvalidatedTransactions(Vec), #[error("failed to build block")] - BlockBuildingFailed(#[from] ProposedBlockError), + BlockBuildingFailed(#[source] ProposedBlockError), + #[error("failed to select transactions")] + DatabaseError(#[source] DatabaseError), } // BLOCK VALIDATION @@ -24,33 +26,31 @@ pub enum BlockValidationError { /// Validates a block by 
checking that all transactions in the proposed block have been processed by /// the validator in the past. -/// -/// Removes the validated transactions from the cache upon success. +#[instrument(target = COMPONENT, skip_all, err)] pub async fn validate_block( proposed_block: ProposedBlock, signer: &S, - validated_transactions: Arc, + db: &Db, ) -> Result { - // Check that all transactions in the proposed block have been validated - let verify_span = info_span!("verify_transactions"); - for tx_header in proposed_block.transactions() { - let tx_id = tx_header.id(); - // TODO: LruCache is a poor abstraction since it locks many times. - if validated_transactions - .get(&tx_id) - .instrument(verify_span.clone()) - .await - .is_none() - { - return Err(BlockValidationError::TransactionNotValidated( - tx_id, - proposed_block.block_num(), - )); - } + // Search for any proposed transactions that have not previously been validated. + let proposed_tx_ids = + proposed_block.transactions().map(TransactionHeader::id).collect::>(); + let unvalidated_txs = db + .transact("find_unvalidated_transactions", move |conn| { + find_unvalidated_transactions(conn, &proposed_tx_ids) + }) + .await + .map_err(BlockValidationError::DatabaseError)?; + + // All proposed transactions must have been validated. + if !unvalidated_txs.is_empty() { + return Err(BlockValidationError::UnvalidatedTransactions(unvalidated_txs)); } // Build the block header. - let (header, _) = proposed_block.into_header_and_body()?; + let (header, _) = proposed_block + .into_header_and_body() + .map_err(BlockValidationError::BlockBuildingFailed)?; // Sign the header. 
let signature = info_span!("sign_block").in_scope(|| signer.sign(&header)); diff --git a/crates/validator/src/db/migrations.rs b/crates/validator/src/db/migrations.rs new file mode 100644 index 000000000..6896082be --- /dev/null +++ b/crates/validator/src/db/migrations.rs @@ -0,0 +1,25 @@ +use diesel::SqliteConnection; +use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; +use miden_node_store::DatabaseError; +use tracing::instrument; + +use crate::COMPONENT; + +// The rebuild is automatically triggered by `build.rs` as described in +// . +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations"); + +#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +pub fn apply_migrations(conn: &mut SqliteConnection) -> std::result::Result<(), DatabaseError> { + let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); + tracing::info!(target = COMPONENT, "Applying {} migration(s)", migrations.len()); + + let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { + return Ok(()); + }; + tracing::warn!(target = COMPONENT, "Failed to apply migration: {e:?}"); + conn.revert_last_migration(MIGRATIONS) + .expect("Duality is maintained by the developer"); + + Ok(()) +} diff --git a/crates/validator/src/db/migrations/2025062000000_setup/down.sql b/crates/validator/src/db/migrations/2025062000000_setup/down.sql new file mode 100644 index 000000000..e69de29bb diff --git a/crates/validator/src/db/migrations/2025062000000_setup/up.sql b/crates/validator/src/db/migrations/2025062000000_setup/up.sql new file mode 100644 index 000000000..06297a970 --- /dev/null +++ b/crates/validator/src/db/migrations/2025062000000_setup/up.sql @@ -0,0 +1,10 @@ +CREATE TABLE validated_transactions ( + id BLOB NOT NULL, + block_num INTEGER NOT NULL, + account_id BLOB NOT NULL, + "transaction" BLOB NOT NULL, -- Binary encoded ExecutedTransaction. 
+ PRIMARY KEY (id) +) WITHOUT ROWID; + +CREATE INDEX idx_validated_transactions_account_id ON validated_transactions(account_id); +CREATE INDEX idx_validated_transactions_block_num ON validated_transactions(block_num); diff --git a/crates/validator/src/db/mod.rs b/crates/validator/src/db/mod.rs new file mode 100644 index 000000000..14d85e34f --- /dev/null +++ b/crates/validator/src/db/mod.rs @@ -0,0 +1,83 @@ +mod migrations; +mod models; +mod schema; + +use std::path::PathBuf; + +use diesel::SqliteConnection; +use diesel::dsl::exists; +use diesel::prelude::*; +use miden_node_store::{ConnectionManager, DatabaseError, DatabaseSetupError}; +use miden_protocol::transaction::TransactionId; +use miden_protocol::utils::Serializable; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::migrations::apply_migrations; +use crate::db::models::ValidatedTransactionRowInsert; +use crate::tx_validation::ValidatedTransaction; + +/// Open a connection to the DB and apply any pending migrations. +#[instrument(target = COMPONENT, skip_all)] +pub async fn load(database_filepath: PathBuf) -> Result { + let manager = ConnectionManager::new(database_filepath.to_str().unwrap()); + let pool = deadpool_diesel::Pool::builder(manager).max_size(16).build()?; + + tracing::info!( + target: COMPONENT, + sqlite= %database_filepath.display(), + "Connected to the database" + ); + + let db = miden_node_store::Db::new(pool); + db.query("migrations", apply_migrations).await?; + Ok(db) +} + +/// Inserts a new validated transaction into the database. 
+#[instrument(target = COMPONENT, skip_all, fields(tx_id = %tx_info.tx_id()), err)] +pub(crate) fn insert_transaction( + conn: &mut SqliteConnection, + tx_info: &ValidatedTransaction, +) -> Result { + let row = ValidatedTransactionRowInsert::new(tx_info); + let count = diesel::insert_into(schema::validated_transactions::table) + .values(row) + .on_conflict_do_nothing() + .execute(conn)?; + Ok(count) +} + +/// Scans the database for transaction Ids that do not exist. +/// +/// If the resulting vector is empty, all supplied transaction ids have been validated in the past. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT EXISTS( +/// SELECT 1 +/// FROM validated_transactions +/// WHERE id = ? +/// ); +/// ``` +#[instrument(target = COMPONENT, skip(conn), err)] +pub(crate) fn find_unvalidated_transactions( + conn: &mut SqliteConnection, + tx_ids: &[TransactionId], +) -> Result, DatabaseError> { + let mut unvalidated_tx_ids = Vec::new(); + for tx_id in tx_ids { + // Check whether each transaction id exists in the database. + let exists = diesel::select(exists( + schema::validated_transactions::table + .filter(schema::validated_transactions::id.eq(tx_id.to_bytes())), + )) + .get_result::(conn)?; + // Record any transaction ids that do not exist. 
+ if !exists { + unvalidated_tx_ids.push(*tx_id); + } + } + Ok(unvalidated_tx_ids) +} diff --git a/crates/validator/src/db/models.rs b/crates/validator/src/db/models.rs new file mode 100644 index 000000000..e1e67086a --- /dev/null +++ b/crates/validator/src/db/models.rs @@ -0,0 +1,27 @@ +use diesel::prelude::*; +use miden_node_store::SqlTypeConvert; +use miden_tx::utils::Serializable; + +use crate::db::schema; +use crate::tx_validation::ValidatedTransaction; + +#[derive(Debug, Clone, PartialEq, Insertable)] +#[diesel(table_name = schema::validated_transactions)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct ValidatedTransactionRowInsert { + pub id: Vec, + pub block_num: i64, + pub account_id: Vec, + pub transaction: Vec, +} + +impl ValidatedTransactionRowInsert { + pub fn new(tx: &ValidatedTransaction) -> Self { + Self { + id: tx.tx_id().to_bytes(), + block_num: tx.block_num().to_raw_sql(), + account_id: tx.account_id().to_bytes(), + transaction: tx.to_bytes(), + } + } +} diff --git a/crates/validator/src/db/schema.rs b/crates/validator/src/db/schema.rs new file mode 100644 index 000000000..0d299dbfd --- /dev/null +++ b/crates/validator/src/db/schema.rs @@ -0,0 +1,8 @@ +diesel::table! 
{ + validated_transactions (id, block_num, account_id, transaction) { + id -> Binary, + block_num -> BigInt, + account_id -> Binary, + transaction -> Binary, + } +} diff --git a/crates/validator/src/lib.rs b/crates/validator/src/lib.rs index a45112d27..a987304c3 100644 --- a/crates/validator/src/lib.rs +++ b/crates/validator/src/lib.rs @@ -1,4 +1,5 @@ mod block_validation; +mod db; mod server; mod tx_validation; diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index 89d28d25d..94bf41315 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -1,5 +1,5 @@ use std::net::SocketAddr; -use std::num::NonZeroUsize; +use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -7,36 +7,26 @@ use anyhow::Context; use miden_node_proto::generated::validator::api_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto_build::validator_api_descriptor; +use miden_node_store::Db; use miden_node_utils::ErrorReport; -use miden_node_utils::lru_cache::LruCache; use miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_node_utils::tracing::grpc::grpc_trace_fn; use miden_protocol::block::{BlockSigner, ProposedBlock}; -use miden_protocol::transaction::{ - ProvenTransaction, - TransactionHeader, - TransactionId, - TransactionInputs, -}; +use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; use miden_tx::utils::{Deserializable, Serializable}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; use tonic::Status; use tower_http::catch_panic::CatchPanicLayer; use tower_http::trace::TraceLayer; -use tracing::{Instrument, info_span}; +use tracing::{info_span, instrument}; use crate::COMPONENT; use crate::block_validation::validate_block; +use crate::db::{insert_transaction, load}; use crate::tx_validation::validate_transaction; -/// Number of transactions to keep in the validated 
transactions cache. -const NUM_VALIDATED_TRANSACTIONS: NonZeroUsize = NonZeroUsize::new(10000).unwrap(); - -/// A type alias for a LRU cache that stores validated transactions. -pub type ValidatedTransactions = LruCache; - // VALIDATOR // ================================================================================ @@ -53,6 +43,9 @@ pub struct Validator { /// The signer used to sign blocks. pub signer: S, + + /// The data directory for the validator component's database files. + pub data_directory: PathBuf, } impl Validator { @@ -63,6 +56,11 @@ impl Validator { pub async fn serve(self) -> anyhow::Result<()> { tracing::info!(target: COMPONENT, endpoint=?self.address, "Initializing server"); + // Initialize database connection. + let db = load(self.data_directory.join("validator.sqlite3")) + .await + .context("failed to initialize validator database")?; + let listener = TcpListener::bind(self.address) .await .context("failed to bind to block producer address")?; @@ -86,7 +84,7 @@ impl Validator { .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) .timeout(self.grpc_timeout) - .add_service(api_server::ApiServer::new(ValidatorServer::new(self.signer))) + .add_service(api_server::ApiServer::new(ValidatorServer::new(self.signer, db))) .add_service(reflection_service) .add_service(reflection_service_alpha) .serve_with_incoming(TcpListenerStream::new(listener)) @@ -103,14 +101,12 @@ impl Validator { /// Implements the gRPC API for the validator. 
struct ValidatorServer { signer: S, - validated_transactions: Arc, + db: Arc, } impl ValidatorServer { - fn new(signer: S) -> Self { - let validated_transactions = - Arc::new(ValidatedTransactions::new(NUM_VALIDATED_TRANSACTIONS)); - Self { signer, validated_transactions } + fn new(signer: S, db: Db) -> Self { + Self { signer, db: db.into() } } } @@ -128,6 +124,7 @@ impl api_server::Api for ValidatorServer } /// Receives a proven transaction, then validates and stores it. + #[instrument(target = COMPONENT, skip_all, err)] async fn submit_proven_transaction( &self, request: tonic::Request, @@ -150,17 +147,14 @@ impl api_server::Api for ValidatorServer tracing::Span::current().set_attribute("transaction.id", tx.id()); // Validate the transaction. - let validated_tx_header = validate_transaction(tx, inputs).await.map_err(|err| { + let tx_info = validate_transaction(tx, inputs).await.map_err(|err| { Status::invalid_argument(err.as_report_context("Invalid transaction")) })?; - // Register the validated transaction. - let tx_id = validated_tx_header.id(); - self.validated_transactions - .put(tx_id, validated_tx_header) - .instrument(info_span!("validated_txs.insert")) - .await; - + // Store the validated transaction. + self.db + .transact("insert_transaction", move |conn| insert_transaction(conn, &tx_info)) + .await?; Ok(tonic::Response::new(())) } @@ -181,11 +175,12 @@ impl api_server::Api for ValidatorServer // Validate the block. let signature = - validate_block(proposed_block, &self.signer, self.validated_transactions.clone()) - .await - .map_err(|err| { - tonic::Status::invalid_argument(format!("Failed to validate block: {err}",)) - })?; + validate_block(proposed_block, &self.signer, &self.db).await.map_err(|err| { + tonic::Status::invalid_argument(format!( + "Failed to validate block: {}", + err.as_report() + )) + })?; // Send the signature. 
info_span!("serialize").in_scope(|| { diff --git a/crates/validator/src/tx_validation/mod.rs b/crates/validator/src/tx_validation/mod.rs index 20d610aca..f2d1250a2 100644 --- a/crates/validator/src/tx_validation/mod.rs +++ b/crates/validator/src/tx_validation/mod.rs @@ -1,11 +1,15 @@ mod data_store; +mod validated_tx; pub use data_store::TransactionInputsDataStore; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::transaction::{ProvenTransaction, TransactionHeader, TransactionInputs}; use miden_tx::auth::UnreachableAuth; use miden_tx::{TransactionExecutor, TransactionExecutorError, TransactionVerifier}; -use tracing::{Instrument, info_span}; +use tracing::{Instrument, info_span, instrument}; +pub use validated_tx::ValidatedTransaction; + +use crate::COMPONENT; // TRANSACTION VALIDATION ERROR // ================================================================================================ @@ -30,10 +34,11 @@ pub enum TransactionValidationError { /// provided proven transaction. /// /// Returns the header of the executed transaction if successful. 
+#[instrument(target = COMPONENT, skip_all, err)] pub async fn validate_transaction( proven_tx: ProvenTransaction, tx_inputs: TransactionInputs, -) -> Result { +) -> Result { // First, verify the transaction proof info_span!("verify").in_scope(|| { let tx_verifier = TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL); @@ -56,7 +61,7 @@ pub async fn validate_transaction( let executed_tx_header: TransactionHeader = (&executed_tx).into(); let proven_tx_header: TransactionHeader = (&proven_tx).into(); if executed_tx_header == proven_tx_header { - Ok(executed_tx_header) + Ok(ValidatedTransaction::new(executed_tx)) } else { Err(TransactionValidationError::Mismatch { proven_tx_header: proven_tx_header.into(), diff --git a/crates/validator/src/tx_validation/validated_tx.rs b/crates/validator/src/tx_validation/validated_tx.rs new file mode 100644 index 000000000..3ee7dfa45 --- /dev/null +++ b/crates/validator/src/tx_validation/validated_tx.rs @@ -0,0 +1,38 @@ +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::{ExecutedTransaction, TransactionId}; +use miden_tx::utils::Serializable; + +/// Re-executed and validated transaction that the Validator, or some ad-hoc +/// auditing procedure, might need to analyze. +/// +/// Constructed from an [`ExecutedTransaction`] that the Validator would have created while +/// re-executing and validating a [`miden_protocol::transaction::ProvenTransaction`]. +pub struct ValidatedTransaction(ExecutedTransaction); + +impl ValidatedTransaction { + /// Creates a new instance of [`ValidatedTransactionInfo`]. + pub fn new(tx: ExecutedTransaction) -> Self { + Self(tx) + } + + /// Returns ID of the transaction. + pub fn tx_id(&self) -> TransactionId { + self.0.id() + } + + /// Returns the block number in which the transaction was executed. 
+ pub fn block_num(&self) -> BlockNumber { + self.0.block_header().block_num() + } + + /// Returns ID of the account against which this transaction was executed. + pub fn account_id(&self) -> AccountId { + self.0.account_delta().id() + } + + /// Returns the binary representation of the transaction info. + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} From 2f26190077c9f5c0227550ba9b74bc21a0027e29 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 18 Feb 2026 15:29:54 +0100 Subject: [PATCH 40/77] feat/ci: ensure static linkage of C deps (#1684) --- .github/workflows/ci.yml | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f89d38d2f..b8bea522e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,6 +58,43 @@ jobs: save-if: ${{ github.ref == 'refs/heads/next' }} - name: cargo build run: cargo build --workspace --all-targets --locked + - name: Check static linkage + run: | + # Ensure database libraries are statically linked to avoid system library dependencies + # + # It explodes our possible dependency matrix when debugging, particularly + # in the case of sqlite and rocksdb as embedded databases, we want them + # shipped in identical versions we test with. Those are notoriously difficult + # to compile time configure and OSes make very opinionated choices. + metadata=$(cargo metadata --no-deps --format-version 1) + mapfile -t bin_targets < <( + echo "${metadata}" | jq -r '.packages[].targets[] | select(.kind[] == "bin") | .name' | sort -u + ) + if [[ ${#bin_targets[@]} -eq 0 ]]; then + echo "error: No binary targets found in cargo manifest." + exit 1 + fi + for bin_target in "${bin_targets[@]}"; do + # Ensure the binary was built by the previous step. + binary_path="target/debug/${bin_target}" + if ! 
[[ -x "${binary_path}" ]]; then + echo "error: Missing binary or missing executable bit: ${binary_path}"; + exit 2; + fi + # ldd exits non-zero for static binaries, so we inspect its output instead. + # if ldd fails we use an empty string instead + ldd_output="$(ldd "${binary_path}" 2>&1 || true)" + if echo "${ldd_output}" | grep -E -q 'not a dynamic executable'; then + continue + fi + # librocksdb/libsqlite entries indicate dynamic linkage (bad). + if echo "${ldd_output}" | grep -E -q 'librocksdb|libsqlite'; then + echo "error: Dynamic linkage detected for ${bin_target}." + echo "${ldd_output}" + exit 3 + fi + done + echo "Static linkage check passed for all of ${bin_targets[@]}" clippy: name: lint - clippy From 21a84c580e1f37810e9b76d3cf250d7448d42482 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 18 Feb 2026 17:31:16 +0200 Subject: [PATCH 41/77] ci: simplify msrv CI check (#1673) --- .github/workflows/nightly.yml | 68 +++++++++++++-- scripts/check-msrv.sh | 153 ---------------------------------- 2 files changed, 59 insertions(+), 162 deletions(-) delete mode 100755 scripts/check-msrv.sh diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index a5d6e3cae..1d3755341 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -20,7 +20,7 @@ jobs: steps: - uses: actions/checkout@v6 with: - ref: 'next' + ref: "next" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - name: Install RocksDB @@ -42,7 +42,7 @@ jobs: steps: - uses: actions/checkout@v6 with: - ref: 'next' + ref: "next" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - name: Install RocksDB @@ -54,15 +54,65 @@ jobs: - name: Check all feature combinations run: make check-features - # Check that our MSRV complies with our specified rust version. 
+ workspace-packages: + name: list packages + runs-on: ubuntu-latest + outputs: + packages: ${{ steps.package-matrix.outputs.packages }} + # Deliberately use stable rust instead of the toolchain.toml version. + # This prevents installing the toolchain version which isn't crucial for this operation. + env: + RUSTUP_TOOLCHAIN: stable + steps: + - uses: actions/checkout@v6 + with: + ref: "next" + - name: Extract workspace packages + id: package-matrix + run: | + PACKAGES=$(cargo metadata --format-version 1 --no-deps \ + | jq -c ' + .workspace_members as $members + | .packages + | map(select(.id as $id | $members | index($id))) + | map(.name) + ') + + echo "packages=$PACKAGES" >> "$GITHUB_OUTPUT" + msrv: - name: msrv check - runs-on: ubuntu-24.04 + needs: workspace-packages + runs-on: ubuntu-latest + strategy: + matrix: + package: ${{ fromJson(needs.workspace-packages.outputs.packages) }} + # Deliberately use stable rust instead of the toolchain.toml version. + # This is prevents issues where e.g. `cargo-msrv` requires a newer version of rust than the toolchain.toml version. 
+ env: + RUSTUP_TOOLCHAIN: stable steps: - uses: actions/checkout@v6 with: - ref: 'next' - - name: check + ref: "next" + - name: Install binstall + uses: cargo-bins/cargo-binstall@main + - name: Install cargo-msrv + run: cargo binstall --no-confirm cargo-msrv + - name: Get manifest path for package + id: pkg + run: | + MANIFEST_PATH=$(cargo metadata --format-version 1 --no-deps \ + | jq -r ' + .packages[] + | select(.name == "${{ matrix.package }}") + | .manifest_path + ') + echo "manifest_path=$MANIFEST_PATH" >> "$GITHUB_OUTPUT" + - name: Show package info + run: | + echo "Package: ${{ matrix.package }}" + echo "Manifest path: ${{ steps.pkg.outputs.manifest_path }}" + cargo msrv show --manifest-path "${{ steps.pkg.outputs.manifest_path }}" + - name: Check MSRV run: | - export PATH="$HOME/.cargo/bin:$PATH" - ./scripts/check-msrv.sh + cargo msrv verify --manifest-path "${{ steps.pkg.outputs.manifest_path }}" diff --git a/scripts/check-msrv.sh b/scripts/check-msrv.sh deleted file mode 100755 index 6058a0ace..000000000 --- a/scripts/check-msrv.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -set -e -set -o pipefail - -# Enhanced MSRV checking script for workspace repository -# Checks MSRV for each workspace member and provides helpful error messages - -# ---- utilities -------------------------------------------------------------- - -check_command() { - if ! 
command -v "$1" >/dev/null 2>&1; then - echo "ERROR: Required command '$1' is not installed or not in PATH" - exit 1 - fi -} - -# Check required commands -check_command "cargo" -check_command "jq" -check_command "rustup" -check_command "sed" -check_command "grep" -check_command "awk" - -# Portable in-place sed (GNU/macOS); usage: sed_i 's/foo/bar/' file -# shellcheck disable=SC2329 # used quoted -sed_i() { - if sed --version >/dev/null 2>&1; then - sed -i "$@" - else - sed -i '' "$@" - fi -} - -# ---- repo root -------------------------------------------------------------- - -# Get the directory where this script is located and change to the parent directory -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$DIR/.." - -echo "Checking MSRV for workspace members..." - -# ---- metadata -------------------------------------------------------------- - -metadata_json="$(cargo metadata --no-deps --format-version 1)" -workspace_root="$(printf '%s' "$metadata_json" | jq -r '.workspace_root')" - -failed_packages="" - -# Iterate actual workspace packages with manifest paths and (maybe) rust_version -# Fields per line (TSV): id name manifest_path rust_version_or_empty -while IFS=$'\t' read -r pkg_id package_name manifest_path rust_version; do - # Derive package directory (avoid external dirname for portability) - package_dir="${manifest_path%/*}" - if [[ -z "$package_dir" || "$package_dir" == "$manifest_path" ]]; then - package_dir="." - fi - - echo "Checking $package_name ($pkg_id) in $package_dir" - - if [[ ! -f "$package_dir/Cargo.toml" ]]; then - echo "WARNING: No Cargo.toml found in $package_dir, skipping..." 
- continue - fi - - # Prefer cargo metadata's effective rust_version if present - current_msrv="$rust_version" - if [[ -z "$current_msrv" ]]; then - # If the crate inherits: rust-version.workspace = true - if grep -Eq '^\s*rust-version\.workspace\s*=\s*true\b' "$package_dir/Cargo.toml"; then - # Read from workspace root [workspace.package] - current_msrv="$(grep -Eo '^\s*rust-version\s*=\s*"[^"]+"' "$workspace_root/Cargo.toml" | head -n1 | sed -E 's/.*"([^"]+)".*/\1/')" - if [[ -n "$current_msrv" ]]; then - echo " Using workspace MSRV: $current_msrv" - fi - fi - fi - - if [[ -z "$current_msrv" ]]; then - echo "WARNING: No rust-version found (package or workspace) for $package_name" - continue - fi - - echo " Current MSRV: $current_msrv" - - # Try to verify the MSRV - if ! cargo msrv verify --manifest-path "$package_dir/Cargo.toml" >/dev/null 2>&1; then - echo "ERROR: MSRV check failed for $package_name" - failed_packages="$failed_packages $package_name" - - echo "Searching for correct MSRV for $package_name..." 
- - # Determine the currently-installed stable toolchain version (e.g., "1.91.1") - latest_stable="$(rustup run stable rustc --version 2>/dev/null | awk '{print $2}')" - if [[ -z "$latest_stable" ]]; then latest_stable="1.91.1"; fi - - # Search for the actual MSRV starting from the current one - if actual_msrv=$(cargo msrv find \ - --manifest-path "$package_dir/Cargo.toml" \ - --min "$current_msrv" \ - --max "$latest_stable" \ - --output-format minimal 2>/dev/null); then - echo " Found actual MSRV: $actual_msrv" - echo "" - echo "ERROR SUMMARY for $package_name:" - echo " Package: $package_name" - echo " Directory: $package_dir" - echo " Current (incorrect) MSRV: $current_msrv" - echo " Correct MSRV: $actual_msrv" - echo "" - echo "TO FIX:" - echo " Update rust-version in $package_dir/Cargo.toml from \"$current_msrv\" to \"$actual_msrv\"" - echo "" - echo " Or run this command (portable in-place edit):" - echo " sed_i 's/^\\s*rust-version\\s*=\\s*\"$current_msrv\"/rust-version = \"$actual_msrv\"/' \"$package_dir/Cargo.toml\"" - else - echo " Could not determine correct MSRV automatically" - echo "" - echo "ERROR SUMMARY for $package_name:" - echo " Package: $package_name" - echo " Directory: $package_dir" - echo " Current (incorrect) MSRV: $current_msrv" - echo " Could not automatically determine correct MSRV" - echo "" - echo "TO FIX:" - echo " Run manually: cargo msrv find --manifest-path \"$package_dir/Cargo.toml\"" - fi - echo "-------------------------------------------------------------------------------" - else - echo "OK: MSRV check passed for $package_name" - fi - echo "" - -done < <( - printf '%s' "$metadata_json" \ - | jq -r '. as $m - | $m.workspace_members[] - | . 
as $id - | ($m.packages[] | select(.id == $id) - | [ .id, .name, .manifest_path, (.rust_version // "") ] | @tsv)' -) - -if [[ -n "$failed_packages" ]]; then - echo "MSRV CHECK FAILED" - echo "" - echo "The following packages have incorrect MSRV settings:$failed_packages" - echo "" - echo "Please fix the rust-version fields in the affected Cargo.toml files as shown above." - exit 1 -else - echo "ALL WORKSPACE MEMBERS PASSED MSRV CHECKS!" - exit 0 -fi From 32cf9446b5efd240650a255bf91f29b28772339e Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 18 Feb 2026 20:45:14 +0100 Subject: [PATCH 42/77] refactor: remove pagination from SyncChainMMR RPC (#1682) --- bin/stress-test/src/store/mod.rs | 59 ++++-------------------------- crates/proto/src/generated/rpc.rs | 8 ++-- crates/rpc/README.md | 9 +++++ crates/rpc/src/tests.rs | 4 -- crates/store/README.md | 9 +++++ crates/store/src/server/rpc_api.rs | 23 +++--------- crates/utils/src/limiter.rs | 10 ----- docs/external/src/rpc.md | 7 ++++ proto/proto/rpc.proto | 8 ++-- 9 files changed, 46 insertions(+), 91 deletions(-) diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index 3b9811d6e..314a5e95d 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -439,9 +439,7 @@ pub async fn bench_sync_chain_mmr( let request = |_| { let mut client = store_client.clone(); - tokio::spawn(async move { - sync_chain_mmr_paginated(&mut client, chain_tip, block_range_size).await - }) + tokio::spawn(async move { sync_chain_mmr(&mut client, chain_tip, block_range_size).await }) }; let results = stream::iter(0..iterations) @@ -456,77 +454,34 @@ pub async fn bench_sync_chain_mmr( print_summary(&timers_accumulator); let total_runs = results.len(); - let paginated_runs = results.iter().filter(|r| r.pages > 1).count(); - #[expect(clippy::cast_precision_loss)] - let pagination_rate = if total_runs > 0 { - (paginated_runs as f64 / total_runs as f64) * 100.0 - } else { - 0.0 - }; - 
#[expect(clippy::cast_precision_loss)] - let avg_pages = if total_runs > 0 { - results.iter().map(|r| r.pages as f64).sum::() / total_runs as f64 - } else { - 0.0 - }; println!("Pagination statistics:"); println!(" Total runs: {total_runs}"); - println!(" Runs triggering pagination: {paginated_runs}"); - println!(" Pagination rate: {pagination_rate:.2}%"); - println!(" Average pages per run: {avg_pages:.2}"); } /// Sends a single `sync_chain_mmr` request to the store and returns a tuple with: /// - the elapsed time. /// - the response. -pub async fn sync_chain_mmr( +async fn sync_chain_mmr( api_client: &mut RpcClient>, block_from: u32, block_to: u32, -) -> (Duration, proto::rpc::SyncChainMmrResponse) { +) -> SyncChainMmrRun { let sync_request = proto::rpc::SyncChainMmrRequest { block_range: Some(proto::rpc::BlockRange { block_from, block_to: Some(block_to) }), }; let start = Instant::now(); let response = api_client.sync_chain_mmr(sync_request).await.unwrap(); - (start.elapsed(), response.into_inner()) + let elapsed = start.elapsed(); + let response = response.into_inner(); + let _mmr_delta = response.mmr_delta.expect("mmr_delta should exist"); + SyncChainMmrRun { duration: elapsed } } #[derive(Clone)] struct SyncChainMmrRun { duration: Duration, - pages: usize, -} - -async fn sync_chain_mmr_paginated( - api_client: &mut RpcClient>, - chain_tip: u32, - block_range_size: u32, -) -> SyncChainMmrRun { - let mut total_duration = Duration::default(); - let mut pages = 0usize; - let mut next_block_from = 0u32; - - loop { - let target_block_to = next_block_from.saturating_add(block_range_size).min(chain_tip); - let (elapsed, response) = - sync_chain_mmr(api_client, next_block_from, target_block_to).await; - total_duration += elapsed; - pages += 1; - - let pagination_info = response.pagination_info.expect("pagination_info should exist"); - let _mmr_delta = response.mmr_delta.expect("mmr_delta should exist"); - - if pagination_info.block_num >= pagination_info.chain_tip { - 
break; - } - - next_block_from = pagination_info.block_num; - } - - SyncChainMmrRun { duration: total_duration, pages } } // LOAD STATE diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index cc3273e14..5cedf1208 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -442,11 +442,11 @@ pub struct SyncChainMmrRequest { /// Represents the result of syncing chain MMR. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SyncChainMmrResponse { - /// Pagination information. + /// For which block range the MMR delta is returned. #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, + pub block_range: ::core::option::Option, /// Data needed to update the partial MMR from `request.block_range.block_from + 1` to - /// `pagination_info.block_num`. + /// `response.block_range.block_to` or the chain tip. #[prost(message, optional, tag = "2")] pub mmr_delta: ::core::option::Option, } @@ -1052,6 +1052,7 @@ pub mod api_client { .insert(GrpcMethod::new("rpc.Api", "SyncAccountStorageMaps")); self.inner.unary(req, path, codec).await } + /// Returns MMR delta needed to synchronize the chain MMR within the requested block range. pub async fn sync_chain_mmr( &mut self, request: impl tonic::IntoRequest, @@ -1236,6 +1237,7 @@ pub mod api_server { tonic::Response, tonic::Status, >; + /// Returns MMR delta needed to synchronize the chain MMR within the requested block range. 
async fn sync_chain_mmr( &self, request: tonic::Request, diff --git a/crates/rpc/README.md b/crates/rpc/README.md index 13c8debce..bfa790910 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -25,6 +25,7 @@ The full gRPC method definitions can be found in the [proto](../proto/README.md) - [SyncAccountVault](#SyncAccountVault) - [SyncNotes](#syncnotes) - [SyncAccountStorageMaps](#syncaccountstoragemaps) +- [SyncChainMmr](#syncchainmmr) - [SyncTransactions](#synctransactions) @@ -236,6 +237,14 @@ When storage map synchronization fails, detailed error information is provided t --- +### SyncChainMmr + +Returns MMR delta information needed to synchronize the chain MMR within a block range. + +Caller specifies the `block_range`, starting from the last block already represented in its local MMR. The response contains the MMR delta for the requested range along with pagination info so the caller can continue syncing until the chain tip. + +--- + ### SyncTransactions Returns transaction records for specific accounts within a block range. diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 472e62daf..e4218bcb6 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -559,10 +559,6 @@ async fn sync_chain_mmr_returns_delta() { let response = rpc_client.sync_chain_mmr(request).await.expect("sync_chain_mmr should succeed"); let response = response.into_inner(); - let pagination_info = response.pagination_info.expect("pagination_info should exist"); - assert_eq!(pagination_info.chain_tip, 0); - assert_eq!(pagination_info.block_num, 0); - let mmr_delta = response.mmr_delta.expect("mmr_delta should exist"); assert_eq!(mmr_delta.forest, 0); assert!(mmr_delta.data.is_empty()); diff --git a/crates/store/README.md b/crates/store/README.md index 3ca7e19aa..65a4f148b 100644 --- a/crates/store/README.md +++ b/crates/store/README.md @@ -55,6 +55,7 @@ The full gRPC API can be found [here](../../proto/proto/store.proto). 
- [SyncAccountVault](#syncaccountvault) - [SyncNotes](#syncnotes) - [SyncAccountStorageMaps](#syncaccountstoragemaps) +- [SyncChainMmr](#syncchainmmr) - [SyncTransactions](#synctransactions) @@ -249,6 +250,14 @@ When storage map synchronization fails, detailed error information is provided t --- +### SyncChainMmr + +Returns MMR delta information needed to synchronize the chain MMR within a block range. + +Caller specifies the `block_range`, starting from the last block already represented in its local MMR. The response contains the MMR delta for the requested range and the returned `block_range` reflects the last block included, which may be the chain tip. + +--- + ### SyncTransactions Returns transaction records for specific accounts within a block range. diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index f5d12d6b4..78da84617 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -158,9 +158,6 @@ impl rpc_server::Rpc for StoreApi { &self, request: Request, ) -> Result, Status> { - // TODO find a reasonable upper boundary - const MAX_BLOCKS: u32 = 1 << 20; - let request = request.into_inner(); let chain_tip = self.state.latest_block_num().await; @@ -183,23 +180,13 @@ impl rpc_server::Rpc for StoreApi { }))?; } let block_range = block_from..=block_to; - let len = 1 + block_range.end().as_u32() - block_range.start().as_u32(); - let trimmed_block_range = if len > MAX_BLOCKS { - block_from..=BlockNumber::from(block_from.as_u32() + MAX_BLOCKS) - } else { - block_range - }; - - let mmr_delta = self - .state - .sync_chain_mmr(trimmed_block_range.clone()) - .await - .map_err(internal_error)?; + let mmr_delta = + self.state.sync_chain_mmr(block_range.clone()).await.map_err(internal_error)?; Ok(Response::new(proto::rpc::SyncChainMmrResponse { - pagination_info: Some(proto::rpc::PaginationInfo { - chain_tip: chain_tip.as_u32(), - block_num: trimmed_block_range.end().as_u32(), + block_range: 
Some(proto::rpc::BlockRange { + block_from: block_range.start().as_u32(), + block_to: Some(block_range.end().as_u32()), }), mmr_delta: Some(mmr_delta.into()), })) diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index 821b6755c..993b3be68 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -120,16 +120,6 @@ impl QueryParamLimiter for QueryParamBlockLimit { const LIMIT: usize = GENERAL_REQUEST_LIMIT; } -/// Used for the following RPC endpoints: -/// * `sync_chain_mmr` -/// -/// Capped at 1000 blocks to keep MMR deltas within the 4 MB payload budget. -pub struct QueryParamBlockRangeLimit; -impl QueryParamLimiter for QueryParamBlockRangeLimit { - const PARAM_NAME: &str = "block_range"; - const LIMIT: usize = GENERAL_REQUEST_LIMIT; -} - /// Used for the following RPC endpoints /// * `get_account` /// diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md index 08ba2fc3f..69b722406 100644 --- a/docs/external/src/rpc.md +++ b/docs/external/src/rpc.md @@ -23,6 +23,7 @@ The gRPC service definition can be found in the Miden node's `proto` [directory] - [SyncAccountVault](#syncaccountvault) - [SyncNotes](#syncnotes) - [SyncAccountStorageMaps](#syncaccountstoragemaps) +- [SyncChainMmr](#syncchainmmr) - [SyncTransactions](#synctransactions) - [Status](#status) @@ -216,6 +217,12 @@ Caller specifies the `account_id` of the public account and the block range (`bl This endpoint enables clients to maintain an updated view of account storage. +### SyncChainMmr + +Returns MMR delta information needed to synchronize the chain MMR within a block range. + +Caller specifies the `block_range`, starting from the last block already represented in its local MMR. The response contains the MMR delta for the requested range, but at most to (including) the chain tip. + ### SyncTransactions Returns transaction records for specific accounts within a block range. 
diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index 3a189d6c1..59f587f67 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -103,6 +103,7 @@ service Api { // Returns storage map updates for specified account and storage slots within a block range. rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} + // Returns MMR delta needed to synchronize the chain MMR within the requested block range. rpc SyncChainMmr(SyncChainMmrRequest) returns (SyncChainMmrResponse) {} } @@ -494,11 +495,10 @@ message SyncChainMmrRequest { // Represents the result of syncing chain MMR. message SyncChainMmrResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - + // For which block range the MMR delta is returned. + BlockRange block_range = 1; // Data needed to update the partial MMR from `request.block_range.block_from + 1` to - // `pagination_info.block_num`. + // `response.block_range.block_to` or the chain tip. 
primitives.MmrDelta mmr_delta = 2; } From 5f6568268d5ade484e56ebebbe46f4a79516f34f Mon Sep 17 00:00:00 2001 From: johnh4098 <113304207+johnh4098@users.noreply.github.com> Date: Thu, 19 Feb 2026 12:43:21 +0330 Subject: [PATCH 43/77] =?UTF-8?q?fix(rpc):=20add=20missing=20list=20valida?= =?UTF-8?q?tion=20for=20sync=5Ftransactions=20and=20remove=20inc=E2=80=A6?= =?UTF-8?q?=20(#1687)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crates/rpc/src/server/api.rs | 10 ++-------- crates/rpc/src/tests.rs | 27 ++++++++------------------- 2 files changed, 10 insertions(+), 27 deletions(-) diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 96836add9..f2a88cc05 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -479,6 +479,8 @@ impl api_server::Api for RpcService { ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); + check::(request.get_ref().account_ids.len())?; + self.store.clone().sync_transactions(request).await } @@ -536,14 +538,6 @@ static RPC_LIMITS: LazyLock = LazyLock::new(|| { "SyncTransactions".into(), endpoint_limits(&[(AccountId::PARAM_NAME, AccountId::LIMIT)]), ), - ( - "SyncAccountVault".into(), - endpoint_limits(&[(AccountId::PARAM_NAME, AccountId::LIMIT)]), - ), - ( - "SyncAccountStorageMaps".into(), - endpoint_limits(&[(AccountId::PARAM_NAME, AccountId::LIMIT)]), - ), ("SyncNotes".into(), endpoint_limits(&[(NoteTag::PARAM_NAME, NoteTag::LIMIT)])), ("GetNotesById".into(), endpoint_limits(&[(NoteId::PARAM_NAME, NoteId::LIMIT)])), ( diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index e4218bcb6..e70d14563 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -512,26 +512,15 @@ async fn get_limits_endpoint() { QueryParamAccountIdLimit::LIMIT ); - let sync_account_vault = - limits.endpoints.get("SyncAccountVault").expect("SyncAccountVault should exist"); - assert_eq!( - 
sync_account_vault.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), - Some(&(QueryParamAccountIdLimit::LIMIT as u32)), - "SyncAccountVault {} limit should be {}", - QueryParamAccountIdLimit::PARAM_NAME, - QueryParamAccountIdLimit::LIMIT + // SyncAccountVault and SyncAccountStorageMaps accept a singular account_id, + // not a repeated list, so they do not have list parameter limits. + assert!( + !limits.endpoints.contains_key("SyncAccountVault"), + "SyncAccountVault should not have list parameter limits" ); - - let sync_account_storage_maps = limits - .endpoints - .get("SyncAccountStorageMaps") - .expect("SyncAccountStorageMaps should exist"); - assert_eq!( - sync_account_storage_maps.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), - Some(&(QueryParamAccountIdLimit::LIMIT as u32)), - "SyncAccountStorageMaps {} limit should be {}", - QueryParamAccountIdLimit::PARAM_NAME, - QueryParamAccountIdLimit::LIMIT + assert!( + !limits.endpoints.contains_key("SyncAccountStorageMaps"), + "SyncAccountStorageMaps should not have list parameter limits" ); // Verify GetNotesById endpoint From be2771da4216378918baff637a18b8e78cc2d877 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Fri, 20 Feb 2026 06:31:40 +1300 Subject: [PATCH 44/77] chore: Refactor common db capabilities into separate crate (#1685) --- Cargo.lock | 23 ++- Cargo.toml | 2 + crates/db/Cargo.toml | 23 +++ crates/db/src/conv.rs | 183 ++++++++++++++++++ .../{ntx-builder/src/db => db/src}/errors.rs | 112 ++++++----- crates/db/src/lib.rs | 76 ++++++++ crates/{store/src/db => db/src}/manager.rs | 6 +- crates/ntx-builder/Cargo.toml | 4 +- crates/ntx-builder/src/db/manager.rs | 86 -------- crates/ntx-builder/src/db/migrations.rs | 2 +- crates/ntx-builder/src/db/mod.rs | 116 ----------- crates/ntx-builder/src/db/schema_hash.rs | 5 +- crates/ntx-builder/src/lib.rs | 4 +- crates/store/Cargo.toml | 2 +- crates/store/src/db/migrations.rs | 2 +- 
crates/store/src/db/mod.rs | 94 +++------ crates/store/src/db/models/queries/notes.rs | 17 +- crates/store/src/db/schema_hash.rs | 8 +- crates/store/src/errors.rs | 134 +------------ crates/store/src/lib.rs | 3 +- crates/validator/Cargo.toml | 3 +- crates/validator/src/block_validation/mod.rs | 2 +- crates/validator/src/db/migrations.rs | 2 +- crates/validator/src/db/mod.rs | 9 +- crates/validator/src/db/models.rs | 2 +- crates/validator/src/server/mod.rs | 7 +- 26 files changed, 439 insertions(+), 488 deletions(-) create mode 100644 crates/db/Cargo.toml create mode 100644 crates/db/src/conv.rs rename crates/{ntx-builder/src/db => db/src}/errors.rs (51%) create mode 100644 crates/db/src/lib.rs rename crates/{store/src/db => db/src}/manager.rs (95%) delete mode 100644 crates/ntx-builder/src/db/manager.rs diff --git a/Cargo.lock b/Cargo.lock index 09653daa0..e97b7fd51 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1015,7 +1015,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524bc3df0d57e98ecd022e21ba31166c2625e7d3e5bcc4510efaeeab4abcab04" dependencies = [ "deadpool-runtime", - "tracing", ] [[package]] @@ -2795,6 +2794,19 @@ dependencies = [ "winterfell", ] +[[package]] +name = "miden-node-db" +version = "0.14.0" +dependencies = [ + "deadpool", + "deadpool-diesel", + "deadpool-sync", + "diesel", + "miden-protocol", + "thiserror 2.0.18", + "tracing", +] + [[package]] name = "miden-node-grpc-error-macro" version = "0.14.0" @@ -2808,14 +2820,12 @@ name = "miden-node-ntx-builder" version = "0.14.0" dependencies = [ "anyhow", - "deadpool", - "deadpool-diesel", - "deadpool-sync", "diesel", "diesel_migrations", "futures", "indexmap 2.13.0", "libsqlite3-sys", + "miden-node-db", "miden-node-proto", "miden-node-test-macro", "miden-node-utils", @@ -2913,7 +2923,6 @@ dependencies = [ "criterion", "deadpool", "deadpool-diesel", - "deadpool-sync", "diesel", "diesel_migrations", "fs-err", @@ -2923,6 +2932,7 @@ dependencies = [ "libsqlite3-sys", 
"miden-block-prover", "miden-crypto", + "miden-node-db", "miden-node-proto", "miden-node-proto-build", "miden-node-rocksdb-cxx-linkage-fix", @@ -3012,12 +3022,11 @@ name = "miden-node-validator" version = "0.14.0" dependencies = [ "anyhow", - "deadpool-diesel", "diesel", "diesel_migrations", + "miden-node-db", "miden-node-proto", "miden-node-proto-build", - "miden-node-store", "miden-node-utils", "miden-protocol", "miden-tx", diff --git a/Cargo.toml b/Cargo.toml index db02abc0d..116e3548c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "bin/remote-prover", "bin/stress-test", "crates/block-producer", + "crates/db", "crates/grpc-error-macro", "crates/ntx-builder", "crates/proto", @@ -41,6 +42,7 @@ debug = true [workspace.dependencies] # Workspace crates. miden-node-block-producer = { path = "crates/block-producer", version = "0.14" } +miden-node-db = { path = "crates/db", version = "0.14" } miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.14" } miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.14" } miden-node-proto = { path = "crates/proto", version = "0.14" } diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml new file mode 100644 index 000000000..2a42af430 --- /dev/null +++ b/crates/db/Cargo.toml @@ -0,0 +1,23 @@ +[package] +authors.workspace = true +description = "Shared database capabilities for Miden node" +edition.workspace = true +homepage.workspace = true +keywords = ["database", "miden", "node"] +license.workspace = true +name = "miden-node-db" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lints] +workspace = true + +[dependencies] +deadpool = { default-features = false, workspace = true } +deadpool-diesel = { features = ["sqlite"], workspace = true } +deadpool-sync = { default-features = false, workspace = true } +diesel = { features = ["sqlite"], workspace = true } +miden-protocol = { workspace = true } +thiserror = { workspace = true } 
+tracing = { workspace = true } diff --git a/crates/db/src/conv.rs b/crates/db/src/conv.rs new file mode 100644 index 000000000..64c853c73 --- /dev/null +++ b/crates/db/src/conv.rs @@ -0,0 +1,183 @@ +//! Central place to define conversion from and to database primitive types +//! +//! Eventually, all of them should have types and we can implement a trait for them +//! rather than function pairs. +//! +//! Notice: All of them are infallible. The invariant is a sane content of the database +//! and humans ensure the sanity of casts. +//! +//! Notice: Keep in mind if you _need_ to expand the datatype, only if you require sorting this is +//! mandatory! +//! +//! Notice: Ensure you understand what casting does at the bit-level before changing any. +//! +//! Notice: Changing any of these are _backwards-incompatible_ changes that are not caught/covered +//! by migrations! + +#![expect( + clippy::inline_always, + reason = "Just unification helpers of 1-2 lines of casting types" +)] +#![expect( + dead_code, + reason = "Not all converters are used bidirectionally, however, keeping them is a good thing" +)] +#![expect( + clippy::cast_sign_loss, + reason = "This is the one file where we map the signed database types to the working types" +)] +#![expect( + clippy::cast_possible_wrap, + reason = "We will not approach the item count where i64 and usize casting will cause issues + on relevant platforms" +)] + +use miden_protocol::Felt; +use miden_protocol::account::{StorageSlotName, StorageSlotType}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::NoteTag; + +#[derive(Debug, thiserror::Error)] +#[error("failed to convert from database type {from_type} into {into_type}")] +pub struct DatabaseTypeConversionError { + source: Box, + from_type: &'static str, + into_type: &'static str, +} + +/// Convert from and to it's database representation and back +/// +/// We do not assume sanity of DB types. 
+pub trait SqlTypeConvert: Sized { + type Raw: Sized; + + fn to_raw_sql(self) -> Self::Raw; + fn from_raw_sql(_raw: Self::Raw) -> Result; + + fn map_err( + source: E, + ) -> DatabaseTypeConversionError { + DatabaseTypeConversionError { + source: Box::new(source), + from_type: std::any::type_name::(), + into_type: std::any::type_name::(), + } + } +} + +impl SqlTypeConvert for BlockNumber { + type Raw = i64; + + fn from_raw_sql(raw: Self::Raw) -> Result { + u32::try_from(raw).map(BlockNumber::from).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + i64::from(self.as_u32()) + } +} + +impl SqlTypeConvert for NoteTag { + type Raw = i32; + + #[inline(always)] + fn from_raw_sql(raw: Self::Raw) -> Result { + #[expect(clippy::cast_sign_loss)] + Ok(NoteTag::new(raw as u32)) + } + + #[inline(always)] + fn to_raw_sql(self) -> Self::Raw { + self.as_u32() as i32 + } +} + +impl SqlTypeConvert for StorageSlotType { + type Raw = i32; + + #[inline(always)] + fn from_raw_sql(raw: Self::Raw) -> Result { + #[derive(Debug, thiserror::Error)] + #[error("invalid storage slot type value {0}")] + struct ValueError(i32); + + Ok(match raw { + 0 => StorageSlotType::Value, + 1 => StorageSlotType::Map, + invalid => { + return Err(Self::map_err(ValueError(invalid))); + }, + }) + } + + #[inline(always)] + fn to_raw_sql(self) -> Self::Raw { + match self { + StorageSlotType::Value => 0, + StorageSlotType::Map => 1, + } + } +} + +impl SqlTypeConvert for StorageSlotName { + type Raw = String; + + fn from_raw_sql(raw: Self::Raw) -> Result { + StorageSlotName::new(raw).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + String::from(self) + } +} + +// Raw type conversions - eventually introduce wrapper types +// =========================================================== + +#[inline(always)] +pub(crate) fn raw_sql_to_nullifier_prefix(raw: i32) -> u16 { + debug_assert!(raw >= 0); + raw as u16 +} +#[inline(always)] +pub(crate) fn nullifier_prefix_to_raw_sql(prefix: u16) -> 
i32 { + i32::from(prefix) +} + +#[inline(always)] +pub(crate) fn raw_sql_to_nonce(raw: i64) -> Felt { + debug_assert!(raw >= 0); + Felt::new(raw as u64) +} +#[inline(always)] +pub(crate) fn nonce_to_raw_sql(nonce: Felt) -> i64 { + nonce.as_int() as i64 +} + +#[inline(always)] +pub(crate) fn raw_sql_to_fungible_delta(raw: i64) -> i64 { + raw +} +#[inline(always)] +pub(crate) fn fungible_delta_to_raw_sql(delta: i64) -> i64 { + delta +} + +#[inline(always)] +#[expect(clippy::cast_sign_loss)] +pub(crate) fn raw_sql_to_note_type(raw: i32) -> u8 { + raw as u8 +} +#[inline(always)] +pub(crate) fn note_type_to_raw_sql(note_type: u8) -> i32 { + i32::from(note_type) +} + +#[inline(always)] +pub(crate) fn raw_sql_to_idx(raw: i32) -> usize { + raw as usize +} +#[inline(always)] +pub(crate) fn idx_to_raw_sql(idx: usize) -> i32 { + idx as i32 +} diff --git a/crates/ntx-builder/src/db/errors.rs b/crates/db/src/errors.rs similarity index 51% rename from crates/ntx-builder/src/db/errors.rs rename to crates/db/src/errors.rs index 1ea43e382..222f1166e 100644 --- a/crates/ntx-builder/src/db/errors.rs +++ b/crates/db/src/errors.rs @@ -1,59 +1,19 @@ -use deadpool_sync::InteractError; - -use crate::db::manager::ConnectionManagerError; - -// DATABASE ERRORS -// ================================================================================================ - -#[derive(Debug, thiserror::Error)] -pub enum DatabaseError { - #[error("setup deadpool connection pool failed")] - ConnectionPoolObtainError(#[from] Box), - #[error(transparent)] - Diesel(#[from] diesel::result::Error), - #[error("SQLite pool interaction failed: {0}")] - InteractError(String), - #[error("schema verification failed")] - SchemaVerification(#[from] SchemaVerificationError), - #[error("connection manager error")] - ConnectionManager(#[source] ConnectionManagerError), -} +use std::any::type_name; +use std::io; -impl DatabaseError { - /// Converts from `InteractError`. 
- /// - /// Required since `InteractError` has at least one enum variant that is _not_ `Send + - /// Sync` and hence prevents the `Sync` auto implementation. This does an internal - /// conversion to string while maintaining convenience. - pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { - let msg = msg.to_string(); - Self::InteractError(format!("{msg} failed: {e:?}")) - } -} - -// DATABASE SETUP ERRORS -// ================================================================================================ - -#[derive(Debug, thiserror::Error)] -pub enum DatabaseSetupError { - #[error("I/O error")] - Io(#[from] std::io::Error), - #[error("database error")] - Database(#[from] DatabaseError), - #[error("pool build error")] - PoolBuild(#[source] deadpool::managed::BuildError), -} +use deadpool_sync::InteractError; +use thiserror::Error; -// SCHEMA VERIFICATION ERRORS -// ================================================================================================ +// SCHEMA VERIFICATION ERROR +// ================================================================================================= /// Errors that can occur during schema verification. 
-#[derive(Debug, thiserror::Error)] +#[derive(Debug, Error)] pub enum SchemaVerificationError { #[error("failed to create in-memory reference database")] InMemoryDbCreation(#[source] diesel::ConnectionError), #[error("failed to apply migrations to reference database")] - MigrationApplication(#[source] Box), + MigrationApplication(#[source] Box), #[error("failed to extract schema from database")] SchemaExtraction(#[source] diesel::result::Error), #[error( @@ -67,3 +27,59 @@ pub enum SchemaVerificationError { extra_count: usize, }, } + +// DATABASE ERROR +// ================================================================================================= + +#[derive(Debug, Error)] +pub enum DatabaseError { + #[error("SQLite pool interaction failed: {0}")] + InteractError(String), + #[error("setup deadpool connection pool failed")] + ConnectionPoolObtainError(#[from] Box), + #[error("conversion from SQL to rust type {to} failed")] + ConversionSqlToRust { + #[source] + inner: Option>, + to: &'static str, + }, + #[error(transparent)] + Diesel(#[from] diesel::result::Error), + #[error("schema verification failed")] + SchemaVerification(#[from] SchemaVerificationError), + #[error("I/O error")] + Io(#[from] io::Error), + #[error("pool build error")] + PoolBuild(#[from] deadpool::managed::BuildError), + #[error("Setup deadpool connection pool failed")] + Pool(#[from] deadpool::managed::PoolError), +} + +impl DatabaseError { + /// Converts from `InteractError` + /// + /// Note: Required since `InteractError` has at least one enum + /// variant that is _not_ `Send + Sync` and hence prevents the + /// `Sync` auto implementation. + /// This does an internal conversion to string while maintaining + /// convenience. 
+ /// + /// Using `MSG` as const so it can be called as + /// `.map_err(DatabaseError::interact::<"Your message">)` + pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { + let msg = msg.to_string(); + Self::InteractError(format!("{msg} failed: {e:?}")) + } + + /// Failed to convert an SQL entry to a rust representation + pub fn conversiont_from_sql(err: MaybeE) -> DatabaseError + where + MaybeE: Into>, + E: std::error::Error + Send + Sync + 'static, + { + DatabaseError::ConversionSqlToRust { + inner: err.into().map(|err| Box::new(err) as Box), + to: type_name::(), + } + } +} diff --git a/crates/db/src/lib.rs b/crates/db/src/lib.rs new file mode 100644 index 000000000..c3358eae3 --- /dev/null +++ b/crates/db/src/lib.rs @@ -0,0 +1,76 @@ +mod conv; +mod errors; +mod manager; + +use std::path::Path; + +pub use conv::{DatabaseTypeConversionError, SqlTypeConvert}; +use diesel::{RunQueryDsl, SqliteConnection}; +pub use errors::{DatabaseError, SchemaVerificationError}; +pub use manager::{ConnectionManager, ConnectionManagerError, configure_connection_on_creation}; +use tracing::Instrument; + +pub type Result = std::result::Result; + +/// Database handle that provides fundamental operations that various components of Miden Node can +/// utililze for their storage needs. +pub struct Db { + pool: deadpool_diesel::Pool>, +} + +impl Db { + /// Creates a new database instance with the provided connection pool. 
+ pub fn new(database_filepath: &Path) -> Result { + let manager = ConnectionManager::new(database_filepath.to_str().unwrap()); + let pool = deadpool_diesel::Pool::builder(manager).max_size(16).build()?; + Ok(Self { pool }) + } + + /// Create and commit a transaction with the queries added in the provided closure + pub async fn transact(&self, msg: M, query: Q) -> std::result::Result + where + Q: Send + + for<'a, 't> FnOnce(&'a mut SqliteConnection) -> std::result::Result + + 'static, + R: Send + 'static, + M: Send + ToString, + E: From, + E: From, + E: std::error::Error + Send + Sync + 'static, + { + let conn = self + .pool + .get() + .in_current_span() + .await + .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; + + conn.interact(|conn| <_ as diesel::Connection>::transaction::(conn, query)) + .in_current_span() + .await + .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? + } + + /// Run the query _without_ a transaction + pub async fn query(&self, msg: M, query: Q) -> std::result::Result + where + Q: Send + FnOnce(&mut SqliteConnection) -> std::result::Result + 'static, + R: Send + 'static, + M: Send + ToString, + E: From, + E: std::error::Error + Send + Sync + 'static, + { + let conn = self + .pool + .get() + .await + .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; + + conn.interact(move |conn| { + let r = query(conn)?; + Ok(r) + }) + .await + .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? 
+ } +} diff --git a/crates/store/src/db/manager.rs b/crates/db/src/manager.rs similarity index 95% rename from crates/store/src/db/manager.rs rename to crates/db/src/manager.rs index 5ac72e0ad..e3b21be18 100644 --- a/crates/store/src/db/manager.rs +++ b/crates/db/src/manager.rs @@ -75,11 +75,11 @@ impl deadpool::managed::Manager for ConnectionManager { } } -pub(crate) fn configure_connection_on_creation( +pub fn configure_connection_on_creation( conn: &mut SqliteConnection, ) -> Result<(), ConnectionManagerError> { - // Wait up to 5 seconds for writer locks before erroring. - diesel::sql_query("PRAGMA busy_timeout=5000") + // Wait up to 3 seconds for writer locks before erroring. + diesel::sql_query("PRAGMA busy_timeout=3000") .execute(conn) .map_err(ConnectionManagerError::ConnectionParamSetup)?; diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 1d34db128..169a47207 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -15,14 +15,12 @@ workspace = true [dependencies] anyhow = { workspace = true } -deadpool = { features = ["managed", "rt_tokio_1"], workspace = true } -deadpool-diesel = { features = ["sqlite"], workspace = true } -deadpool-sync = { features = ["tracing"], workspace = true } diesel = { features = ["numeric", "sqlite"], workspace = true } diesel_migrations = { features = ["sqlite"], workspace = true } futures = { workspace = true } indexmap = { workspace = true } libsqlite3-sys = { workspace = true } +miden-node-db = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } miden-protocol = { default-features = true, workspace = true } diff --git a/crates/ntx-builder/src/db/manager.rs b/crates/ntx-builder/src/db/manager.rs deleted file mode 100644 index 4234e09dd..000000000 --- a/crates/ntx-builder/src/db/manager.rs +++ /dev/null @@ -1,86 +0,0 @@ -//! A minimal connection manager wrapper. -//! -//! 
Only required to setup connection parameters, specifically `WAL`. - -use deadpool_sync::InteractError; -use diesel::{RunQueryDsl, SqliteConnection}; - -#[derive(thiserror::Error, Debug)] -pub enum ConnectionManagerError { - #[error("failed to apply connection parameter")] - ConnectionParamSetup(#[source] diesel::result::Error), - #[error("SQLite pool interaction failed: {0}")] - InteractError(String), - #[error("failed to create a new connection")] - ConnectionCreate(#[source] deadpool_diesel::Error), - #[error("failed to recycle connection")] - PoolRecycle(#[source] deadpool::managed::RecycleError), -} - -impl ConnectionManagerError { - /// Converts from `InteractError`. - /// - /// Required since `InteractError` has at least one enum variant that is _not_ `Send + - /// Sync` and hence prevents the `Sync` auto implementation. - pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { - let msg = msg.to_string(); - Self::InteractError(format!("{msg} failed: {e:?}")) - } -} - -/// Create a connection manager with per-connection setup. -/// -/// Particularly, `foreign_key` checks are enabled and using a write-append-log for journaling. 
-pub(crate) struct ConnectionManager { - pub(crate) manager: deadpool_diesel::sqlite::Manager, -} - -impl ConnectionManager { - pub(crate) fn new(database_path: &str) -> Self { - let manager = deadpool_diesel::sqlite::Manager::new( - database_path.to_owned(), - deadpool_diesel::sqlite::Runtime::Tokio1, - ); - Self { manager } - } -} - -impl deadpool::managed::Manager for ConnectionManager { - type Type = deadpool_sync::SyncWrapper; - type Error = ConnectionManagerError; - - async fn create(&self) -> Result { - let conn = self.manager.create().await.map_err(ConnectionManagerError::ConnectionCreate)?; - - conn.interact(configure_connection_on_creation) - .await - .map_err(|e| ConnectionManagerError::interact("Connection setup", &e))??; - Ok(conn) - } - - async fn recycle( - &self, - conn: &mut Self::Type, - metrics: &deadpool_diesel::Metrics, - ) -> deadpool::managed::RecycleResult { - self.manager.recycle(conn, metrics).await.map_err(|err| { - deadpool::managed::RecycleError::Backend(ConnectionManagerError::PoolRecycle(err)) - })?; - Ok(()) - } -} - -pub(crate) fn configure_connection_on_creation( - conn: &mut SqliteConnection, -) -> Result<(), ConnectionManagerError> { - // Enable the WAL mode. This allows concurrent reads while a write is in progress. - diesel::sql_query("PRAGMA journal_mode=WAL") - .execute(conn) - .map_err(ConnectionManagerError::ConnectionParamSetup)?; - - // Enable foreign key checks. 
- diesel::sql_query("PRAGMA foreign_keys=ON") - .execute(conn) - .map_err(ConnectionManagerError::ConnectionParamSetup)?; - Ok(()) -} diff --git a/crates/ntx-builder/src/db/migrations.rs b/crates/ntx-builder/src/db/migrations.rs index 069bdd411..f3955cb2a 100644 --- a/crates/ntx-builder/src/db/migrations.rs +++ b/crates/ntx-builder/src/db/migrations.rs @@ -1,9 +1,9 @@ use diesel::SqliteConnection; use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; +use miden_node_db::DatabaseError; use tracing::instrument; use crate::COMPONENT; -use crate::db::errors::DatabaseError; use crate::db::schema_hash::verify_schema; // The rebuild is automatically triggered by `build.rs` as described in diff --git a/crates/ntx-builder/src/db/mod.rs b/crates/ntx-builder/src/db/mod.rs index 488673b91..3d1c27bee 100644 --- a/crates/ntx-builder/src/db/mod.rs +++ b/crates/ntx-builder/src/db/mod.rs @@ -1,121 +1,5 @@ -use std::path::PathBuf; - -use anyhow::Context; -use diesel::{Connection, SqliteConnection}; -use tracing::{info, instrument}; - -use crate::COMPONENT; -use crate::db::errors::{DatabaseError, DatabaseSetupError}; -use crate::db::manager::{ConnectionManager, configure_connection_on_creation}; -use crate::db::migrations::apply_migrations; - -pub mod errors; -pub(crate) mod manager; - mod migrations; mod schema_hash; /// [diesel](https://diesel.rs) generated schema. pub(crate) mod schema; - -pub type Result = std::result::Result; - -pub struct Db { - pool: deadpool_diesel::Pool>, -} - -impl Db { - /// Creates a new database file, configures it, and applies migrations. - /// - /// This is a synchronous one-shot setup used during node initialization. - /// For runtime access with a connection pool, use [`Db::load`]. 
- #[instrument( - target = COMPONENT, - name = "ntx_builder.database.bootstrap", - skip_all, - fields(path=%database_filepath.display()), - err, - )] - pub fn bootstrap(database_filepath: PathBuf) -> anyhow::Result<()> { - let mut conn: SqliteConnection = diesel::sqlite::SqliteConnection::establish( - database_filepath.to_str().context("database filepath is invalid")?, - ) - .context("failed to open a database connection")?; - - configure_connection_on_creation(&mut conn)?; - - // Run migrations. - apply_migrations(&mut conn).context("failed to apply database migrations")?; - - Ok(()) - } - - /// Create and commit a transaction with the queries added in the provided closure. - #[expect(dead_code)] - pub(crate) async fn transact(&self, msg: M, query: Q) -> std::result::Result - where - Q: Send - + for<'a, 't> FnOnce(&'a mut SqliteConnection) -> std::result::Result - + 'static, - R: Send + 'static, - M: Send + ToString, - E: From, - E: From, - E: std::error::Error + Send + Sync + 'static, - { - let conn = self - .pool - .get() - .await - .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; - - conn.interact(|conn| <_ as diesel::Connection>::transaction::(conn, query)) - .await - .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? - } - - /// Run the query _without_ a transaction. - pub(crate) async fn query(&self, msg: M, query: Q) -> std::result::Result - where - Q: Send + FnOnce(&mut SqliteConnection) -> std::result::Result + 'static, - R: Send + 'static, - M: Send + ToString, - E: From, - E: std::error::Error + Send + Sync + 'static, - { - let conn = self - .pool - .get() - .await - .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; - - conn.interact(move |conn| { - let r = query(conn)?; - Ok(r) - }) - .await - .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? - } - - /// Opens a connection pool to an existing database and re-applies pending migrations. 
- /// - /// Use [`Db::bootstrap`] first to create and initialize the database file. - #[instrument(target = COMPONENT, skip_all)] - pub async fn load(database_filepath: PathBuf) -> Result { - let manager = ConnectionManager::new(database_filepath.to_str().unwrap()); - let pool = deadpool_diesel::Pool::builder(manager) - .max_size(16) - .build() - .map_err(DatabaseSetupError::PoolBuild)?; - - info!( - target: COMPONENT, - sqlite = %database_filepath.display(), - "Connected to the database" - ); - - let me = Db { pool }; - me.query("migrations", apply_migrations).await?; - Ok(me) - } -} diff --git a/crates/ntx-builder/src/db/schema_hash.rs b/crates/ntx-builder/src/db/schema_hash.rs index 21ebb0c7b..80d00b4c4 100644 --- a/crates/ntx-builder/src/db/schema_hash.rs +++ b/crates/ntx-builder/src/db/schema_hash.rs @@ -11,10 +11,10 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use diesel_migrations::MigrationHarness; +use miden_node_db::SchemaVerificationError; use tracing::instrument; use crate::COMPONENT; -use crate::db::errors::SchemaVerificationError; use crate::db::migrations::MIGRATIONS; /// Represents a schema object for comparison. @@ -131,8 +131,9 @@ pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificati #[cfg(test)] mod tests { + use miden_node_db::DatabaseError; + use super::*; - use crate::db::errors::DatabaseError; use crate::db::migrations::apply_migrations; #[test] diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index d77a8dd7d..04c631c05 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -16,7 +16,9 @@ mod actor; mod block_producer; mod builder; mod coordinator; -#[expect(dead_code, reason = "will be used as part of follow-up work")] +// TODO(santi): Remove this attr when the module is actually used. Dead code lint fails due to the +// tests. 
+#[cfg(test)] pub(crate) mod db; mod store; diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 5ce4daee7..82466fcba 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -18,7 +18,6 @@ workspace = true anyhow = { workspace = true } deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } deadpool-diesel = { features = ["sqlite"], version = "0.6" } -deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } diesel = { features = ["numeric", "sqlite"], version = "2.3" } diesel_migrations = { features = ["sqlite"], version = "2.3" } fs-err = { workspace = true } @@ -28,6 +27,7 @@ indexmap = { workspace = true } libsqlite3-sys = { workspace = true } miden-block-prover = { workspace = true } miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } +miden-node-db = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { workspace = true } diff --git a/crates/store/src/db/migrations.rs b/crates/store/src/db/migrations.rs index 8aa0f0a00..10ce01409 100644 --- a/crates/store/src/db/migrations.rs +++ b/crates/store/src/db/migrations.rs @@ -13,7 +13,7 @@ pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations" #[instrument(level = "debug", target = COMPONENT, skip_all, err)] pub fn apply_migrations( conn: &mut SqliteConnection, -) -> std::result::Result<(), crate::errors::DatabaseError> { +) -> std::result::Result<(), miden_node_db::DatabaseError> { let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); tracing::info!(target = COMPONENT, migrations = migrations.len(), "Applying migrations"); diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 5ab4d55b1..803d532f0 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -1,5 +1,5 @@ use 
std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::ops::RangeInclusive; +use std::ops::{Deref, DerefMut, RangeInclusive}; use std::path::PathBuf; use anyhow::Context; @@ -23,10 +23,9 @@ use miden_protocol::note::{ use miden_protocol::transaction::TransactionId; use miden_protocol::utils::{Deserializable, Serializable}; use tokio::sync::oneshot; -use tracing::{Instrument, info, instrument}; +use tracing::{info, instrument}; use crate::COMPONENT; -use crate::db::manager::{ConnectionManager, configure_connection_on_creation}; use crate::db::migrations::apply_migrations; use crate::db::models::conv::SqlTypeConvert; use crate::db::models::queries::StorageMapValuesPage; @@ -36,11 +35,9 @@ pub use crate::db::models::queries::{ PublicAccountIdsPage, }; use crate::db::models::{Page, queries}; -use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError}; +use crate::errors::{DatabaseError, NoteSyncError}; use crate::genesis::GenesisBlock; -pub(crate) mod manager; - mod migrations; mod schema_hash; @@ -54,8 +51,25 @@ pub(crate) mod schema; pub type Result = std::result::Result; +/// The Store's database. +/// +/// Extends the underlying [`miden_node_db::Db`] type with functionality specific to the Store. pub struct Db { - pool: deadpool_diesel::Pool>, + db: miden_node_db::Db, +} + +impl Deref for Db { + type Target = miden_node_db::Db; + + fn deref(&self) -> &Self::Target { + &self.db + } +} + +impl DerefMut for Db { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.db + } } /// Describes the value of an asset for an account ID at `block_num` specifically. @@ -209,11 +223,6 @@ impl From for NoteSyncRecord { } impl Db { - /// Creates a new database instance with the provided connection pool. - pub fn new(pool: deadpool_diesel::Pool) -> Self { - Self { pool } - } - /// Creates a new database and inserts the genesis block. 
#[instrument( target = COMPONENT, @@ -233,7 +242,7 @@ impl Db { ) .context("failed to open a database connection")?; - configure_connection_on_creation(&mut conn)?; + miden_node_db::configure_connection_on_creation(&mut conn)?; // Run migrations. apply_migrations(&mut conn).context("failed to apply database migrations")?; @@ -255,69 +264,18 @@ impl Db { Ok(()) } - /// Create and commit a transaction with the queries added in the provided closure - pub async fn transact(&self, msg: M, query: Q) -> std::result::Result - where - Q: Send - + for<'a, 't> FnOnce(&'a mut SqliteConnection) -> std::result::Result - + 'static, - R: Send + 'static, - M: Send + ToString, - E: From, - E: From, - E: std::error::Error + Send + Sync + 'static, - { - let conn = self - .pool - .get() - .in_current_span() - .await - .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; - - conn.interact(|conn| <_ as diesel::Connection>::transaction::(conn, query)) - .in_current_span() - .await - .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? - } - - /// Run the query _without_ a transaction - pub async fn query(&self, msg: M, query: Q) -> std::result::Result - where - Q: Send + FnOnce(&mut SqliteConnection) -> std::result::Result + 'static, - R: Send + 'static, - M: Send + ToString, - E: From, - E: std::error::Error + Send + Sync + 'static, - { - let conn = self - .pool - .get() - .await - .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; - - conn.interact(move |conn| { - let r = query(conn)?; - Ok(r) - }) - .await - .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? - } - /// Open a connection to the DB and apply any pending migrations. 
#[instrument(target = COMPONENT, skip_all)] - pub async fn load(database_filepath: PathBuf) -> Result { - let manager = ConnectionManager::new(database_filepath.to_str().unwrap()); - let pool = deadpool_diesel::Pool::builder(manager).max_size(16).build()?; - + pub async fn load(database_filepath: PathBuf) -> Result { + let db = miden_node_db::Db::new(&database_filepath)?; info!( target: COMPONENT, sqlite= %database_filepath.display(), "Connected to the database" ); - let me = Db { pool }; - me.query("migrations", apply_migrations).await?; - Ok(me) + db.query("migrations", apply_migrations).await?; + Ok(Self { db }) } /// Returns a page of nullifiers for tree rebuilding. diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index 083cb15aa..67b3a708b 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -682,9 +682,14 @@ impl TryInto for NoteRecordWithScriptRawJoined { { let storage = NoteStorage::read_from_bytes(&storage[..])?; let serial_num = Word::read_from_bytes(&serial_num[..])?; - let script = script.ok_or_else(|| { - DatabaseError::conversiont_from_sql::(None) - })?; + let script = + script.ok_or_else(|| { + miden_node_db::DatabaseError::conversiont_from_sql::< + NoteRecipient, + DatabaseError, + _, + >(None) + })?; let recipient = NoteRecipient::new(serial_num, script, storage); let assets = NoteAssets::read_from_bytes(&assets[..])?; Some(NoteDetails::new(assets, recipient)) @@ -744,7 +749,7 @@ impl TryInto for NoteMetadataRawRow { fn try_into(self) -> Result { let sender = AccountId::read_from_bytes(&self.sender[..])?; let note_type = NoteType::try_from(self.note_type as u32) - .map_err(DatabaseError::conversiont_from_sql::)?; + .map_err(miden_node_db::DatabaseError::conversiont_from_sql::)?; let tag = NoteTag::new(self.tag as u32); let attachment = NoteAttachment::read_from_bytes(&self.attachment)?; Ok(NoteMetadata::new(sender, note_type, 
tag).with_attachment(attachment)) @@ -766,7 +771,9 @@ impl TryInto for BlockNoteIndexRawRow { let batch_index = self.batch_index as usize; let note_index = self.note_index as usize; let index = BlockNoteIndex::new(batch_index, note_index).ok_or_else(|| { - DatabaseError::conversiont_from_sql::(None) + miden_node_db::DatabaseError::conversiont_from_sql::( + None, + ) })?; Ok(index) } diff --git a/crates/store/src/db/schema_hash.rs b/crates/store/src/db/schema_hash.rs index bcb417ce9..9a5ad1328 100644 --- a/crates/store/src/db/schema_hash.rs +++ b/crates/store/src/db/schema_hash.rs @@ -11,11 +11,11 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use diesel_migrations::MigrationHarness; +use miden_node_db::SchemaVerificationError; use tracing::instrument; use crate::COMPONENT; use crate::db::migrations::MIGRATIONS; -use crate::errors::SchemaVerificationError; /// Represents a schema object for comparison. #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] @@ -139,7 +139,6 @@ pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificati mod tests { use super::*; use crate::db::migrations::apply_migrations; - use crate::errors::DatabaseError; #[test] fn verify_schema_passes_for_correct_schema() { @@ -191,6 +190,9 @@ mod tests { .execute(&mut conn) .unwrap(); - assert!(matches!(apply_migrations(&mut conn), Err(DatabaseError::SchemaVerification(_)))); + assert!(matches!( + apply_migrations(&mut conn), + Err(miden_node_db::DatabaseError::SchemaVerification(_)) + )); } } diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 947a0bcfc..61bbf3e99 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -1,7 +1,5 @@ -use std::any::type_name; use std::io; -use deadpool_sync::InteractError; use miden_node_proto::domain::account::NetworkAccountError; use miden_node_proto::domain::block::InvalidBlockRange; use miden_node_proto::errors::{ConversionError, GrpcError}; @@ -30,7 +28,6 @@ use thiserror::Error; use 
tokio::sync::oneshot::error::RecvError; use tonic::Status; -use crate::db::manager::ConnectionManagerError; use crate::db::models::conv::DatabaseTypeConversionError; use crate::inner_forest::{InnerForestError, WitnessError}; @@ -41,60 +38,30 @@ use crate::inner_forest::{InnerForestError, WitnessError}; pub enum DatabaseError { // ERRORS WITH AUTOMATIC CONVERSIONS FROM NESTED ERROR TYPES // --------------------------------------------------------------------------------------------- - #[error("account is incomplete")] - AccountIncomplete, #[error("account error")] AccountError(#[from] AccountError), - #[error("account delta error")] - AccountDeltaError(#[from] AccountDeltaError), #[error("asset vault error")] AssetVaultError(#[from] AssetVaultError), #[error("asset error")] AssetError(#[from] AssetError), #[error("closed channel")] ClosedChannel(#[from] RecvError), + #[error("database error")] + DatabaseError(#[from] miden_node_db::DatabaseError), #[error("deserialization failed")] DeserializationError(#[from] DeserializationError), - #[error("hex parsing error")] - FromHexError(#[from] hex::FromHexError), #[error("I/O error")] IoError(#[from] io::Error), #[error("merkle error")] MerkleError(#[from] MerkleError), - #[error("network account error")] - NetworkAccountError(#[from] NetworkAccountError), #[error("note error")] NoteError(#[from] NoteError), #[error("storage map error")] StorageMapError(#[from] StorageMapError), - #[error("setup deadpool connection pool failed")] - Deadpool(#[from] deadpool::managed::PoolError), - #[error("setup deadpool connection pool failed")] - ConnectionPoolObtainError(#[from] Box), #[error(transparent)] Diesel(#[from] diesel::result::Error), - #[error("sqlite FFI boundary NUL termination error (not much you can do, file an issue)")] - DieselSqliteFfi(#[from] std::ffi::NulError), - #[error(transparent)] - DeadpoolDiesel(#[from] deadpool_diesel::Error), - #[error(transparent)] - PoolRecycle(#[from] deadpool::managed::RecycleError), - 
#[error("summing over column {column} of table {table} exceeded {limit}")] - ColumnSumExceedsLimit { - table: &'static str, - column: &'static str, - limit: &'static str, - #[source] - source: Box, - }, #[error(transparent)] QueryParamLimit(#[from] QueryLimitError), - #[error("conversion from SQL to rust type {to} failed")] - ConversionSqlToRust { - #[source] - inner: Option>, - to: &'static str, - }, // OTHER ERRORS // --------------------------------------------------------------------------------------------- @@ -102,39 +69,16 @@ pub enum DatabaseError { AccountCommitmentsMismatch { expected: Word, calculated: Word }, #[error("account {0} not found")] AccountNotFoundInDb(AccountId), - #[error("account {0} state at block height {1} not found")] - AccountAtBlockHeightNotFoundInDb(AccountId, BlockNumber), - #[error("block {0} not found in database")] - BlockNotFound(BlockNumber), - #[error("historical block {block_num} not available: {reason}")] - HistoricalBlockNotAvailable { block_num: BlockNumber, reason: String }, #[error("accounts {0:?} not found")] AccountsNotFoundInDb(Vec), #[error("account {0} is not on the chain")] AccountNotPublic(AccountId), #[error("invalid block parameters: block_from ({from}) > block_to ({to})")] InvalidBlockRange { from: BlockNumber, to: BlockNumber }, - #[error("invalid storage slot type: {0}")] - InvalidStorageSlotType(i32), #[error("data corrupted: {0}")] DataCorrupted(String), - #[error("SQLite pool interaction failed: {0}")] - InteractError(String), - #[error("invalid Felt: {0}")] - InvalidFelt(String), - #[error( - "unsupported database version. There is no migration chain from/to this version. \ - Remove all database files and try again." 
- )] - UnsupportedDatabaseVersion, - #[error("schema verification failed")] - SchemaVerification(#[from] SchemaVerificationError), - #[error(transparent)] - ConnectionManager(#[from] ConnectionManagerError), #[error(transparent)] SqlValueConversion(#[from] DatabaseTypeConversionError), - #[error("Not implemented: {0}")] - NotImplemented(String), #[error("storage root not found for account {account_id}, slot {slot_name}, block {block_num}")] StorageRootNotFound { account_id: AccountId, @@ -143,35 +87,6 @@ pub enum DatabaseError { }, } -impl DatabaseError { - /// Converts from `InteractError` - /// - /// Note: Required since `InteractError` has at least one enum - /// variant that is _not_ `Send + Sync` and hence prevents the - /// `Sync` auto implementation. - /// This does an internal conversion to string while maintaining - /// convenience. - /// - /// Using `MSG` as const so it can be called as - /// `.map_err(DatabaseError::interact::<"Your message">)` - pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { - let msg = msg.to_string(); - Self::InteractError(format!("{msg} failed: {e:?}")) - } - - /// Failed to convert an SQL entry to a rust representation - pub fn conversiont_from_sql(err: MaybeE) -> DatabaseError - where - MaybeE: Into>, - E: std::error::Error + Send + Sync + 'static, - { - DatabaseError::ConversionSqlToRust { - inner: err.into().map(|err| Box::new(err) as Box), - to: type_name::(), - } - } -} - impl From for Status { fn from(err: DatabaseError) -> Self { match err { @@ -204,7 +119,7 @@ pub enum StateInitializationError { #[error("failed to load block store")] BlockStoreLoadError(#[source] std::io::Error), #[error("failed to load database")] - DatabaseLoadError(#[from] DatabaseSetupError), + DatabaseLoadError(#[from] miden_node_db::DatabaseError), #[error("inner forest error")] InnerForestError(#[from] InnerForestError), #[error( @@ -224,20 +139,6 @@ pub enum StateInitializationError { 
AccountToDeltaConversionFailed(String), } -#[derive(Debug, Error)] -pub enum DatabaseSetupError { - #[error("I/O error")] - Io(#[from] io::Error), - #[error("database error")] - Database(#[from] DatabaseError), - #[error("genesis block error")] - GenesisBlock(#[from] GenesisError), - #[error("pool build error")] - PoolBuild(#[from] deadpool::managed::BuildError), - #[error("Setup deadpool connection pool failed")] - Pool(#[from] deadpool::managed::PoolError), -} - #[derive(Debug, Error)] pub enum GenesisError { // ERRORS WITH AUTOMATIC CONVERSIONS FROM NESTED ERROR TYPES @@ -383,6 +284,9 @@ pub enum NoteSyncError { #[error("database error")] #[grpc(internal)] DatabaseError(#[from] DatabaseError), + #[error("database error")] + #[grpc(internal)] + UnderlyingDatabaseError(#[from] miden_node_db::DatabaseError), #[error("block headers table is empty")] #[grpc(internal)] EmptyBlockHeadersTable, @@ -590,30 +494,6 @@ pub enum GetWitnessesError { WitnessError(#[from] WitnessError), } -// SCHEMA VERIFICATION ERRORS -// ================================================================================================= - -/// Errors that can occur during schema verification. 
-#[derive(Debug, Error)] -pub enum SchemaVerificationError { - #[error("failed to create in-memory reference database")] - InMemoryDbCreation(#[source] diesel::ConnectionError), - #[error("failed to apply migrations to reference database")] - MigrationApplication(#[source] Box), - #[error("failed to extract schema from database")] - SchemaExtraction(#[source] diesel::result::Error), - #[error( - "schema mismatch: expected {expected_count} objects, found {actual_count} \ - ({missing_count} missing, {extra_count} unexpected)" - )] - Mismatch { - expected_count: usize, - actual_count: usize, - missing_count: usize, - extra_count: usize, - }, -} - #[cfg(test)] mod get_account_error_tests { use miden_protocol::account::AccountId; @@ -701,7 +581,6 @@ mod compile_tests { AccountDeltaError, AccountError, DatabaseError, - DatabaseSetupError, DeserializationError, GenesisError, NetworkAccountError, @@ -733,7 +612,6 @@ mod compile_tests { ensure_is_error::>(PhantomData); ensure_is_error::(PhantomData); - ensure_is_error::(PhantomData); ensure_is_error::(PhantomData); ensure_is_error::(PhantomData); ensure_is_error::(PhantomData); diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index 06bba2fe8..519f8504b 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -11,9 +11,8 @@ pub mod state; pub use accounts::PersistentAccountTree; pub use accounts::{AccountTreeWithHistory, HistoricalError, InMemoryAccountTree}; pub use db::Db; -pub use db::manager::ConnectionManager; pub use db::models::conv::SqlTypeConvert; -pub use errors::{DatabaseError, DatabaseSetupError}; +pub use errors::DatabaseError; pub use genesis::GenesisState; pub use server::block_prover_client::BlockProver; pub use server::{DataDirectory, Store}; diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index 26a76a2b3..570f2a8d2 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -18,12 +18,11 @@ workspace = true [dependencies] anyhow = { 
workspace = true } -deadpool-diesel = { workspace = true } diesel = { workspace = true } diesel_migrations = { workspace = true } +miden-node-db = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } -miden-node-store = { workspace = true } miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { workspace = true } miden-tx = { workspace = true } diff --git a/crates/validator/src/block_validation/mod.rs b/crates/validator/src/block_validation/mod.rs index 143d2dee1..954d043b8 100644 --- a/crates/validator/src/block_validation/mod.rs +++ b/crates/validator/src/block_validation/mod.rs @@ -1,4 +1,4 @@ -use miden_node_store::{DatabaseError, Db}; +use miden_node_db::{DatabaseError, Db}; use miden_protocol::block::{BlockSigner, ProposedBlock}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_protocol::errors::ProposedBlockError; diff --git a/crates/validator/src/db/migrations.rs b/crates/validator/src/db/migrations.rs index 6896082be..240c29033 100644 --- a/crates/validator/src/db/migrations.rs +++ b/crates/validator/src/db/migrations.rs @@ -1,6 +1,6 @@ use diesel::SqliteConnection; use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; -use miden_node_store::DatabaseError; +use miden_node_db::DatabaseError; use tracing::instrument; use crate::COMPONENT; diff --git a/crates/validator/src/db/mod.rs b/crates/validator/src/db/mod.rs index 14d85e34f..4c8fe665b 100644 --- a/crates/validator/src/db/mod.rs +++ b/crates/validator/src/db/mod.rs @@ -7,7 +7,7 @@ use std::path::PathBuf; use diesel::SqliteConnection; use diesel::dsl::exists; use diesel::prelude::*; -use miden_node_store::{ConnectionManager, DatabaseError, DatabaseSetupError}; +use miden_node_db::{DatabaseError, Db}; use miden_protocol::transaction::TransactionId; use miden_protocol::utils::Serializable; use tracing::instrument; @@ -19,17 +19,14 @@ use 
crate::tx_validation::ValidatedTransaction; /// Open a connection to the DB and apply any pending migrations. #[instrument(target = COMPONENT, skip_all)] -pub async fn load(database_filepath: PathBuf) -> Result { - let manager = ConnectionManager::new(database_filepath.to_str().unwrap()); - let pool = deadpool_diesel::Pool::builder(manager).max_size(16).build()?; - +pub async fn load(database_filepath: PathBuf) -> Result { + let db = Db::new(&database_filepath)?; tracing::info!( target: COMPONENT, sqlite= %database_filepath.display(), "Connected to the database" ); - let db = miden_node_store::Db::new(pool); db.query("migrations", apply_migrations).await?; Ok(db) } diff --git a/crates/validator/src/db/models.rs b/crates/validator/src/db/models.rs index e1e67086a..9a50b7a39 100644 --- a/crates/validator/src/db/models.rs +++ b/crates/validator/src/db/models.rs @@ -1,5 +1,5 @@ use diesel::prelude::*; -use miden_node_store::SqlTypeConvert; +use miden_node_db::SqlTypeConvert; use miden_tx::utils::Serializable; use crate::db::schema; diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index 94bf41315..7f71161a2 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -4,10 +4,10 @@ use std::sync::Arc; use std::time::Duration; use anyhow::Context; +use miden_node_db::Db; use miden_node_proto::generated::validator::api_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto_build::validator_api_descriptor; -use miden_node_store::Db; use miden_node_utils::ErrorReport; use miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::OpenTelemetrySpanExt; @@ -154,7 +154,10 @@ impl api_server::Api for ValidatorServer // Store the validated transaction. 
self.db .transact("insert_transaction", move |conn| insert_transaction(conn, &tx_info)) - .await?; + .await + .map_err(|err| { + Status::internal(err.as_report_context("Failed to insert transaction")) + })?; Ok(tonic::Response::new(())) } From 8370053b0c524ab504468f9b974556d2a12f57c7 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Fri, 20 Feb 2026 03:14:07 -0300 Subject: [PATCH 45/77] chore(ntx): replace in memory with sqlite database (#1662) --- CHANGELOG.md | 1 + Cargo.lock | 4 +- bin/node/src/commands/bundled.rs | 1 + bin/node/src/commands/mod.rs | 25 +- crates/db/src/errors.rs | 13 + crates/db/src/lib.rs | 1 + crates/db/src/manager.rs | 5 + crates/ntx-builder/Cargo.toml | 6 +- .../ntx-builder/src/actor/account_effect.rs | 42 ++ crates/ntx-builder/src/actor/account_state.rs | 688 +----------------- crates/ntx-builder/src/actor/inflight_note.rs | 9 + crates/ntx-builder/src/actor/mod.rs | 188 +++-- crates/ntx-builder/src/actor/note_state.rs | 235 ------ crates/ntx-builder/src/builder.rs | 73 +- crates/ntx-builder/src/coordinator.rs | 107 +-- .../db/migrations/2026020900000_setup/up.sql | 7 +- crates/ntx-builder/src/db/mod.rs | 208 ++++++ crates/ntx-builder/src/db/models/conv.rs | 77 ++ crates/ntx-builder/src/db/models/mod.rs | 3 + .../src/db/models/queries/accounts.rs | 102 +++ .../src/db/models/queries/chain_state.rs | 46 ++ .../ntx-builder/src/db/models/queries/mod.rs | 316 ++++++++ .../src/db/models/queries/notes.rs | 193 +++++ .../src/db/models/queries/tests.rs | 546 ++++++++++++++ crates/ntx-builder/src/db/schema.rs | 8 +- crates/ntx-builder/src/lib.rs | 32 +- 26 files changed, 1888 insertions(+), 1048 deletions(-) create mode 100644 crates/ntx-builder/src/actor/account_effect.rs delete mode 100644 crates/ntx-builder/src/actor/note_state.rs create mode 100644 crates/ntx-builder/src/db/models/conv.rs create mode 100644 crates/ntx-builder/src/db/models/mod.rs create mode 100644 
crates/ntx-builder/src/db/models/queries/accounts.rs create mode 100644 crates/ntx-builder/src/db/models/queries/chain_state.rs create mode 100644 crates/ntx-builder/src/db/models/queries/mod.rs create mode 100644 crates/ntx-builder/src/db/models/queries/notes.rs create mode 100644 crates/ntx-builder/src/db/models/queries/tests.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 18946d1f0..49d044e4c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ - Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). - Add #[track_caller] to tracing/logging helpers ([#1651](https://github.com/0xMiden/miden-node/pull/1651)). - Improved tracing span fields ([#1650](https://github.com/0xMiden/miden-node/pull/1650)) +- Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/miden-node/pull/1662)). 
## v0.13.5 (TBD) diff --git a/Cargo.lock b/Cargo.lock index e97b7fd51..c8a82122c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2823,7 +2823,6 @@ dependencies = [ "diesel", "diesel_migrations", "futures", - "indexmap 2.13.0", "libsqlite3-sys", "miden-node-db", "miden-node-proto", @@ -2833,7 +2832,10 @@ dependencies = [ "miden-remote-prover-client", "miden-standards", "miden-tx", + "prost", + "rand_chacha 0.9.0", "rstest", + "tempfile", "thiserror 2.0.18", "tokio", "tokio-stream", diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 795cd6fe5..707e01193 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -292,6 +292,7 @@ impl BundledCommand { store_ntx_builder_url, block_producer_url, validator_url, + &data_directory, ); let id = join_set diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index a4c908846..352a6de16 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -1,5 +1,6 @@ use std::net::SocketAddr; use std::num::NonZeroUsize; +use std::path::{Path, PathBuf}; use std::time::Duration; use anyhow::Context; @@ -123,19 +124,37 @@ pub struct NtxBuilderConfig { default_value_t = DEFAULT_NTX_SCRIPT_CACHE_SIZE )] pub script_cache_size: NonZeroUsize, + + /// Directory for the ntx-builder's persistent database. + /// + /// If not set, defaults to the node's data directory. + #[arg(long = "ntx-builder.data-directory", value_name = "DIR")] + pub data_directory: Option, } impl NtxBuilderConfig { /// Converts this CLI config into the ntx-builder's internal config. + /// + /// The `node_data_directory` is used as the default location for the ntx-builder's database + /// if `--ntx-builder.data-directory` is not explicitly set. 
pub fn into_builder_config( self, store_url: Url, block_producer_url: Url, validator_url: Url, + node_data_directory: &Path, ) -> miden_node_ntx_builder::NtxBuilderConfig { - miden_node_ntx_builder::NtxBuilderConfig::new(store_url, block_producer_url, validator_url) - .with_tx_prover_url(self.tx_prover_url) - .with_script_cache_size(self.script_cache_size) + let data_dir = self.data_directory.unwrap_or_else(|| node_data_directory.to_path_buf()); + let database_filepath = data_dir.join("ntx-builder.sqlite3"); + + miden_node_ntx_builder::NtxBuilderConfig::new( + store_url, + block_producer_url, + validator_url, + database_filepath, + ) + .with_tx_prover_url(self.tx_prover_url) + .with_script_cache_size(self.script_cache_size) } } diff --git a/crates/db/src/errors.rs b/crates/db/src/errors.rs index 222f1166e..5e59ff4b9 100644 --- a/crates/db/src/errors.rs +++ b/crates/db/src/errors.rs @@ -82,4 +82,17 @@ impl DatabaseError { to: type_name::(), } } + + /// Creates a deserialization error with a static context string and the original error. + /// + /// This is a convenience wrapper around [`ConversionSqlToRust`](Self::ConversionSqlToRust). + pub fn deserialization( + context: &'static str, + source: impl std::error::Error + Send + Sync + 'static, + ) -> Self { + Self::ConversionSqlToRust { + inner: Some(Box::new(source)), + to: context, + } + } } diff --git a/crates/db/src/lib.rs b/crates/db/src/lib.rs index c3358eae3..7000f131d 100644 --- a/crates/db/src/lib.rs +++ b/crates/db/src/lib.rs @@ -14,6 +14,7 @@ pub type Result = std::result::Result; /// Database handle that provides fundamental operations that various components of Miden Node can /// utililze for their storage needs. 
+#[derive(Clone)] pub struct Db { pool: deadpool_diesel::Pool>, } diff --git a/crates/db/src/manager.rs b/crates/db/src/manager.rs index e3b21be18..c34e7a15e 100644 --- a/crates/db/src/manager.rs +++ b/crates/db/src/manager.rs @@ -94,5 +94,10 @@ pub fn configure_connection_on_creation( diesel::sql_query("PRAGMA foreign_keys=ON") .execute(conn) .map_err(ConnectionManagerError::ConnectionParamSetup)?; + + // Set busy timeout so concurrent writers wait instead of immediately failing. + diesel::sql_query("PRAGMA busy_timeout=5000") + .execute(conn) + .map_err(ConnectionManagerError::ConnectionParamSetup)?; Ok(()) } diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 169a47207..0c30970a0 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -18,7 +18,6 @@ anyhow = { workspace = true } diesel = { features = ["numeric", "sqlite"], workspace = true } diesel_migrations = { features = ["sqlite"], workspace = true } futures = { workspace = true } -indexmap = { workspace = true } libsqlite3-sys = { workspace = true } miden-node-db = { workspace = true } miden-node-proto = { workspace = true } @@ -26,6 +25,7 @@ miden-node-utils = { workspace = true } miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["tx-prover"], workspace = true } miden-tx = { default-features = true, workspace = true } +prost = { workspace = true } thiserror = { workspace = true } tokio = { features = ["rt-multi-thread"], workspace = true } tokio-stream = { workspace = true } @@ -38,8 +38,10 @@ url = { workspace = true } miden-node-test-macro = { path = "../test-macro" } miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } -miden-standards = { workspace = true } +miden-standards = { features = ["testing"], workspace = true } +rand_chacha = { workspace = true } rstest = { workspace = true } +tempfile = 
{ version = "3.20" } [package.metadata.cargo-machete] ignored = ["libsqlite3-sys"] diff --git a/crates/ntx-builder/src/actor/account_effect.rs b/crates/ntx-builder/src/actor/account_effect.rs new file mode 100644 index 000000000..7a6acf005 --- /dev/null +++ b/crates/ntx-builder/src/actor/account_effect.rs @@ -0,0 +1,42 @@ +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{Account, AccountDelta, AccountId}; + +// NETWORK ACCOUNT EFFECT +// ================================================================================================ + +/// Represents the effect of a transaction on a network account. +#[derive(Clone)] +pub enum NetworkAccountEffect { + Created(Account), + Updated(AccountDelta), +} + +impl NetworkAccountEffect { + pub fn from_protocol(update: &AccountUpdateDetails) -> Option { + let update = match update { + AccountUpdateDetails::Private => return None, + AccountUpdateDetails::Delta(update) if update.is_full_state() => { + NetworkAccountEffect::Created( + Account::try_from(update) + .expect("Account should be derivable by full state AccountDelta"), + ) + }, + AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update.clone()), + }; + + update.protocol_account_id().is_network().then_some(update) + } + + pub fn network_account_id(&self) -> NetworkAccountId { + // SAFETY: This is a network account by construction. 
+ self.protocol_account_id().try_into().unwrap() + } + + fn protocol_account_id(&self) -> AccountId { + match self { + NetworkAccountEffect::Created(acc) => acc.id(), + NetworkAccountEffect::Updated(delta) => delta.id(), + } + } +} diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs index b58cfd692..753dfee8a 100644 --- a/crates/ntx-builder/src/actor/account_state.rs +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -1,24 +1,10 @@ -use std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::num::NonZeroUsize; use std::sync::Arc; -use miden_node_proto::domain::account::NetworkAccountId; -use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; -use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::account::Account; -use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::block::{BlockHeader, BlockNumber}; -use miden_protocol::note::{Note, Nullifier}; -use miden_protocol::transaction::{PartialBlockchain, TransactionId}; -use tracing::instrument; +use miden_protocol::block::BlockHeader; +use miden_protocol::transaction::PartialBlockchain; -use super::ActorShutdownReason; -use super::note_state::{AccountDeltaTracker, NetworkAccountEffect, NotePool}; -use crate::COMPONENT; use crate::actor::inflight_note::InflightNetworkNote; -use crate::builder::ChainState; -use crate::store::{StoreClient, StoreError}; // TRANSACTION CANDIDATE // ================================================================================================ @@ -45,673 +31,3 @@ pub struct TransactionCandidate { /// Wrapped in `Arc` to avoid expensive clones when reading the chain state. pub chain_mmr: Arc, } - -// NETWORK ACCOUNT STATE -// ================================================================================================ - -/// The current state of a network account. 
-#[derive(Clone)] -pub struct NetworkAccountState { - /// The network account ID this state represents. - account_id: NetworkAccountId, - - /// Tracks committed and inflight account state updates. - account: AccountDeltaTracker, - - /// Manages available and nullified notes. - notes: NotePool, - - /// Uncommitted transactions which have some impact on the network state. - /// - /// This is tracked so we can commit or revert transaction effects. Transactions _without_ an - /// impact are ignored. - inflight_txs: BTreeMap, - - /// Nullifiers of all network notes targeted at this account. - /// - /// Used to filter mempool events: when a `TransactionAdded` event reports consumed nullifiers, - /// only those present in this set are processed. Nullifiers are added when notes are loaded - /// or created, and removed when the consuming transaction is committed. - known_nullifiers: HashSet, -} - -impl NetworkAccountState { - /// Load's all available network notes from the store, along with the required account states. - #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] - pub async fn load( - account: Account, - account_id: NetworkAccountId, - store: &StoreClient, - block_num: BlockNumber, - ) -> Result { - let notes = store.get_unconsumed_network_notes(account_id, block_num.as_u32()).await?; - let notes = notes - .into_iter() - .map(|note| { - let NetworkNote::SingleTarget(note) = note; - note - }) - .collect::>(); - - let known_nullifiers: HashSet = - notes.iter().map(SingleTargetNetworkNote::nullifier).collect(); - - let account_tracker = AccountDeltaTracker::new(account); - let mut note_pool = NotePool::default(); - for note in notes { - note_pool.add_note(note); - } - - let state = Self { - account: account_tracker, - notes: note_pool, - account_id, - inflight_txs: BTreeMap::default(), - known_nullifiers, - }; - - state.inject_telemetry(); - - Ok(state) - } - - /// Selects the next candidate network transaction. 
- /// - /// # Parameters - /// - /// - `limit`: Maximum number of notes to include in the transaction. - /// - `max_note_attempts`: Maximum number of execution attempts before a note is dropped. - /// - `chain_state`: Current chain state for the transaction. - #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)] - pub fn select_candidate( - &mut self, - limit: NonZeroUsize, - max_note_attempts: usize, - chain_state: ChainState, - ) -> Option { - // Remove notes that have failed too many times. - self.notes.drop_failing_notes(max_note_attempts); - - // Skip empty accounts, and prune them. - // This is how we keep the number of accounts bounded. - if self.is_empty() { - return None; - } - - // Select notes from the account that can be consumed or are ready for a retry. - let notes = self - .notes - .available_notes(&chain_state.chain_tip_header.block_num()) - .take(limit.get()) - .cloned() - .collect::>(); - - // Skip accounts with no available notes. - if notes.is_empty() { - return None; - } - - let (chain_tip_header, chain_mmr) = chain_state.into_parts(); - TransactionCandidate { - account: self.account.latest_account(), - notes, - chain_tip_header, - chain_mmr, - } - .into() - } - - /// Marks notes of a previously selected candidate as failed. - /// - /// Does not remove the candidate from the in-progress pool. - #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)] - pub fn notes_failed(&mut self, notes: &[Note], block_num: BlockNumber) { - let nullifiers = notes.iter().map(Note::nullifier).collect::>(); - self.notes.fail_notes(nullifiers.as_slice(), block_num); - } - - /// Updates state with the mempool event. 
- #[instrument(target = COMPONENT, name = "ntx.state.mempool_update", skip_all)] - pub fn mempool_update(&mut self, update: &MempoolEvent) -> Option { - let span = tracing::Span::current(); - span.set_attribute("mempool_event.kind", update.kind()); - - match update { - MempoolEvent::TransactionAdded { - id, - nullifiers, - network_notes, - account_delta, - } => { - // Filter network notes relevant to this account. - let network_notes = filter_by_account_id_and_map_to_single_target( - self.account_id, - network_notes.clone(), - ); - self.add_transaction(*id, nullifiers, &network_notes, account_delta.as_ref()); - }, - MempoolEvent::TransactionsReverted(txs) => { - for tx in txs { - let shutdown_reason = self.revert_transaction(*tx); - if shutdown_reason.is_some() { - return shutdown_reason; - } - } - }, - MempoolEvent::BlockCommitted { txs, .. } => { - for tx in txs { - self.commit_transaction(*tx); - } - }, - } - self.inject_telemetry(); - - // No shutdown, continue running actor. - None - } - - /// Returns `true` if there is no inflight state being tracked. - fn is_empty(&self) -> bool { - self.account.has_no_inflight() && self.notes.is_empty() - } - - /// Handles a [`MempoolEvent::TransactionAdded`] event. - fn add_transaction( - &mut self, - id: TransactionId, - nullifiers: &[Nullifier], - network_notes: &[SingleTargetNetworkNote], - account_delta: Option<&AccountUpdateDetails>, - ) { - // Skip transactions we already know about. - // - // This can occur since both ntx builder and the mempool might inform us of the same - // transaction. Once when it was submitted to the mempool, and once by the mempool event. 
- if self.inflight_txs.contains_key(&id) { - return; - } - - let mut tx_impact = TransactionImpact::default(); - if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { - let account_id = update.network_account_id(); - if account_id == self.account_id { - match update { - NetworkAccountEffect::Updated(account_delta) => { - self.account.add_delta(&account_delta); - tx_impact.account_delta = Some(account_id); - }, - NetworkAccountEffect::Created(_) => {}, - } - } - } - for note in network_notes { - assert_eq!( - note.account_id(), - self.account_id, - "note's account ID does not match network account actor's account ID" - ); - tx_impact.notes.insert(note.nullifier()); - self.known_nullifiers.insert(note.nullifier()); - self.notes.add_note(note.clone()); - } - for nullifier in nullifiers { - // Ignore nullifiers that aren't network note nullifiers. - if !self.known_nullifiers.contains(nullifier) { - continue; - } - tx_impact.nullifiers.insert(*nullifier); - let _ = self.notes.nullify(*nullifier); - } - - if !tx_impact.is_empty() { - self.inflight_txs.insert(id, tx_impact); - } - } - - /// Handles [`MempoolEvent::BlockCommitted`] events. - fn commit_transaction(&mut self, tx: TransactionId) { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - return; - }; - - if let Some(delta_account_id) = impact.account_delta { - if delta_account_id == self.account_id { - self.account.commit_delta(); - } - } - - for nullifier in impact.nullifiers { - if self.known_nullifiers.remove(&nullifier) { - // Its possible for the account to no longer exist if the transaction creating it - // was reverted. - self.notes.commit_nullifier(nullifier); - } - } - } - - /// Handles [`MempoolEvent::TransactionsReverted`] events. - fn revert_transaction(&mut self, tx: TransactionId) -> Option { - // We only track transactions which have an impact on the network state. 
- let Some(impact) = self.inflight_txs.remove(&tx) else { - tracing::debug!("transaction {tx} not found in inflight transactions"); - return None; - }; - - // Revert account creation. - if let Some(account_id) = impact.account_delta { - // Account creation reverted, actor must stop. - if account_id == self.account_id && self.account.revert_delta() { - return Some(ActorShutdownReason::AccountReverted(account_id)); - } - } - - // Revert notes. - for note_nullifier in impact.notes { - if self.known_nullifiers.contains(¬e_nullifier) { - self.notes.remove_note(note_nullifier); - self.known_nullifiers.remove(¬e_nullifier); - } - } - - // Revert nullifiers. - for nullifier in impact.nullifiers { - if self.known_nullifiers.contains(&nullifier) { - self.notes.revert_nullifier(nullifier); - self.known_nullifiers.remove(&nullifier); - } - } - - None - } - - /// Adds stats to the current tracing span. - /// - /// Note that these are only visible in the OpenTelemetry context, as conventional tracing - /// does not track fields added dynamically. - fn inject_telemetry(&self) { - let span = tracing::Span::current(); - - span.set_attribute("ntx.state.transactions", self.inflight_txs.len()); - span.set_attribute("ntx.state.notes.total", self.known_nullifiers.len()); - } -} - -/// The impact a transaction has on the state. -#[derive(Clone, Default)] -struct TransactionImpact { - /// The network account this transaction added an account delta to. - account_delta: Option, - - /// Network notes this transaction created. - notes: BTreeSet, - - /// Network notes this transaction consumed. - nullifiers: BTreeSet, -} - -impl TransactionImpact { - fn is_empty(&self) -> bool { - self.account_delta.is_none() && self.notes.is_empty() && self.nullifiers.is_empty() - } -} - -/// Filters network notes by account ID and maps them to single target network notes. 
-fn filter_by_account_id_and_map_to_single_target( - account_id: NetworkAccountId, - notes: Vec, -) -> Vec { - notes - .into_iter() - .filter_map(|note| match note { - NetworkNote::SingleTarget(note) if note.account_id() == account_id => Some(note), - NetworkNote::SingleTarget(_) => None, - }) - .collect::>() -} - -#[cfg(test)] -mod tests { - use std::collections::HashSet; - use std::sync::{Arc, Mutex}; - - use miden_protocol::account::{AccountBuilder, AccountStorageMode, AccountType}; - use miden_protocol::asset::{Asset, FungibleAsset}; - use miden_protocol::crypto::rand::RpoRandomCoin; - use miden_protocol::note::{Note, NoteAttachment, NoteExecutionHint, NoteType}; - use miden_protocol::testing::account_id::AccountIdBuilder; - use miden_protocol::transaction::TransactionId; - use miden_protocol::{EMPTY_WORD, Felt, Hasher}; - use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; - - use super::*; - - // HELPERS - // ============================================================================================ - - /// Creates a network account for testing. - fn create_network_account(seed: u8) -> Account { - use miden_protocol::testing::noop_auth_component::NoopAuthComponent; - use miden_standards::account::wallets::BasicWallet; - - AccountBuilder::new([seed; 32]) - .account_type(AccountType::RegularAccountUpdatableCode) - .storage_mode(AccountStorageMode::Network) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .expect("should be able to build test account") - } - - /// Creates a faucet account ID for testing. - fn create_faucet_id(seed: u8) -> miden_protocol::account::AccountId { - AccountIdBuilder::new() - .account_type(AccountType::FungibleFaucet) - .storage_mode(AccountStorageMode::Public) - .build_with_seed([seed; 32]) - } - - /// Creates a note targeted at the given network account. 
- fn create_network_note( - target_account_id: miden_protocol::account::AccountId, - seed: u8, - ) -> Note { - let coin_seed: [u64; 4] = - [u64::from(seed), u64::from(seed) + 1, u64::from(seed) + 2, u64::from(seed) + 3]; - let rng = Arc::new(Mutex::new(RpoRandomCoin::new(coin_seed.map(Felt::new).into()))); - let mut rng = rng.lock().unwrap(); - - let faucet_id = create_faucet_id(seed.wrapping_add(100)); - - let target = NetworkAccountTarget::new(target_account_id, NoteExecutionHint::Always) - .expect("NetworkAccountTarget creation should succeed for network account"); - let attachment: NoteAttachment = target.into(); - - create_p2id_note( - target_account_id, - target_account_id, - vec![Asset::Fungible(FungibleAsset::new(faucet_id, 10).unwrap())], - NoteType::Public, - attachment, - &mut *rng, - ) - .expect("note creation should succeed") - } - - /// Creates a `SingleTargetNetworkNote` from a `Note`. - fn to_single_target_note(note: Note) -> SingleTargetNetworkNote { - SingleTargetNetworkNote::try_from(note).expect("should convert to SingleTargetNetworkNote") - } - - /// Creates a mock `TransactionId` for testing. - fn mock_tx_id(seed: u8) -> TransactionId { - TransactionId::new( - Hasher::hash(&[seed; 32]), - Hasher::hash(&[seed.wrapping_add(1); 32]), - EMPTY_WORD, - EMPTY_WORD, - ) - } - - /// Creates a mock `BlockHeader` for testing. - fn mock_block_header(block_num: u32) -> miden_protocol::block::BlockHeader { - use miden_node_utils::fee::test_fee_params; - use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; - - miden_protocol::block::BlockHeader::new( - 0, - EMPTY_WORD, - BlockNumber::from(block_num), - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - SecretKey::new().public_key(), - test_fee_params(), - 0, - ) - } - - impl NetworkAccountState { - /// Creates a new `NetworkAccountState` for testing. - /// - /// This mirrors the behavior of `load()` but with provided notes instead of - /// fetching from the store. 
- #[cfg(test)] - pub fn new_for_testing( - account: Account, - account_id: NetworkAccountId, - notes: Vec, - ) -> Self { - let known_nullifiers: HashSet = - notes.iter().map(SingleTargetNetworkNote::nullifier).collect(); - - let account_tracker = AccountDeltaTracker::new(account); - let mut note_pool = NotePool::default(); - for note in notes { - note_pool.add_note(note); - } - - Self { - account: account_tracker, - notes: note_pool, - account_id, - inflight_txs: BTreeMap::default(), - known_nullifiers, - } - } - } - - // TESTS - // ============================================================================================ - - /// Tests that initial notes loaded into `NetworkAccountState` have their nullifiers - /// registered in `known_nullifiers`. - #[test] - fn test_initial_notes_have_nullifiers_indexed() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let note2 = to_single_target_note(create_network_note(account_id, 2)); - let nullifier1 = note1.nullifier(); - let nullifier2 = note2.nullifier(); - - let state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1, note2]); - - assert!( - state.known_nullifiers.contains(&nullifier1), - "known_nullifiers should contain first note's nullifier" - ); - assert!( - state.known_nullifiers.contains(&nullifier2), - "known_nullifiers should contain second note's nullifier" - ); - assert_eq!( - state.known_nullifiers.len(), - 2, - "known_nullifiers should have exactly 2 entries" - ); - } - - /// Tests that when a `TransactionAdded` event arrives with nullifiers from initial notes, - /// those notes are properly moved from `available_notes` to `nullified_notes`. 
- #[test] - fn test_mempool_event_nullifies_initial_notes() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let note2 = to_single_target_note(create_network_note(account_id, 2)); - let nullifier1 = note1.nullifier(); - let nullifier2 = note2.nullifier(); - - let mut state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1, note2]); - - let available_count = state.notes.available_notes(&BlockNumber::from(0)).count(); - assert_eq!(available_count, 2, "both notes should be available initially"); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![nullifier1], - network_notes: vec![], - account_delta: None, - }; - - let shutdown = state.mempool_update(&event); - assert!(shutdown.is_none(), "mempool_update should not trigger shutdown"); - - let available_nullifiers: Vec<_> = state - .notes - .available_notes(&BlockNumber::from(0)) - .map(|n| n.to_inner().nullifier()) - .collect(); - assert!( - !available_nullifiers.contains(&nullifier1), - "note1 should no longer be available" - ); - assert!(available_nullifiers.contains(&nullifier2), "note2 should still be available"); - assert_eq!(available_nullifiers.len(), 1, "only one note should be available"); - - assert!( - state.inflight_txs.contains_key(&tx_id), - "transaction should be tracked in inflight_txs" - ); - } - - /// Tests that after committing a transaction, the nullifier is removed from `known_nullifiers`. 
- #[test] - fn test_commit_removes_nullifier_from_index() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let nullifier1 = note1.nullifier(); - - let mut state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1]); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![nullifier1], - network_notes: vec![], - account_delta: None, - }; - state.mempool_update(&event); - - assert!( - state.known_nullifiers.contains(&nullifier1), - "nullifier should still be in index while transaction is inflight" - ); - - let commit_event = MempoolEvent::BlockCommitted { - header: Box::new(mock_block_header(1)), - txs: vec![tx_id], - }; - state.mempool_update(&commit_event); - - assert!( - !state.known_nullifiers.contains(&nullifier1), - "nullifier should be removed from index after commit" - ); - } - - /// Tests that reverting a transaction restores the note to `available_notes`. 
- #[test] - fn test_revert_restores_note_to_available() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let nullifier1 = note1.nullifier(); - - let mut state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1]); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![nullifier1], - network_notes: vec![], - account_delta: None, - }; - state.mempool_update(&event); - - // Verify note is not available - let available_count = state.notes.available_notes(&BlockNumber::from(0)).count(); - assert_eq!(available_count, 0, "note should not be available after being consumed"); - - // Revert the transaction - let revert_event = - MempoolEvent::TransactionsReverted(HashSet::from_iter(std::iter::once(tx_id))); - state.mempool_update(&revert_event); - - // Verify note is available again - let available_nullifiers: Vec<_> = state - .notes - .available_notes(&BlockNumber::from(0)) - .map(|n| n.to_inner().nullifier()) - .collect(); - assert!( - available_nullifiers.contains(&nullifier1), - "note should be available again after revert" - ); - } - - /// Tests that nullifiers from dynamically added notes are also indexed. 
- #[test] - fn test_dynamically_added_notes_are_indexed() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let mut state = NetworkAccountState::new_for_testing(account, network_account_id, vec![]); - - assert!(state.known_nullifiers.is_empty(), "known_nullifiers should be empty initially"); - - let new_note = to_single_target_note(create_network_note(account_id, 1)); - let new_nullifier = new_note.nullifier(); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![], - network_notes: vec![NetworkNote::SingleTarget(new_note)], - account_delta: None, - }; - - state.mempool_update(&event); - - // Verify the new note's nullifier is now indexed - assert!( - state.known_nullifiers.contains(&new_nullifier), - "dynamically added note's nullifier should be indexed" - ); - - // Verify the note is available - let available_nullifiers: Vec<_> = state - .notes - .available_notes(&BlockNumber::from(0)) - .map(|n| n.to_inner().nullifier()) - .collect(); - assert!( - available_nullifiers.contains(&new_nullifier), - "dynamically added note should be available" - ); - } -} diff --git a/crates/ntx-builder/src/actor/inflight_note.rs b/crates/ntx-builder/src/actor/inflight_note.rs index 23c7d06d7..4cc080862 100644 --- a/crates/ntx-builder/src/actor/inflight_note.rs +++ b/crates/ntx-builder/src/actor/inflight_note.rs @@ -29,6 +29,15 @@ impl InflightNetworkNote { } } + /// Reconstructs an inflight network note from its constituent parts (e.g., from DB rows). + pub fn from_parts( + note: SingleTargetNetworkNote, + attempt_count: usize, + last_attempt: Option, + ) -> Self { + Self { note, attempt_count, last_attempt } + } + /// Consumes the inflight network note and returns the inner network note. 
pub fn into_inner(self) -> SingleTargetNetworkNote { self.note diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index c5ecc2ccd..3b94bd8c3 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -1,13 +1,13 @@ +pub(crate) mod account_effect; pub mod account_state; mod execute; -mod inflight_note; -mod note_state; +pub(crate) mod inflight_note; use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; -use account_state::{NetworkAccountState, TransactionCandidate}; +use account_state::TransactionCandidate; use futures::FutureExt; use miden_node_proto::clients::{Builder, ValidatorClient}; use miden_node_proto::domain::account::NetworkAccountId; @@ -17,7 +17,7 @@ use miden_node_utils::lru_cache::LruCache; use miden_protocol::Word; use miden_protocol::account::{Account, AccountDelta}; use miden_protocol::block::BlockNumber; -use miden_protocol::note::NoteScript; +use miden_protocol::note::{Note, NoteScript}; use miden_protocol::transaction::TransactionId; use miden_remote_prover_client::RemoteTransactionProver; use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; @@ -26,6 +26,7 @@ use url::Url; use crate::block_producer::BlockProducerClient; use crate::builder::ChainState; +use crate::db::Db; use crate::store::StoreClient; // ACTOR SHUTDOWN REASON @@ -33,8 +34,6 @@ use crate::store::StoreClient; /// The reason an actor has shut down. pub enum ActorShutdownReason { - /// Occurs when the transaction that created the actor is reverted. - AccountReverted(NetworkAccountId), /// Occurs when an account actor detects failure in the messaging channel used by the /// coordinator. EventChannelClosed, @@ -71,6 +70,8 @@ pub struct AccountActorContext { pub max_notes_per_tx: NonZeroUsize, /// Maximum number of note execution attempts before dropping a note. pub max_note_attempts: usize, + /// Database for persistent state. 
+ pub db: Db, } // ACCOUNT ORIGIN @@ -132,10 +133,10 @@ enum ActorMode { /// /// ## Core Responsibilities /// -/// - **State Management**: Loads and maintains the current state of network accounts, including -/// available notes, pending transactions, and account commitments. +/// - **State Management**: Queries the database for the current state of network accounts, +/// including available notes and the latest account state. /// - **Transaction Selection**: Selects viable notes and constructs a [`TransactionCandidate`] -/// based on current chain state. +/// based on current chain state and DB queries. /// - **Transaction Execution**: Executes selected transactions using either local or remote /// proving. /// - **Mempool Integration**: Listens for mempool events to stay synchronized with the network @@ -143,11 +144,12 @@ enum ActorMode { /// /// ## Lifecycle /// -/// 1. **Initialization**: Loads account state from the store or uses provided account data. +/// 1. **Initialization**: Checks DB for available notes to determine initial mode. /// 2. **Event Loop**: Continuously processes mempool events and executes transactions. /// 3. **Transaction Processing**: Selects, executes, and proves transactions, and submits them to /// block producer. -/// 4. **State Updates**: Updates internal state based on mempool events and execution results. +/// 4. **State Updates**: Event effects are persisted to DB by the coordinator before actors are +/// notified. /// 5. **Shutdown**: Terminates gracefully when cancelled or encounters unrecoverable errors. 
/// /// ## Concurrency @@ -158,6 +160,7 @@ enum ActorMode { pub struct AccountActor { origin: AccountOrigin, store: StoreClient, + db: Db, mode: ActorMode, event_rx: mpsc::Receiver>, cancel_token: CancellationToken, @@ -193,6 +196,7 @@ impl AccountActor { Self { origin, store: actor_context.store.clone(), + db: actor_context.db.clone(), mode: ActorMode::NoViableNotes, event_rx, cancel_token, @@ -209,23 +213,19 @@ impl AccountActor { /// Runs the account actor, processing events and managing state until a reason to shutdown is /// encountered. pub async fn run(mut self, semaphore: Arc) -> ActorShutdownReason { - // Load the account state from the store and set up the account actor state. - let account = { - match self.origin { - AccountOrigin::Store(account_id) => self - .store - .get_network_account(account_id) - .await - .expect("actor should be able to load account") - .expect("actor account should exist"), - AccountOrigin::Transaction(ref account) => *(account.clone()), - } - }; + let account_id = self.origin.id(); + + // Determine initial mode by checking DB for available notes. let block_num = self.chain_state.read().await.chain_tip_header.block_num(); - let mut state = - NetworkAccountState::load(account, self.origin.id(), &self.store, block_num) - .await - .expect("actor should be able to load account state"); + let has_notes = self + .db + .has_available_notes(account_id, block_num, self.max_note_attempts) + .await + .expect("actor should be able to check for available notes"); + + if has_notes { + self.mode = ActorMode::NotesAvailable; + } loop { // Enable or disable transaction execution based on actor mode. @@ -239,28 +239,31 @@ impl AccountActor { }; tokio::select! { _ = self.cancel_token.cancelled() => { - return ActorShutdownReason::Cancelled(self.origin.id()); + return ActorShutdownReason::Cancelled(account_id); } // Handle mempool events. 
event = self.event_rx.recv() => { let Some(event) = event else { return ActorShutdownReason::EventChannelClosed; }; - // Re-enable transaction execution if the transaction being waited on has been - // added to the mempool. + // Re-enable transaction execution if the transaction being waited on has + // been resolved (added to mempool, committed in a block, or reverted). if let ActorMode::TransactionInflight(awaited_id) = self.mode { - if let MempoolEvent::TransactionAdded { id, .. } = *event { - if id == awaited_id { - self.mode = ActorMode::NotesAvailable; - } + let should_wake = match event.as_ref() { + MempoolEvent::TransactionAdded { id, .. } => *id == awaited_id, + MempoolEvent::BlockCommitted { txs, .. } => { + txs.contains(&awaited_id) + }, + MempoolEvent::TransactionsReverted(tx_ids) => { + tx_ids.contains(&awaited_id) + }, + }; + if should_wake { + self.mode = ActorMode::NotesAvailable; } } else { self.mode = ActorMode::NotesAvailable; } - // Update state. - if let Some(shutdown_reason) = state.mempool_update(event.as_ref()) { - return shutdown_reason; - } }, // Execute transactions. permit = tx_permit_acquisition => { @@ -268,13 +271,20 @@ impl AccountActor { Ok(_permit) => { // Read the chain state. let chain_state = self.chain_state.read().await.clone(); - // Find a candidate transaction and execute it. - if let Some(tx_candidate) = state.select_candidate( - self.max_notes_per_tx, - self.max_note_attempts, + + // Drop notes that have failed too many times. + if let Err(err) = self.db.drop_failing_notes(account_id, self.max_note_attempts).await { + tracing::error!(err = %err, "failed to drop failing notes"); + } + + // Query DB for latest account and available notes. 
+ let tx_candidate = self.select_candidate_from_db( + account_id, chain_state, - ) { - self.execute_transactions(&mut state, tx_candidate).await; + ).await; + + if let Some(tx_candidate) = tx_candidate { + self.execute_transactions(account_id, tx_candidate).await; } else { // No transactions to execute, wait for events. self.mode = ActorMode::NoViableNotes; @@ -289,13 +299,44 @@ impl AccountActor { } } + /// Selects a transaction candidate by querying the DB. + async fn select_candidate_from_db( + &self, + account_id: NetworkAccountId, + chain_state: ChainState, + ) -> Option { + let block_num = chain_state.chain_tip_header.block_num(); + let max_notes = self.max_notes_per_tx.get(); + + let (latest_account, notes) = self + .db + .select_candidate(account_id, block_num, self.max_note_attempts) + .await + .expect("actor should be able to query DB for candidate"); + + let account = latest_account?; + + let notes: Vec<_> = notes.into_iter().take(max_notes).collect(); + if notes.is_empty() { + return None; + } + + let (chain_tip_header, chain_mmr) = chain_state.into_parts(); + Some(TransactionCandidate { + account, + notes, + chain_tip_header, + chain_mmr, + }) + } + /// Execute a transaction candidate and mark notes as failed as required. /// /// Updates the state of the actor based on the execution result. - #[tracing::instrument(name = "ntx.actor.execute_transactions", skip(self, state, tx_candidate))] + #[tracing::instrument(name = "ntx.actor.execute_transactions", skip(self, tx_candidate))] async fn execute_transactions( &mut self, - state: &mut NetworkAccountState, + account_id: NetworkAccountId, tx_candidate: TransactionCandidate, ) { let block_num = tx_candidate.chain_tip_header.block_num(); @@ -318,20 +359,34 @@ impl AccountActor { }, // Execution completed with some failed notes. 
Ok((tx_id, failed)) => { - let notes = failed.into_iter().map(|note| note.note).collect::>(); - state.notes_failed(notes.as_slice(), block_num); + let nullifiers: Vec<_> = + failed.into_iter().map(|note| note.note.nullifier()).collect(); + self.mark_notes_failed(&nullifiers, block_num).await; self.mode = ActorMode::TransactionInflight(tx_id); }, // Transaction execution failed. Err(err) => { tracing::error!(err = err.as_report(), "network transaction failed"); self.mode = ActorMode::NoViableNotes; - let notes = - notes.into_iter().map(|note| note.into_inner().into()).collect::>(); - state.notes_failed(notes.as_slice(), block_num); + let nullifiers: Vec<_> = notes + .into_iter() + .map(|note| Note::from(note.into_inner()).nullifier()) + .collect(); + self.mark_notes_failed(&nullifiers, block_num).await; }, } } + + /// Marks notes as failed in the DB. + async fn mark_notes_failed( + &self, + nullifiers: &[miden_protocol::note::Nullifier], + block_num: BlockNumber, + ) { + if let Err(err) = self.db.notes_failed(nullifiers.to_vec(), block_num).await { + tracing::error!(err = %err, "failed to mark notes as failed"); + } + } } // HELPERS @@ -368,3 +423,34 @@ fn has_backoff_passed( // Check if the backoff period has passed. 
blocks_passed.as_usize() > backoff_threshold } + +#[cfg(test)] +mod tests { + use miden_protocol::block::BlockNumber; + + use super::has_backoff_passed; + + #[rstest::rstest] + #[test] + #[case::all_zero(Some(BlockNumber::GENESIS), BlockNumber::GENESIS, 0, true)] + #[case::no_attempts(None, BlockNumber::GENESIS, 0, true)] + #[case::one_attempt(Some(BlockNumber::GENESIS), BlockNumber::from(2), 1, true)] + #[case::three_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(3), 3, true)] + #[case::ten_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(13), 10, true)] + #[case::twenty_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(149), 20, true)] + #[case::one_attempt_false(Some(BlockNumber::GENESIS), BlockNumber::from(1), 1, false)] + #[case::three_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(2), 3, false)] + #[case::ten_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(12), 10, false)] + #[case::twenty_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(148), 20, false)] + fn backoff_has_passed( + #[case] last_attempt_block_num: Option, + #[case] current_block_num: BlockNumber, + #[case] attempt_count: usize, + #[case] backoff_should_have_passed: bool, + ) { + assert_eq!( + backoff_should_have_passed, + has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) + ); + } +} diff --git a/crates/ntx-builder/src/actor/note_state.rs b/crates/ntx-builder/src/actor/note_state.rs deleted file mode 100644 index 610334c67..000000000 --- a/crates/ntx-builder/src/actor/note_state.rs +++ /dev/null @@ -1,235 +0,0 @@ -use std::collections::{HashMap, VecDeque}; - -use miden_node_proto::domain::account::NetworkAccountId; -use miden_node_proto::domain::note::SingleTargetNetworkNote; -use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::account::{Account, AccountDelta, AccountId}; -use miden_protocol::block::BlockNumber; -use miden_protocol::note::Nullifier; - -use 
crate::actor::inflight_note::InflightNetworkNote; - -// ACCOUNT DELTA TRACKER -// ================================================================================================ - -/// Tracks committed and inflight account state updates. -#[derive(Clone)] -pub struct AccountDeltaTracker { - /// The committed account state, if any. - /// - /// This may be `None` if the account creation transaction is still inflight. - committed: Option, - - /// Inflight account updates in chronological order. - inflight: VecDeque, -} - -impl AccountDeltaTracker { - /// Creates a new tracker with the given committed account state. - pub fn new(account: Account) -> Self { - Self { - committed: Some(account), - inflight: VecDeque::default(), - } - } - - /// Appends a delta to the set of inflight account updates. - pub fn add_delta(&mut self, delta: &AccountDelta) { - let mut state = self.latest_account(); - state - .apply_delta(delta) - .expect("network account delta should apply since it was accepted by the mempool"); - - self.inflight.push_back(state); - } - - /// Commits the oldest account state delta. - /// - /// # Panics - /// - /// Panics if there are no deltas to commit. - pub fn commit_delta(&mut self) { - self.committed = self.inflight.pop_front().expect("must have a delta to commit").into(); - } - - /// Reverts the newest account state delta. - /// - /// Returns `true` if this reverted the account creation delta. The caller _must_ handle - /// cleanup as calls to `latest_account` will panic afterwards. - /// - /// # Panics - /// - /// Panics if there are no deltas to revert. - #[must_use = "must handle account removal if this returns true"] - pub fn revert_delta(&mut self) -> bool { - self.inflight.pop_back().expect("must have a delta to revert"); - self.committed.is_none() && self.inflight.is_empty() - } - - /// Returns the latest inflight account state. 
- pub fn latest_account(&self) -> Account { - self.inflight - .back() - .or(self.committed.as_ref()) - .expect("account must have either a committed or inflight state") - .clone() - } - - /// Returns `true` if there are no inflight deltas. - pub fn has_no_inflight(&self) -> bool { - self.inflight.is_empty() - } -} - -// NOTE POOL -// ================================================================================================ - -/// Manages available and nullified notes for a network account. -#[derive(Clone, Default)] -pub struct NotePool { - /// Unconsumed notes available for consumption. - available: HashMap, - - /// Notes consumed by inflight transactions (not yet committed). - nullified: HashMap, -} - -impl NotePool { - /// Returns an iterator over notes that are available and not in backoff. - pub fn available_notes( - &self, - block_num: &BlockNumber, - ) -> impl Iterator { - self.available.values().filter(|¬e| note.is_available(*block_num)) - } - - /// Adds a new network note making it available for consumption. - pub fn add_note(&mut self, note: SingleTargetNetworkNote) { - self.available.insert(note.nullifier(), InflightNetworkNote::new(note)); - } - - /// Removes the note completely (used when reverting note creation). - pub fn remove_note(&mut self, nullifier: Nullifier) { - self.available.remove(&nullifier); - self.nullified.remove(&nullifier); - } - - /// Marks a note as being consumed by moving it to the nullified set. - /// - /// Returns `Err(())` if the note does not exist or was already nullified. - pub fn nullify(&mut self, nullifier: Nullifier) -> Result<(), ()> { - if let Some(note) = self.available.remove(&nullifier) { - self.nullified.insert(nullifier, note); - Ok(()) - } else { - tracing::warn!(%nullifier, "note must be available to nullify"); - Err(()) - } - } - - /// Commits a nullifier, removing the associated note entirely. - /// - /// Silently ignores if the nullifier is not present. 
- pub fn commit_nullifier(&mut self, nullifier: Nullifier) { - let _ = self.nullified.remove(&nullifier); - } - - /// Reverts a nullifier, making the note available again. - pub fn revert_nullifier(&mut self, nullifier: Nullifier) { - // Transactions can be reverted out of order. - if let Some(note) = self.nullified.remove(&nullifier) { - self.available.insert(nullifier, note); - } - } - - /// Drops all notes that have exceeded the maximum attempt count. - pub fn drop_failing_notes(&mut self, max_attempts: usize) { - self.available.retain(|_, note| note.attempt_count() < max_attempts); - } - - /// Marks the specified notes as failed. - pub fn fail_notes(&mut self, nullifiers: &[Nullifier], block_num: BlockNumber) { - for nullifier in nullifiers { - if let Some(note) = self.available.get_mut(nullifier) { - note.fail(block_num); - } else { - tracing::warn!(%nullifier, "failed note is not in account's state"); - } - } - } - - /// Returns `true` if there are no notes being tracked. - pub fn is_empty(&self) -> bool { - self.available.is_empty() && self.nullified.is_empty() - } -} - -// NETWORK ACCOUNT EFFECT -// ================================================================================================ - -/// Represents the effect of a transaction on a network account. 
-#[derive(Clone)] -pub enum NetworkAccountEffect { - Created(Account), - Updated(AccountDelta), -} - -impl NetworkAccountEffect { - pub fn from_protocol(update: &AccountUpdateDetails) -> Option { - let update = match update { - AccountUpdateDetails::Private => return None, - AccountUpdateDetails::Delta(update) if update.is_full_state() => { - NetworkAccountEffect::Created( - Account::try_from(update) - .expect("Account should be derivable by full state AccountDelta"), - ) - }, - AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update.clone()), - }; - - update.protocol_account_id().is_network().then_some(update) - } - - pub fn network_account_id(&self) -> NetworkAccountId { - // SAFETY: This is a network account by construction. - self.protocol_account_id().try_into().unwrap() - } - - fn protocol_account_id(&self) -> AccountId { - match self { - NetworkAccountEffect::Created(acc) => acc.id(), - NetworkAccountEffect::Updated(delta) => delta.id(), - } - } -} - -#[cfg(test)] -mod tests { - use miden_protocol::block::BlockNumber; - - #[rstest::rstest] - #[test] - #[case::all_zero(Some(BlockNumber::GENESIS), BlockNumber::GENESIS, 0, true)] - #[case::no_attempts(None, BlockNumber::GENESIS, 0, true)] - #[case::one_attempt(Some(BlockNumber::GENESIS), BlockNumber::from(2), 1, true)] - #[case::three_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(3), 3, true)] - #[case::ten_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(13), 10, true)] - #[case::twenty_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(149), 20, true)] - #[case::one_attempt_false(Some(BlockNumber::GENESIS), BlockNumber::from(1), 1, false)] - #[case::three_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(2), 3, false)] - #[case::ten_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(12), 10, false)] - #[case::twenty_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(148), 20, false)] - fn backoff_has_passed( - #[case] 
last_attempt_block_num: Option, - #[case] current_block_num: BlockNumber, - #[case] attempt_count: usize, - #[case] backoff_should_have_passed: bool, - ) { - use crate::actor::has_backoff_passed; - - assert_eq!( - backoff_should_have_passed, - has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) - ); - } -} diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 14be4ef31..b642d0379 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -16,6 +16,7 @@ use tonic::Status; use crate::NtxBuilderConfig; use crate::actor::{AccountActorContext, AccountOrigin}; use crate::coordinator::Coordinator; +use crate::db::Db; use crate::store::StoreClient; // CHAIN STATE @@ -89,6 +90,8 @@ pub struct NetworkTransactionBuilder { coordinator: Coordinator, /// Client for the store gRPC API. store: StoreClient, + /// Database for persistent state. + db: Db, /// Shared chain state updated by the event loop and read by actors. chain_state: Arc>, /// Context shared with all account actors. @@ -102,6 +105,7 @@ impl NetworkTransactionBuilder { config: NtxBuilderConfig, coordinator: Coordinator, store: StoreClient, + db: Db, chain_state: Arc>, actor_context: AccountActorContext, mempool_events: MempoolEventStream, @@ -110,6 +114,7 @@ impl NetworkTransactionBuilder { config, coordinator, store, + db, chain_state, actor_context, mempool_events, @@ -177,19 +182,48 @@ impl NetworkTransactionBuilder { } } - /// Handles account IDs loaded from the store by spawning actors for them. + /// Handles account IDs loaded from the store by syncing state to DB and spawning actors. #[tracing::instrument(name = "ntx.builder.handle_loaded_account", skip(self, account_id))] async fn handle_loaded_account( &mut self, account_id: NetworkAccountId, ) -> Result<(), anyhow::Error> { + // Fetch account from store and write to DB. 
+ let account = self + .store + .get_network_account(account_id) + .await + .context("failed to load account from store")? + .context("account should exist in store")?; + + let block_num = self.chain_state.read().await.chain_tip_header.block_num(); + let notes = self + .store + .get_unconsumed_network_notes(account_id, block_num.as_u32()) + .await + .context("failed to load notes from store")?; + + let notes: Vec<_> = notes + .into_iter() + .map(|n| { + let miden_node_proto::domain::note::NetworkNote::SingleTarget(note) = n; + note + }) + .collect(); + + // Write account and notes to DB. + self.db + .sync_account_from_store(account_id, account.clone(), notes.clone()) + .await + .context("failed to sync account to DB")?; + self.coordinator .spawn_actor(AccountOrigin::store(account_id), &self.actor_context) .await?; Ok(()) } - /// Handles mempool events by routing them to actors and spawning new actors as needed. + /// Handles mempool events by writing to DB first, then routing to actors. #[tracing::instrument(name = "ntx.builder.handle_mempool_event", skip(self, event))] async fn handle_mempool_event( &mut self, @@ -197,6 +231,12 @@ impl NetworkTransactionBuilder { ) -> Result<(), anyhow::Error> { match event.as_ref() { MempoolEvent::TransactionAdded { account_delta, .. } => { + // Write event effects to DB first. + self.coordinator + .write_event(&event) + .await + .context("failed to write TransactionAdded to DB")?; + // Handle account deltas in case an account is being created. if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { // Handle account deltas for network accounts only. @@ -214,24 +254,31 @@ impl NetworkTransactionBuilder { Ok(()) }, // Update chain state and broadcast. - MempoolEvent::BlockCommitted { header, txs } => { + MempoolEvent::BlockCommitted { header, .. } => { + // Write event effects to DB first. 
+ self.coordinator + .write_event(&event) + .await + .context("failed to write BlockCommitted to DB")?; + self.update_chain_tip(header.as_ref().clone()).await; self.coordinator.broadcast(event.clone()).await; - - // All transactions pertaining to predating events should now be available - // through the store. So we can now drain them. - for tx_id in txs { - self.coordinator.drain_predating_events(tx_id); - } Ok(()) }, // Broadcast to all actors. - MempoolEvent::TransactionsReverted(txs) => { + MempoolEvent::TransactionsReverted(_) => { + // Write event effects to DB first; returns reverted account IDs. + let reverted_accounts = self + .coordinator + .write_event(&event) + .await + .context("failed to write TransactionsReverted to DB")?; + self.coordinator.broadcast(event.clone()).await; - // Reverted predating transactions need not be processed. - for tx_id in txs { - self.coordinator.drain_predating_events(tx_id); + // Cancel actors for reverted account creations. + for account_id in &reverted_accounts { + self.coordinator.cancel_actor(account_id); } Ok(()) }, diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index 673c40106..a857bdc64 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -2,18 +2,18 @@ use std::collections::HashMap; use std::sync::Arc; use anyhow::Context; -use indexmap::IndexMap; +use miden_node_db::DatabaseError; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::NetworkNote; +use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::transaction::TransactionId; use tokio::sync::mpsc::error::SendError; use tokio::sync::{Semaphore, mpsc}; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; use crate::actor::{AccountActor, AccountActorContext, AccountOrigin, 
ActorShutdownReason}; +use crate::db::Db; // ACTOR HANDLE // ================================================================================================ @@ -87,9 +87,8 @@ pub struct Coordinator { /// ensuring fair resource allocation and system stability under load. semaphore: Arc, - /// Cache of events received from the mempool that predate corresponding network accounts. - /// Grouped by network account ID to allow targeted event delivery to actors upon creation. - predating_events: HashMap>>, + /// Database for persistent state. + db: Db, /// Channel size for each actor's event channel. actor_channel_size: usize, @@ -98,12 +97,12 @@ pub struct Coordinator { impl Coordinator { /// Creates a new coordinator with the specified maximum number of inflight transactions /// and actor channel size. - pub fn new(max_inflight_transactions: usize, actor_channel_size: usize) -> Self { + pub fn new(max_inflight_transactions: usize, actor_channel_size: usize, db: Db) -> Self { Self { actor_registry: HashMap::new(), actor_join_set: JoinSet::new(), semaphore: Arc::new(Semaphore::new(max_inflight_transactions)), - predating_events: HashMap::new(), + db, actor_channel_size, } } @@ -135,17 +134,10 @@ impl Coordinator { let actor = AccountActor::new(origin, actor_context, event_rx, cancel_token.clone()); let handle = ActorHandle::new(event_tx, cancel_token); - // Run the actor. + // Run the actor. Actor reads state from DB on startup. let semaphore = self.semaphore.clone(); self.actor_join_set.spawn(Box::pin(actor.run(semaphore))); - // Send the new actor any events that contain notes that predate account creation. 
- if let Some(predating_events) = self.predating_events.remove(&account_id) { - for event in predating_events.values() { - Self::send(&handle, event.clone()).await?; - } - } - self.actor_registry.insert(account_id, handle); tracing::info!(account_id = %account_id, "Created actor for account prefix"); Ok(()) @@ -202,11 +194,6 @@ impl Coordinator { tracing::info!(account_id = %account_id, "Account actor cancelled"); Ok(()) }, - ActorShutdownReason::AccountReverted(account_id) => { - tracing::info!(account_id = %account_id, "Account reverted"); - self.actor_registry.remove(&account_id); - Ok(()) - }, ActorShutdownReason::EventChannelClosed => { anyhow::bail!("event channel closed"); }, @@ -226,19 +213,15 @@ impl Coordinator { /// Sends a mempool event to all network account actors that are found in the corresponding /// transaction's notes. /// - /// Caches the mempool event for each network account found in the transaction's notes that does - /// not currently have a corresponding actor. If an actor does not exist for the account, it is - /// assumed that the account has not been created on the chain yet. - /// - /// Cached events will be fed to the corresponding actor when the account creation transaction - /// is processed. + /// Events are sent only to actors that are currently active. Since event effects are already + /// persisted in the DB by `write_event()`, actors that spawn later read their state from the + /// DB and do not need predating events. pub async fn send_targeted( &mut self, event: &Arc, ) -> Result<(), SendError>> { let mut target_actors = HashMap::new(); - if let MempoolEvent::TransactionAdded { id, network_notes, account_delta, .. } = - event.as_ref() + if let MempoolEvent::TransactionAdded { network_notes, account_delta, .. } = event.as_ref() { // We need to inform the account if it was updated. 
This lets it know that its own // transaction has been applied, and in the future also resolves race conditions with @@ -259,14 +242,7 @@ impl Coordinator { let NetworkNote::SingleTarget(note) = note; let network_account_id = note.account_id(); if let Some(actor) = self.actor_registry.get(&network_account_id) { - // Register actor as target. target_actors.insert(network_account_id, actor); - } else { - // Cache event for every note that doesn't have a corresponding actor. - self.predating_events - .entry(network_account_id) - .or_default() - .insert(*id, event.clone()); } } } @@ -277,16 +253,55 @@ impl Coordinator { Ok(()) } - /// Removes any cached events for a given transaction ID from all account caches. - pub fn drain_predating_events(&mut self, tx_id: &TransactionId) { - // Remove the transaction from all account caches. - // This iterates over all predating events which is fine because the count is expected to be - // low. - self.predating_events.retain(|_, account_events| { - account_events.shift_remove(tx_id); - // Remove entries for accounts with no more cached events. - !account_events.is_empty() - }); + /// Writes mempool event effects to the database. + /// + /// This must be called BEFORE sending notifications to actors. For `TransactionsReverted`, + /// returns the list of account IDs whose creation was reverted. 
+ pub async fn write_event( + &self, + event: &MempoolEvent, + ) -> Result, DatabaseError> { + match event { + MempoolEvent::TransactionAdded { + id, + nullifiers, + network_notes, + account_delta, + } => { + let notes: Vec = network_notes + .iter() + .map(|n| { + let NetworkNote::SingleTarget(note) = n; + note.clone() + }) + .collect(); + + self.db + .handle_transaction_added(*id, account_delta.clone(), notes, nullifiers.clone()) + .await?; + Ok(Vec::new()) + }, + MempoolEvent::BlockCommitted { header, txs } => { + self.db + .handle_block_committed( + txs.clone(), + header.block_num(), + header.as_ref().clone(), + ) + .await?; + Ok(Vec::new()) + }, + MempoolEvent::TransactionsReverted(tx_ids) => { + self.db.handle_transactions_reverted(tx_ids.iter().copied().collect()).await + }, + } + } + + /// Cancels an actor by its account ID. + pub fn cancel_actor(&mut self, account_id: &NetworkAccountId) { + if let Some(handle) = self.actor_registry.remove(account_id) { + handle.cancel_token.cancel(); + } } /// Helper function to send an event to a single account actor. diff --git a/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql index 2588a85bd..d8da128a9 100644 --- a/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql +++ b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql @@ -2,7 +2,7 @@ -- The chain MMR is reconstructed on startup from the store and maintained in memory. CREATE TABLE chain_state ( -- Singleton constraint: only one row allowed. - id INTEGER PRIMARY KEY CHECK (id = 0), + id INTEGER NOT NULL PRIMARY KEY CHECK (id = 0), -- Block number of the chain tip. block_num INTEGER NOT NULL, -- Serialized BlockHeader. @@ -16,7 +16,7 @@ CREATE TABLE chain_state ( -- The auto-incrementing order_id preserves insertion order (VecDeque semantics). CREATE TABLE accounts ( -- Auto-incrementing ID preserves insertion order. 
- order_id INTEGER PRIMARY KEY AUTOINCREMENT, + order_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, -- AccountId serialized bytes (8 bytes). account_id BLOB NOT NULL, -- Serialized Account state. @@ -27,6 +27,9 @@ CREATE TABLE accounts ( -- At most one committed row per account. CREATE UNIQUE INDEX idx_accounts_committed ON accounts(account_id) WHERE transaction_id IS NULL; +-- At most one inflight row per (account, transaction) pair. +CREATE UNIQUE INDEX idx_accounts_inflight ON accounts(account_id, transaction_id) + WHERE transaction_id IS NOT NULL; CREATE INDEX idx_accounts_account ON accounts(account_id); CREATE INDEX idx_accounts_tx ON accounts(transaction_id) WHERE transaction_id IS NOT NULL; diff --git a/crates/ntx-builder/src/db/mod.rs b/crates/ntx-builder/src/db/mod.rs index 3d1c27bee..40709de7c 100644 --- a/crates/ntx-builder/src/db/mod.rs +++ b/crates/ntx-builder/src/db/mod.rs @@ -1,5 +1,213 @@ +use std::path::PathBuf; + +use anyhow::Context; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_protocol::account::Account; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::TransactionId; +use tracing::{info, instrument}; + +use crate::COMPONENT; +use crate::actor::inflight_note::InflightNetworkNote; +use crate::db::migrations::apply_migrations; +use crate::db::models::queries; + +pub(crate) mod models; + mod migrations; mod schema_hash; /// [diesel](https://diesel.rs) generated schema. pub(crate) mod schema; + +pub type Result = std::result::Result; + +#[derive(Clone)] +pub struct Db { + inner: miden_node_db::Db, +} + +impl Db { + /// Creates and initializes the database, then opens an async connection pool. 
+ #[instrument( + target = COMPONENT, + name = "ntx_builder.database.setup", + skip_all, + fields(path=%database_filepath.display()), + err, + )] + pub async fn setup(database_filepath: PathBuf) -> anyhow::Result { + let inner = miden_node_db::Db::new(&database_filepath) + .context("failed to build connection pool")?; + + info!( + target: COMPONENT, + sqlite = %database_filepath.display(), + "Connected to the database" + ); + + let me = Db { inner }; + me.inner + .query("migrations", apply_migrations) + .await + .context("failed to apply migrations on pool connection")?; + Ok(me) + } + + // PUBLIC QUERY METHODS + // ============================================================================================ + + /// Returns `true` if there are notes available for consumption by the given account. + pub async fn has_available_notes( + &self, + account_id: NetworkAccountId, + block_num: BlockNumber, + max_attempts: usize, + ) -> Result { + self.inner + .query("has_available_notes", move |conn| { + let notes = queries::available_notes(conn, account_id, block_num, max_attempts)?; + Ok(!notes.is_empty()) + }) + .await + } + + /// Drops notes for the given account that have exceeded the maximum attempt count. + pub async fn drop_failing_notes( + &self, + account_id: NetworkAccountId, + max_attempts: usize, + ) -> Result<()> { + self.inner + .transact("drop_failing_notes", move |conn| { + queries::drop_failing_notes(conn, account_id, max_attempts) + }) + .await + } + + /// Returns the latest account state and available notes for the given account. 
+ pub async fn select_candidate( + &self, + account_id: NetworkAccountId, + block_num: BlockNumber, + max_note_attempts: usize, + ) -> Result<(Option, Vec)> { + self.inner + .query("select_candidate", move |conn| { + let account = queries::get_account(conn, account_id)?; + let notes = + queries::available_notes(conn, account_id, block_num, max_note_attempts)?; + Ok((account, notes)) + }) + .await + } + + /// Marks notes as failed by incrementing `attempt_count` and setting `last_attempt`. + pub async fn notes_failed( + &self, + nullifiers: Vec, + block_num: BlockNumber, + ) -> Result<()> { + self.inner + .transact("notes_failed", move |conn| { + queries::notes_failed(conn, &nullifiers, block_num) + }) + .await + } + + /// Handles a `TransactionAdded` mempool event by writing effects to the DB. + pub async fn handle_transaction_added( + &self, + tx_id: TransactionId, + account_delta: Option, + notes: Vec, + nullifiers: Vec, + ) -> Result<()> { + self.inner + .transact("handle_transaction_added", move |conn| { + queries::add_transaction(conn, &tx_id, account_delta.as_ref(), ¬es, &nullifiers) + }) + .await + } + + /// Handles a `BlockCommitted` mempool event by committing transaction effects. + pub async fn handle_block_committed( + &self, + txs: Vec, + block_num: BlockNumber, + header: BlockHeader, + ) -> Result<()> { + self.inner + .transact("handle_block_committed", move |conn| { + queries::commit_block(conn, &txs, block_num, &header) + }) + .await + } + + /// Handles a `TransactionsReverted` mempool event by undoing transaction effects. + /// + /// Returns the list of account IDs whose creation was reverted. + pub async fn handle_transactions_reverted( + &self, + tx_ids: Vec, + ) -> Result> { + self.inner + .transact("handle_transactions_reverted", move |conn| { + queries::revert_transaction(conn, &tx_ids) + }) + .await + } + + /// Purges all inflight state. Called on startup to get a clean slate. 
+ pub async fn purge_inflight(&self) -> Result<()> { + self.inner.transact("purge_inflight", queries::purge_inflight).await + } + + /// Inserts or replaces the singleton chain state row. + pub async fn upsert_chain_state( + &self, + block_num: BlockNumber, + header: BlockHeader, + ) -> Result<()> { + self.inner + .transact("upsert_chain_state", move |conn| { + queries::upsert_chain_state(conn, block_num, &header) + }) + .await + } + + /// Syncs an account and its notes from the store into the DB. + pub async fn sync_account_from_store( + &self, + account_id: NetworkAccountId, + account: Account, + notes: Vec, + ) -> Result<()> { + self.inner + .transact("sync_account_from_store", move |conn| { + queries::upsert_committed_account(conn, account_id, &account)?; + queries::insert_committed_notes(conn, ¬es)?; + Ok(()) + }) + .await + } + + /// Creates a file-backed SQLite test connection with migrations applied. + #[cfg(test)] + pub fn test_conn() -> (diesel::SqliteConnection, tempfile::TempDir) { + use diesel::{Connection, SqliteConnection}; + use miden_node_db::configure_connection_on_creation; + + let dir = tempfile::tempdir().expect("failed to create temp directory"); + let db_path = dir.path().join("test.sqlite3"); + let mut conn = SqliteConnection::establish(db_path.to_str().unwrap()) + .expect("temp file sqlite should always work"); + configure_connection_on_creation(&mut conn).expect("connection configuration should work"); + apply_migrations(&mut conn).expect("migrations should apply on empty database"); + (conn, dir) + } +} diff --git a/crates/ntx-builder/src/db/models/conv.rs b/crates/ntx-builder/src/db/models/conv.rs new file mode 100644 index 000000000..2a3299428 --- /dev/null +++ b/crates/ntx-builder/src/db/models/conv.rs @@ -0,0 +1,77 @@ +//! Conversions between Miden domain types and database column types. 
+ +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_node_proto::generated as proto; +use miden_protocol::account::{Account, AccountId}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::{Note, Nullifier}; +use miden_protocol::transaction::TransactionId; +use miden_tx::utils::{Deserializable, Serializable}; +use prost::Message; + +// SERIALIZATION (domain → DB) +// ================================================================================================ + +pub fn account_to_bytes(account: &Account) -> Vec { + account.to_bytes() +} + +pub fn block_header_to_bytes(header: &BlockHeader) -> Vec { + header.to_bytes() +} + +pub fn network_account_id_to_bytes(id: NetworkAccountId) -> Vec { + id.inner().to_bytes() +} + +pub fn transaction_id_to_bytes(id: &TransactionId) -> Vec { + id.to_bytes() +} + +pub fn nullifier_to_bytes(nullifier: &Nullifier) -> Vec { + nullifier.to_bytes() +} + +pub fn block_num_to_i64(block_num: BlockNumber) -> i64 { + i64::from(block_num.as_u32()) +} + +#[expect(clippy::cast_sign_loss)] +pub fn block_num_from_i64(val: i64) -> BlockNumber { + BlockNumber::from(val as u32) +} + +/// Serializes a `SingleTargetNetworkNote` to bytes using its protobuf representation. 
+pub fn single_target_note_to_bytes(note: &SingleTargetNetworkNote) -> Vec { + let proto_note: proto::note::NetworkNote = Note::from(note.clone()).into(); + proto_note.encode_to_vec() +} + +// DESERIALIZATION (DB → domain) +// ================================================================================================ + +pub fn account_from_bytes(bytes: &[u8]) -> Result { + Account::read_from_bytes(bytes).map_err(|e| DatabaseError::deserialization("account", e)) +} + +pub fn account_id_from_bytes(bytes: &[u8]) -> Result { + AccountId::read_from_bytes(bytes).map_err(|e| DatabaseError::deserialization("account id", e)) +} + +pub fn network_account_id_from_bytes(bytes: &[u8]) -> Result { + let account_id = account_id_from_bytes(bytes)?; + NetworkAccountId::try_from(account_id) + .map_err(|e| DatabaseError::deserialization("network account id", e)) +} + +/// Deserializes a `SingleTargetNetworkNote` from its protobuf byte representation. +pub fn single_target_note_from_bytes( + bytes: &[u8], +) -> Result { + let proto_note = proto::note::NetworkNote::decode(bytes) + .map_err(|e| DatabaseError::deserialization("network note proto", e))?; + SingleTargetNetworkNote::try_from(proto_note) + .map_err(|e| DatabaseError::deserialization("network note conversion", e)) +} diff --git a/crates/ntx-builder/src/db/models/mod.rs b/crates/ntx-builder/src/db/models/mod.rs new file mode 100644 index 000000000..405fe0814 --- /dev/null +++ b/crates/ntx-builder/src/db/models/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod conv; + +pub mod queries; diff --git a/crates/ntx-builder/src/db/models/queries/accounts.rs b/crates/ntx-builder/src/db/models/queries/accounts.rs new file mode 100644 index 000000000..833f60ed8 --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/accounts.rs @@ -0,0 +1,102 @@ +//! Account-related queries and models. 
+ +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::account::Account; + +use crate::db::models::conv as conversions; +use crate::db::schema; + +// MODELS +// ================================================================================================ + +/// Row for inserting into the unified `accounts` table. +/// +/// `transaction_id = None` means committed; `Some(tx_id_bytes)` means inflight. +#[derive(Debug, Clone, Insertable)] +#[diesel(table_name = schema::accounts)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct AccountInsert { + pub account_id: Vec, + pub account_data: Vec, + pub transaction_id: Option>, +} + +/// Row read from `accounts`. +#[derive(Debug, Clone, Queryable, Selectable)] +#[diesel(table_name = schema::accounts)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct AccountRow { + pub account_data: Vec, +} + +// QUERIES +// ================================================================================================ + +/// Inserts or replaces the committed account state (`transaction_id = NULL`). +/// +/// Deletes any existing committed row first, then inserts a fresh one. +/// +/// # Raw SQL +/// +/// ```sql +/// DELETE FROM accounts WHERE account_id = ?1 AND transaction_id IS NULL +/// +/// INSERT INTO accounts (account_id, account_data, transaction_id) +/// VALUES (?1, ?2, NULL) +/// ``` +pub fn upsert_committed_account( + conn: &mut SqliteConnection, + account_id: NetworkAccountId, + account: &Account, +) -> Result<(), DatabaseError> { + let account_id_bytes = conversions::network_account_id_to_bytes(account_id); + + // Delete the existing committed row (if any). + diesel::delete( + schema::accounts::table + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::transaction_id.is_null()), + ) + .execute(conn)?; + + // Insert the new committed row. 
+ let row = AccountInsert { + account_id: account_id_bytes, + account_data: conversions::account_to_bytes(account), + transaction_id: None, + }; + diesel::insert_into(schema::accounts::table).values(&row).execute(conn)?; + Ok(()) +} + +/// Returns the latest account state: last inflight row (highest `order_id`), or committed if +/// none. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT account_data +/// FROM accounts +/// WHERE account_id = ?1 +/// ORDER BY order_id DESC +/// LIMIT 1 +/// ``` +pub fn get_account( + conn: &mut SqliteConnection, + account_id: NetworkAccountId, +) -> Result, DatabaseError> { + let account_id_bytes = conversions::network_account_id_to_bytes(account_id); + + // ORDER BY order_id DESC returns the latest inflight first, then committed. + let row: Option = schema::accounts::table + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .order(schema::accounts::order_id.desc()) + .select(AccountRow::as_select()) + .first(conn) + .optional()?; + + row.map(|AccountRow { account_data, .. }| conversions::account_from_bytes(&account_data)) + .transpose() +} diff --git a/crates/ntx-builder/src/db/models/queries/chain_state.rs b/crates/ntx-builder/src/db/models/queries/chain_state.rs new file mode 100644 index 000000000..9b529cadc --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/chain_state.rs @@ -0,0 +1,46 @@ +//! Chain state queries and models. + +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_protocol::block::{BlockHeader, BlockNumber}; + +use crate::db::models::conv as conversions; +use crate::db::schema; + +// MODELS +// ================================================================================================ + +#[derive(Debug, Clone, Insertable)] +#[diesel(table_name = schema::chain_state)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct ChainStateInsert { + /// Singleton row ID. Always `0` to satisfy the `CHECK (id = 0)` constraint. 
+ pub id: i32, + pub block_num: i64, + pub block_header: Vec, +} + +// QUERIES +// ================================================================================================ + +/// Inserts or replaces the singleton chain state row. +/// +/// # Raw SQL +/// +/// ```sql +/// INSERT OR REPLACE INTO chain_state (id, block_num, block_header) +/// VALUES (0, ?1, ?2) +/// ``` +pub fn upsert_chain_state( + conn: &mut SqliteConnection, + block_num: BlockNumber, + block_header: &BlockHeader, +) -> Result<(), DatabaseError> { + let row = ChainStateInsert { + id: 0, + block_num: conversions::block_num_to_i64(block_num), + block_header: conversions::block_header_to_bytes(block_header), + }; + diesel::replace_into(schema::chain_state::table).values(&row).execute(conn)?; + Ok(()) +} diff --git a/crates/ntx-builder/src/db/models/queries/mod.rs b/crates/ntx-builder/src/db/models/queries/mod.rs new file mode 100644 index 000000000..fedcaabe0 --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/mod.rs @@ -0,0 +1,316 @@ +//! Database query functions for the NTX builder. + +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::TransactionId; + +use crate::actor::account_effect::NetworkAccountEffect; +use crate::db::models::conv as conversions; +use crate::db::schema; + +mod accounts; +pub use accounts::*; + +mod chain_state; +pub use chain_state::*; + +mod notes; +pub use notes::*; + +#[cfg(test)] +mod tests; + +// STARTUP QUERIES +// ================================================================================================ + +/// Purges all inflight state. Called on startup to get a clean state. 
+/// +/// - Deletes account rows with `transaction_id IS NOT NULL`. +/// - Deletes note rows with `created_by IS NOT NULL`. +/// - Sets `consumed_by = NULL` on notes consumed by inflight transactions. +/// +/// # Raw SQL +/// +/// ```sql +/// DELETE FROM accounts WHERE transaction_id IS NOT NULL +/// +/// DELETE FROM notes WHERE created_by IS NOT NULL +/// +/// UPDATE notes SET consumed_by = NULL WHERE consumed_by IS NOT NULL +/// ``` +pub fn purge_inflight(conn: &mut SqliteConnection) -> Result<(), DatabaseError> { + // Delete inflight account rows. + diesel::delete(schema::accounts::table.filter(schema::accounts::transaction_id.is_not_null())) + .execute(conn)?; + + // Delete inflight-created notes. + diesel::delete(schema::notes::table.filter(schema::notes::created_by.is_not_null())) + .execute(conn)?; + + // Un-nullify notes consumed by inflight transactions. + diesel::update(schema::notes::table.filter(schema::notes::consumed_by.is_not_null())) + .set(schema::notes::consumed_by.eq(None::>)) + .execute(conn)?; + + Ok(()) +} + +// MEMPOOL EVENT HANDLERS +// ================================================================================================ + +/// Handles a `TransactionAdded` event by writing effects to the DB. 
+/// +/// # Raw SQL +/// +/// For account updates (applies delta to latest state and inserts inflight row): +/// +/// ```sql +/// -- Fetch latest account (see latest_account) +/// INSERT INTO accounts (account_id, transaction_id, account_data) +/// VALUES (?1, ?2, ?3) +/// ``` +/// +/// Per note (idempotent via `INSERT OR IGNORE`): +/// +/// ```sql +/// INSERT OR IGNORE INTO notes +/// (nullifier, account_id, note_data, attempt_count, last_attempt, created_by, consumed_by) +/// VALUES (?1, ?2, ?3, 0, NULL, ?4, NULL) +/// ``` +/// +/// Per nullifier (marks notes as consumed): +/// +/// ```sql +/// UPDATE notes +/// SET consumed_by = ?1 +/// WHERE nullifier = ?2 AND consumed_by IS NULL +/// ``` +pub fn add_transaction( + conn: &mut SqliteConnection, + tx_id: &TransactionId, + account_delta: Option<&AccountUpdateDetails>, + notes: &[SingleTargetNetworkNote], + nullifiers: &[Nullifier], +) -> Result<(), DatabaseError> { + let tx_id_bytes = conversions::transaction_id_to_bytes(tx_id); + + // Process account delta. + if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { + let account_id = update.network_account_id(); + match update { + NetworkAccountEffect::Updated(ref account_delta) => { + // Query latest_account, apply delta, insert inflight row. 
+ let current_account = + get_account(conn, account_id)?.expect("account must exist to apply delta"); + let mut updated = current_account; + updated.apply_delta(account_delta).expect( + "network account delta should apply since it was accepted by the mempool", + ); + + let insert = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(tx_id_bytes.clone()), + account_data: conversions::account_to_bytes(&updated), + }; + diesel::insert_into(schema::accounts::table).values(&insert).execute(conn)?; + }, + NetworkAccountEffect::Created(ref account) => { + let insert = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(tx_id_bytes.clone()), + account_data: conversions::account_to_bytes(account), + }; + diesel::insert_into(schema::accounts::table).values(&insert).execute(conn)?; + }, + } + } + + // Insert notes with created_by = tx_id. + // Uses INSERT OR IGNORE to make this idempotent if the same event is delivered twice + // (the nullifier PK would otherwise cause a constraint violation). + for note in notes { + let insert = NoteInsert { + nullifier: conversions::nullifier_to_bytes(¬e.nullifier()), + account_id: conversions::network_account_id_to_bytes(note.account_id()), + note_data: conversions::single_target_note_to_bytes(note), + attempt_count: 0, + last_attempt: None, + created_by: Some(tx_id_bytes.clone()), + consumed_by: None, + }; + diesel::insert_or_ignore_into(schema::notes::table) + .values(&insert) + .execute(conn)?; + } + + // Mark consumed notes: set consumed_by = tx_id for matching nullifiers. + for nullifier in nullifiers { + let nullifier_bytes = conversions::nullifier_to_bytes(nullifier); + + // Only mark notes that are not already consumed. 
+ diesel::update( + schema::notes::table + .find(&nullifier_bytes) + .filter(schema::notes::consumed_by.is_null()), + ) + .set(schema::notes::consumed_by.eq(Some(&tx_id_bytes))) + .execute(conn)?; + } + + Ok(()) +} + +/// Handles a `BlockCommitted` event by committing transaction effects. +/// +/// # Raw SQL +/// +/// Per committed transaction: +/// +/// ```sql +/// -- Find inflight accounts for this tx +/// SELECT account_id FROM accounts WHERE transaction_id = ?1 +/// +/// -- Delete old committed row +/// DELETE FROM accounts WHERE account_id = ?1 AND transaction_id IS NULL +/// +/// -- Promote inflight row to committed +/// UPDATE accounts SET transaction_id = NULL +/// WHERE account_id = ?1 AND transaction_id = ?2 +/// +/// -- Delete consumed notes +/// DELETE FROM notes WHERE consumed_by = ?1 +/// +/// -- Promote inflight-created notes to committed +/// UPDATE notes SET created_by = NULL WHERE created_by = ?1 +/// ``` +/// +/// Finally updates chain state (see [`upsert_chain_state`]). +pub fn commit_block( + conn: &mut SqliteConnection, + tx_ids: &[TransactionId], + block_num: BlockNumber, + block_header: &BlockHeader, +) -> Result<(), DatabaseError> { + for tx_id in tx_ids { + let tx_id_bytes = conversions::transaction_id_to_bytes(tx_id); + + // Promote inflight account rows: delete old committed, set transaction_id = NULL. + // Find accounts that have an inflight row for this tx. + let inflight_account_ids: Vec> = schema::accounts::table + .filter(schema::accounts::transaction_id.eq(&tx_id_bytes)) + .select(schema::accounts::account_id) + .load(conn)?; + + for account_id_bytes in &inflight_account_ids { + // Delete the old committed row for this account. + diesel::delete( + schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id_bytes)) + .filter(schema::accounts::transaction_id.is_null()), + ) + .execute(conn)?; + + // Promote the inflight row to committed (set transaction_id = NULL). + // Only promote the row for this specific tx. 
+            diesel::update(
+                schema::accounts::table
+                    .filter(schema::accounts::account_id.eq(account_id_bytes))
+                    .filter(schema::accounts::transaction_id.eq(&tx_id_bytes)),
+            )
+            .set(schema::accounts::transaction_id.eq(None::<Vec<u8>>))
+            .execute(conn)?;
+        }
+
+        // Delete consumed notes (consumed_by = tx_id).
+        diesel::delete(schema::notes::table.filter(schema::notes::consumed_by.eq(&tx_id_bytes)))
+            .execute(conn)?;
+
+        // Promote inflight-created notes to committed (set created_by = NULL).
+        diesel::update(schema::notes::table.filter(schema::notes::created_by.eq(&tx_id_bytes)))
+            .set(schema::notes::created_by.eq(None::<Vec<u8>>))
+            .execute(conn)?;
+    }
+
+    // Update chain state.
+    upsert_chain_state(conn, block_num, block_header)?;
+
+    Ok(())
+}
+
+/// Handles a `TransactionsReverted` event by undoing transaction effects.
+///
+/// Returns the list of account IDs whose creation was reverted (no committed row exists for that
+/// account after removing the inflight rows).
+///
+/// # Raw SQL
+///
+/// Per reverted transaction:
+///
+/// ```sql
+/// -- Find affected accounts
+/// SELECT account_id FROM accounts WHERE transaction_id = ?1
+///
+/// -- Delete inflight account rows
+/// DELETE FROM accounts WHERE transaction_id = ?1
+///
+/// -- Check if account creation was fully reverted
+/// SELECT COUNT(*) FROM accounts WHERE account_id = ?1
+///
+/// -- Delete inflight-created notes
+/// DELETE FROM notes WHERE created_by = ?1
+///
+/// -- Restore consumed notes
+/// UPDATE notes SET consumed_by = NULL WHERE consumed_by = ?1
+/// ```
+pub fn revert_transaction(
+    conn: &mut SqliteConnection,
+    tx_ids: &[TransactionId],
+) -> Result<Vec<NetworkAccountId>, DatabaseError> {
+    let mut reverted_accounts = Vec::new();
+
+    for tx_id in tx_ids {
+        let tx_id_bytes = conversions::transaction_id_to_bytes(tx_id);
+
+        // Find accounts affected by this transaction.
+        let affected_account_ids: Vec<Vec<u8>> = schema::accounts::table
+            .filter(schema::accounts::transaction_id.eq(&tx_id_bytes))
+            .select(schema::accounts::account_id)
+            .load(conn)?;
+
+        // Delete inflight account rows for this tx.
+        diesel::delete(
+            schema::accounts::table.filter(schema::accounts::transaction_id.eq(&tx_id_bytes)),
+        )
+        .execute(conn)?;
+
+        // Check if any affected accounts had their creation fully reverted
+        // (no committed row and no remaining inflight rows).
+        for account_id_bytes in &affected_account_ids {
+            let remaining: i64 = schema::accounts::table
+                .filter(schema::accounts::account_id.eq(account_id_bytes))
+                .count()
+                .get_result(conn)?;
+
+            if remaining == 0 {
+                let account_id = conversions::network_account_id_from_bytes(account_id_bytes)?;
+                reverted_accounts.push(account_id);
+            }
+        }
+
+        // Delete inflight-created notes (created_by = tx_id).
+        diesel::delete(schema::notes::table.filter(schema::notes::created_by.eq(&tx_id_bytes)))
+            .execute(conn)?;
+
+        // Un-nullify consumed notes (set consumed_by = NULL where consumed_by = tx_id).
+        diesel::update(schema::notes::table.filter(schema::notes::consumed_by.eq(&tx_id_bytes)))
+            .set(schema::notes::consumed_by.eq(None::<Vec<u8>>))
+            .execute(conn)?;
+    }
+
+    Ok(reverted_accounts)
+}
diff --git a/crates/ntx-builder/src/db/models/queries/notes.rs b/crates/ntx-builder/src/db/models/queries/notes.rs
new file mode 100644
index 000000000..c33b84702
--- /dev/null
+++ b/crates/ntx-builder/src/db/models/queries/notes.rs
@@ -0,0 +1,193 @@
+//! Note-related queries and models.
+
+use diesel::prelude::*;
+use miden_node_db::DatabaseError;
+use miden_node_proto::domain::account::NetworkAccountId;
+use miden_node_proto::domain::note::SingleTargetNetworkNote;
+use miden_protocol::block::BlockNumber;
+use miden_protocol::note::Nullifier;
+
+use crate::actor::inflight_note::InflightNetworkNote;
+use crate::db::models::conv as conversions;
+use crate::db::schema;
+
+// MODELS
+// ================================================================================================
+
+/// Row read from the unified `notes` table.
+#[derive(Debug, Clone, Queryable, Selectable)]
+#[diesel(table_name = schema::notes)]
+#[diesel(check_for_backend(diesel::sqlite::Sqlite))]
+pub struct NoteRow {
+    pub note_data: Vec<u8>,
+    pub attempt_count: i32,
+    pub last_attempt: Option<i64>,
+}
+
+/// Row for inserting into the unified `notes` table.
+#[derive(Debug, Clone, Insertable)]
+#[diesel(table_name = schema::notes)]
+#[diesel(check_for_backend(diesel::sqlite::Sqlite))]
+pub struct NoteInsert {
+    pub nullifier: Vec<u8>,
+    pub account_id: Vec<u8>,
+    pub note_data: Vec<u8>,
+    pub attempt_count: i32,
+    pub last_attempt: Option<i64>,
+    pub created_by: Option<Vec<u8>>,
+    pub consumed_by: Option<Vec<u8>>,
+}
+
+// QUERIES
+// ================================================================================================
+
+/// Batch inserts committed notes (`created_by = NULL`, `consumed_by = NULL`).
+///
+/// # Raw SQL
+///
+/// Per note:
+///
+/// ```sql
+/// INSERT OR REPLACE INTO notes
+/// (nullifier, account_id, note_data, attempt_count, last_attempt, created_by, consumed_by)
+/// VALUES (?1, ?2, ?3, 0, NULL, NULL, NULL)
+/// ```
+pub fn insert_committed_notes(
+    conn: &mut SqliteConnection,
+    notes: &[SingleTargetNetworkNote],
+) -> Result<(), DatabaseError> {
+    for note in notes {
+        let row = NoteInsert {
+            nullifier: conversions::nullifier_to_bytes(&note.nullifier()),
+            account_id: conversions::network_account_id_to_bytes(note.account_id()),
+            note_data: conversions::single_target_note_to_bytes(note),
+            attempt_count: 0,
+            last_attempt: None,
+            created_by: None,
+            consumed_by: None,
+        };
+        diesel::replace_into(schema::notes::table).values(&row).execute(conn)?;
+    }
+    Ok(())
+}
+
+/// Returns notes available for consumption by a given account.
+///
+/// Queries unconsumed notes (`consumed_by IS NULL`) for the account that have not exceeded the
+/// maximum attempt count, then applies backoff filtering in Rust via
+/// `InflightNetworkNote::is_available`.
+///
+/// # Raw SQL
+///
+/// ```sql
+/// SELECT note_data, attempt_count, last_attempt
+/// FROM notes
+/// WHERE
+///     account_id = ?1
+///     AND consumed_by IS NULL
+///     AND attempt_count < ?2
+/// ```
+#[expect(clippy::cast_possible_wrap)]
+pub fn available_notes(
+    conn: &mut SqliteConnection,
+    account_id: NetworkAccountId,
+    block_num: BlockNumber,
+    max_attempts: usize,
+) -> Result<Vec<InflightNetworkNote>, DatabaseError> {
+    let account_id_bytes = conversions::network_account_id_to_bytes(account_id);
+
+    // Get unconsumed notes for this account that haven't exceeded the max attempt count.
+    let rows: Vec<NoteRow> = schema::notes::table
+        .filter(schema::notes::account_id.eq(&account_id_bytes))
+        .filter(schema::notes::consumed_by.is_null())
+        .filter(schema::notes::attempt_count.lt(max_attempts as i32))
+        .select(NoteRow::as_select())
+        .load(conn)?;
+
+    let mut result = Vec::new();
+    for row in rows {
+        #[expect(clippy::cast_sign_loss)]
+        let attempt_count = row.attempt_count as usize;
+        let note = note_row_to_inflight(
+            &row.note_data,
+            attempt_count,
+            row.last_attempt.map(conversions::block_num_from_i64),
+        )?;
+        if note.is_available(block_num) {
+            result.push(note);
+        }
+    }
+
+    Ok(result)
+}
+
+/// Marks notes as failed by incrementing `attempt_count` and setting `last_attempt`.
+///
+/// # Raw SQL
+///
+/// Per nullifier:
+///
+/// ```sql
+/// UPDATE notes
+/// SET attempt_count = attempt_count + 1, last_attempt = ?1
+/// WHERE nullifier = ?2
+/// ```
+pub fn notes_failed(
+    conn: &mut SqliteConnection,
+    nullifiers: &[Nullifier],
+    block_num: BlockNumber,
+) -> Result<(), DatabaseError> {
+    let block_num_val = conversions::block_num_to_i64(block_num);
+
+    for nullifier in nullifiers {
+        let nullifier_bytes = conversions::nullifier_to_bytes(nullifier);
+
+        diesel::update(schema::notes::table.find(&nullifier_bytes))
+            .set((
+                schema::notes::attempt_count.eq(schema::notes::attempt_count + 1),
+                schema::notes::last_attempt.eq(Some(block_num_val)),
+            ))
+            .execute(conn)?;
+    }
+    Ok(())
+}
+
+/// Drops notes for the given account that have exceeded the maximum attempt count.
+///
+/// # Raw SQL
+///
+/// ```sql
+/// DELETE FROM notes
+/// WHERE account_id = ?1 AND attempt_count >= ?2
+/// ```
+#[expect(clippy::cast_possible_wrap)]
+pub fn drop_failing_notes(
+    conn: &mut SqliteConnection,
+    account_id: NetworkAccountId,
+    max_attempts: usize,
+) -> Result<(), DatabaseError> {
+    let account_id_bytes = conversions::network_account_id_to_bytes(account_id);
+    let max_attempts = max_attempts as i32;
+
+    diesel::delete(
+        schema::notes::table
+            .filter(schema::notes::account_id.eq(&account_id_bytes))
+            .filter(schema::notes::attempt_count.ge(max_attempts)),
+    )
+    .execute(conn)?;
+
+    Ok(())
+}
+
+// HELPERS
+// ================================================================================================
+
+/// Constructs an `InflightNetworkNote` from DB row fields.
+fn note_row_to_inflight(
+    note_data: &[u8],
+    attempt_count: usize,
+    last_attempt: Option<BlockNumber>,
+) -> Result<InflightNetworkNote, DatabaseError> {
+    let note = conversions::single_target_note_from_bytes(note_data)?;
+    Ok(InflightNetworkNote::from_parts(note, attempt_count, last_attempt))
+}
diff --git a/crates/ntx-builder/src/db/models/queries/tests.rs b/crates/ntx-builder/src/db/models/queries/tests.rs
new file mode 100644
index 000000000..6ef55f9a2
--- /dev/null
+++ b/crates/ntx-builder/src/db/models/queries/tests.rs
@@ -0,0 +1,546 @@
+//! DB-level tests for NTX builder query functions.
+ +use diesel::prelude::*; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_protocol::Word; +use miden_protocol::account::{AccountId, AccountStorageMode, AccountType}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::NoteExecutionHint; +use miden_protocol::testing::account_id::{ + ACCOUNT_ID_REGULAR_NETWORK_ACCOUNT_IMMUTABLE_CODE, + AccountIdBuilder, +}; +use miden_protocol::transaction::TransactionId; +use miden_standards::note::NetworkAccountTarget; +use miden_standards::testing::note::NoteBuilder; +use rand_chacha::ChaCha20Rng; +use rand_chacha::rand_core::SeedableRng; + +use super::*; +use crate::db::models::conv as conversions; +use crate::db::{Db, schema}; + +// TEST HELPERS +// ================================================================================================ + +/// Creates a file-backed SQLite connection with migrations applied. +fn test_conn() -> (SqliteConnection, tempfile::TempDir) { + Db::test_conn() +} + +/// Creates a network account ID from a test constant. +fn mock_network_account_id() -> NetworkAccountId { + let account_id: AccountId = + ACCOUNT_ID_REGULAR_NETWORK_ACCOUNT_IMMUTABLE_CODE.try_into().unwrap(); + NetworkAccountId::try_from(account_id).unwrap() +} + +/// Creates a distinct network account ID using a seeded RNG. +fn mock_network_account_id_seeded(seed: u8) -> NetworkAccountId { + let account_id = AccountIdBuilder::new() + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Network) + .build_with_seed([seed; 32]); + NetworkAccountId::try_from(account_id).unwrap() +} + +/// Creates a unique `TransactionId` from a seed value. +fn mock_tx_id(seed: u64) -> TransactionId { + let w = |n: u64| Word::try_from([n, 0, 0, 0]).unwrap(); + TransactionId::new(w(seed), w(seed + 1), w(seed + 2), w(seed + 3)) +} + +/// Creates a `SingleTargetNetworkNote` targeting the given network account. 
+fn mock_single_target_note( + network_account_id: NetworkAccountId, + seed: u8, +) -> SingleTargetNetworkNote { + let mut rng = ChaCha20Rng::from_seed([seed; 32]); + let sender = AccountIdBuilder::new() + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Private) + .build_with_rng(&mut rng); + + let target = NetworkAccountTarget::new(network_account_id.inner(), NoteExecutionHint::Always) + .expect("network account should be valid target"); + + let note = NoteBuilder::new(sender, rng).attachment(target).build().unwrap(); + + SingleTargetNetworkNote::try_from(note).expect("note should be single-target network note") +} + +/// Counts the total number of rows in the `notes` table. +fn count_notes(conn: &mut SqliteConnection) -> i64 { + schema::notes::table.count().get_result(conn).unwrap() +} + +/// Counts the total number of rows in the `accounts` table. +fn count_accounts(conn: &mut SqliteConnection) -> i64 { + schema::accounts::table.count().get_result(conn).unwrap() +} + +/// Counts inflight account rows. +fn count_inflight_accounts(conn: &mut SqliteConnection) -> i64 { + schema::accounts::table + .filter(schema::accounts::transaction_id.is_not_null()) + .count() + .get_result(conn) + .unwrap() +} + +/// Counts committed account rows. +fn count_committed_accounts(conn: &mut SqliteConnection) -> i64 { + schema::accounts::table + .filter(schema::accounts::transaction_id.is_null()) + .count() + .get_result(conn) + .unwrap() +} + +// PURGE INFLIGHT TESTS +// ================================================================================================ + +#[test] +fn purge_inflight_clears_all_inflight_state() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + + // Insert committed account. 
+ upsert_committed_account(conn, account_id, &mock_account(account_id)).unwrap(); + + // Insert a transaction (creates inflight account row + note + consumption). + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e), &[]).unwrap(); + + assert!(count_inflight_accounts(conn) == 0); // No account delta, so no inflight account. + assert_eq!(count_notes(conn), 1); + + // Mark note as consumed by another tx. + let tx_id2 = mock_tx_id(2); + add_transaction(conn, &tx_id2, None, &[], &[note.nullifier()]).unwrap(); + + // Verify consumed_by is set. + let consumed_count: i64 = schema::notes::table + .filter(schema::notes::consumed_by.is_not_null()) + .count() + .get_result(conn) + .unwrap(); + assert_eq!(consumed_count, 1); + + // Purge inflight state. + purge_inflight(conn).unwrap(); + + // Inflight accounts should be gone. + assert_eq!(count_inflight_accounts(conn), 0); + // Committed account should remain. + assert_eq!(count_committed_accounts(conn), 1); + // Inflight-created notes should be gone. + assert_eq!(count_notes(conn), 0); +} + +// HANDLE TRANSACTION ADDED TESTS +// ================================================================================================ + +#[test] +fn transaction_added_inserts_notes_and_marks_consumed() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note1 = mock_single_target_note(account_id, 10); + let note2 = mock_single_target_note(account_id, 20); + + // Insert committed note first (to test consumption). + insert_committed_notes(conn, std::slice::from_ref(¬e1)).unwrap(); + assert_eq!(count_notes(conn), 1); + + // Add transaction that creates note2 and consumes note1. + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e2), &[note1.nullifier()]) + .unwrap(); + + // Should now have 2 notes total. + assert_eq!(count_notes(conn), 2); + + // note1 should be consumed. 
+ let consumed: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e1.nullifier())) + .select(schema::notes::consumed_by) + .first(conn) + .unwrap(); + assert!(consumed.is_some()); + + // note2 should have created_by set. + let created: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e2.nullifier())) + .select(schema::notes::created_by) + .first(conn) + .unwrap(); + assert!(created.is_some()); +} + +#[test] +fn transaction_added_is_idempotent_for_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + + // Insert the same transaction twice. + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e), &[]).unwrap(); + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e), &[]).unwrap(); + + // Should only have one note (INSERT OR IGNORE). + assert_eq!(count_notes(conn), 1); +} + +// HANDLE BLOCK COMMITTED TESTS +// ================================================================================================ + +#[test] +fn block_committed_promotes_inflight_notes_to_committed() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + let block_num = BlockNumber::from(1u32); + let header = mock_block_header(block_num); + + // Add a transaction that creates a note. + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e), &[]).unwrap(); + + // Verify created_by is set. + let created: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e.nullifier())) + .select(schema::notes::created_by) + .first(conn) + .unwrap(); + assert!(created.is_some()); + + // Commit the block. + commit_block(conn, &[tx_id], block_num, &header).unwrap(); + + // created_by should now be NULL (promoted to committed). 
+ let created: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e.nullifier())) + .select(schema::notes::created_by) + .first(conn) + .unwrap(); + assert!(created.is_none()); +} + +#[test] +fn block_committed_deletes_consumed_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note = mock_single_target_note(account_id, 10); + + // Insert a committed note. + insert_committed_notes(conn, std::slice::from_ref(¬e)).unwrap(); + assert_eq!(count_notes(conn), 1); + + // Consume it via a transaction. + let tx_id = mock_tx_id(1); + add_transaction(conn, &tx_id, None, &[], &[note.nullifier()]).unwrap(); + + // Commit the block. + let block_num = BlockNumber::from(1u32); + let header = mock_block_header(block_num); + commit_block(conn, &[tx_id], block_num, &header).unwrap(); + + // Consumed note should be deleted. + assert_eq!(count_notes(conn), 0); +} + +#[test] +fn block_committed_promotes_inflight_account_to_committed() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let account = mock_account(account_id); + + // Insert committed account. + upsert_committed_account(conn, account_id, &account).unwrap(); + assert_eq!(count_committed_accounts(conn), 1); + + // Insert inflight row. + let tx_id = mock_tx_id(1); + let row = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(conversions::transaction_id_to_bytes(&tx_id)), + account_data: conversions::account_to_bytes(&account), + }; + diesel::insert_into(schema::accounts::table).values(&row).execute(conn).unwrap(); + + assert_eq!(count_inflight_accounts(conn), 1); + assert_eq!(count_committed_accounts(conn), 1); + + // Commit the block. + let block_num = BlockNumber::from(1u32); + let header = mock_block_header(block_num); + commit_block(conn, &[tx_id], block_num, &header).unwrap(); + + // Should have 1 committed and 0 inflight. 
+ assert_eq!(count_committed_accounts(conn), 1); + assert_eq!(count_inflight_accounts(conn), 0); +} + +// HANDLE TRANSACTIONS REVERTED TESTS +// ================================================================================================ + +#[test] +fn transactions_reverted_restores_consumed_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note = mock_single_target_note(account_id, 10); + + // Insert committed note. + insert_committed_notes(conn, std::slice::from_ref(¬e)).unwrap(); + + // Consume it via a transaction. + let tx_id = mock_tx_id(1); + add_transaction(conn, &tx_id, None, &[], &[note.nullifier()]).unwrap(); + + // Verify consumed. + let consumed: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e.nullifier())) + .select(schema::notes::consumed_by) + .first(conn) + .unwrap(); + assert!(consumed.is_some()); + + // Revert the transaction. + let reverted = revert_transaction(conn, &[tx_id]).unwrap(); + assert!(reverted.is_empty()); + + // Note should be un-consumed. + let consumed: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e.nullifier())) + .select(schema::notes::consumed_by) + .first(conn) + .unwrap(); + assert!(consumed.is_none()); +} + +#[test] +fn transactions_reverted_deletes_inflight_created_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + + // Add transaction that creates a note. + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e), &[]).unwrap(); + assert_eq!(count_notes(conn), 1); + + // Revert the transaction. + revert_transaction(conn, &[tx_id]).unwrap(); + + // Inflight-created note should be deleted. 
+ assert_eq!(count_notes(conn), 0); +} + +#[test] +fn transactions_reverted_reports_reverted_account_creations() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let account = mock_account(account_id); + let tx_id = mock_tx_id(1); + + // Insert an inflight account row (simulating account creation by tx). + let row = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(conversions::transaction_id_to_bytes(&tx_id)), + account_data: conversions::account_to_bytes(&account), + }; + diesel::insert_into(schema::accounts::table).values(&row).execute(conn).unwrap(); + + // Revert the transaction --- account creation should be reported. + let reverted = revert_transaction(conn, &[tx_id]).unwrap(); + assert_eq!(reverted.len(), 1); + assert_eq!(reverted[0], account_id); + + // Account should be gone. + assert_eq!(count_accounts(conn), 0); +} + +// AVAILABLE NOTES TESTS +// ================================================================================================ + +#[test] +fn available_notes_filters_consumed_and_exceeded_attempts() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note_good = mock_single_target_note(account_id, 10); + let note_consumed = mock_single_target_note(account_id, 20); + let note_failed = mock_single_target_note(account_id, 30); + + // Insert all as committed. + insert_committed_notes(conn, &[note_good.clone(), note_consumed.clone(), note_failed.clone()]) + .unwrap(); + + // Consume one note. + let tx_id = mock_tx_id(1); + add_transaction(conn, &tx_id, None, &[], &[note_consumed.nullifier()]).unwrap(); + + // Mark one note as failed many times (exceed max_attempts=3). 
+ let block_num = BlockNumber::from(100u32); + notes_failed(conn, &[note_failed.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_failed.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_failed.nullifier()], block_num).unwrap(); + + // Query available notes with max_attempts=3. + let result = available_notes(conn, account_id, block_num, 3).unwrap(); + + // Only note_good should be available (note_consumed is consumed, note_failed exceeded + // attempts). + assert_eq!(result.len(), 1); + assert_eq!(result[0].to_inner().nullifier(), note_good.nullifier()); +} + +#[test] +fn available_notes_only_returns_notes_for_specified_account() { + let (conn, _dir) = &mut test_conn(); + + let account_id_1 = mock_network_account_id(); + let account_id_2 = mock_network_account_id_seeded(42); + + let note_acct1 = mock_single_target_note(account_id_1, 10); + let note_acct2 = mock_single_target_note(account_id_2, 20); + + insert_committed_notes(conn, &[note_acct1.clone(), note_acct2]).unwrap(); + + let block_num = BlockNumber::from(100u32); + let result = available_notes(conn, account_id_1, block_num, 30).unwrap(); + + assert_eq!(result.len(), 1); + assert_eq!(result[0].to_inner().nullifier(), note_acct1.nullifier()); +} + +// DROP FAILING NOTES TESTS +// ================================================================================================ + +#[test] +fn drop_failing_notes_scoped_to_account() { + let (conn, _dir) = &mut test_conn(); + + let account_id_1 = mock_network_account_id(); + let account_id_2 = mock_network_account_id_seeded(42); + + let note_acct1 = mock_single_target_note(account_id_1, 10); + let note_acct2 = mock_single_target_note(account_id_2, 20); + + // Insert both as committed. + insert_committed_notes(conn, &[note_acct1.clone(), note_acct2.clone()]).unwrap(); + + // Fail both notes enough times to exceed max_attempts=2. 
+ let block_num = BlockNumber::from(100u32); + notes_failed(conn, &[note_acct1.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_acct1.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_acct2.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_acct2.nullifier()], block_num).unwrap(); + + // Drop failing notes for account_id_1 only. + drop_failing_notes(conn, account_id_1, 2).unwrap(); + + // note_acct1 should be deleted, note_acct2 should remain. + assert_eq!(count_notes(conn), 1); + let remaining: Vec> = + schema::notes::table.select(schema::notes::nullifier).load(conn).unwrap(); + assert_eq!(remaining[0], conversions::nullifier_to_bytes(¬e_acct2.nullifier())); +} + +// NOTES FAILED TESTS +// ================================================================================================ + +#[test] +fn notes_failed_increments_attempt_count() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note = mock_single_target_note(account_id, 10); + + insert_committed_notes(conn, std::slice::from_ref(¬e)).unwrap(); + + let block_num = BlockNumber::from(5u32); + notes_failed(conn, &[note.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note.nullifier()], block_num).unwrap(); + + let (attempt_count, last_attempt): (i32, Option) = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e.nullifier())) + .select((schema::notes::attempt_count, schema::notes::last_attempt)) + .first(conn) + .unwrap(); + + assert_eq!(attempt_count, 2); + assert_eq!(last_attempt, Some(conversions::block_num_to_i64(block_num))); +} + +// CHAIN STATE TESTS +// ================================================================================================ + +#[test] +fn upsert_chain_state_updates_singleton() { + let (conn, _dir) = &mut test_conn(); + + let block_num_1 = BlockNumber::from(1u32); + let header_1 = mock_block_header(block_num_1); + upsert_chain_state(conn, block_num_1, &header_1).unwrap(); + + 
// Upsert again with higher block. + let block_num_2 = BlockNumber::from(2u32); + let header_2 = mock_block_header(block_num_2); + upsert_chain_state(conn, block_num_2, &header_2).unwrap(); + + // Should only have one row. + let row_count: i64 = schema::chain_state::table.count().get_result(conn).unwrap(); + assert_eq!(row_count, 1); + + // Should have the latest block number. + let stored_block_num: i64 = schema::chain_state::table + .select(schema::chain_state::block_num) + .first(conn) + .unwrap(); + assert_eq!(stored_block_num, conversions::block_num_to_i64(block_num_2)); +} + +// HELPERS (domain type construction) +// ================================================================================================ + +/// Creates a mock `Account` for a network account. +/// +/// Uses `AccountBuilder` with minimal components needed for serialization. +fn mock_account(_account_id: NetworkAccountId) -> miden_protocol::account::Account { + use miden_protocol::account::auth::PublicKeyCommitment; + use miden_protocol::account::{AccountBuilder, AccountComponent}; + use miden_standards::account::auth::AuthFalcon512Rpo; + + let component_code = miden_standards::code_builder::CodeBuilder::default() + .compile_component_code("test::interface", "pub proc test_proc push.1.2 add end") + .unwrap(); + + let component = + AccountComponent::new(component_code, vec![]).unwrap().with_supports_all_types(); + + AccountBuilder::new([0u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Network) + .with_component(component) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(Word::default()))) + .build_existing() + .unwrap() +} + +/// Creates a mock `BlockHeader` for the given block number. 
+fn mock_block_header(block_num: BlockNumber) -> miden_protocol::block::BlockHeader {
+    miden_protocol::block::BlockHeader::mock(block_num, None, None, &[], Word::default())
+}
diff --git a/crates/ntx-builder/src/db/schema.rs b/crates/ntx-builder/src/db/schema.rs
index 74ee8d462..6a70ee121 100644
--- a/crates/ntx-builder/src/db/schema.rs
+++ b/crates/ntx-builder/src/db/schema.rs
@@ -2,7 +2,7 @@
 diesel::table! {
     accounts (order_id) {
-        order_id -> Nullable<Integer>,
+        order_id -> Integer,
         account_id -> Binary,
         account_data -> Binary,
         transaction_id -> Nullable<Binary>,
@@ -11,8 +11,8 @@ diesel::table! {
     chain_state (id) {
-        id -> Nullable<Integer>,
-        block_num -> Integer,
+        id -> Integer,
+        block_num -> BigInt,
         block_header -> Binary,
     }
 }
@@ -23,7 +23,7 @@ diesel::table! {
         account_id -> Binary,
         note_data -> Binary,
         attempt_count -> Integer,
-        last_attempt -> Nullable<Integer>,
+        last_attempt -> Nullable<BigInt>,
         created_by -> Nullable<Binary>,
         consumed_by -> Nullable<Binary>,
     }
diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs
index 04c631c05..5732cb43f 100644
--- a/crates/ntx-builder/src/lib.rs
+++ b/crates/ntx-builder/src/lib.rs
@@ -1,4 +1,5 @@
 use std::num::NonZeroUsize;
+use std::path::PathBuf;
 use std::sync::Arc;
 
 use actor::AccountActorContext;
@@ -6,6 +7,7 @@ use anyhow::Context;
 use block_producer::BlockProducerClient;
 use builder::{ChainState, MempoolEventStream};
 use coordinator::Coordinator;
+use db::Db;
 use futures::TryStreamExt;
 use miden_node_utils::lru_cache::LruCache;
 use store::StoreClient;
@@ -16,9 +18,6 @@ mod actor;
 mod block_producer;
 mod builder;
 mod coordinator;
-// TODO(santi): Remove this attr when the module is actually used. Dead code lint fails due to the
-// tests.
-#[cfg(test)]
 pub(crate) mod db;
 mod store;
@@ -98,10 +97,18 @@ pub struct NtxBuilderConfig {
     /// Channel size for each actor's event channel.
     pub actor_channel_size: usize,
+
+    /// Path to the SQLite database file used for persistent state.
+ pub database_filepath: PathBuf, } impl NtxBuilderConfig { - pub fn new(store_url: Url, block_producer_url: Url, validator_url: Url) -> Self { + pub fn new( + store_url: Url, + block_producer_url: Url, + validator_url: Url, + database_filepath: PathBuf, + ) -> Self { Self { store_url, block_producer_url, @@ -114,6 +121,7 @@ impl NtxBuilderConfig { max_block_count: DEFAULT_MAX_BLOCK_COUNT, account_channel_capacity: DEFAULT_ACCOUNT_CHANNEL_CAPACITY, actor_channel_size: DEFAULT_ACTOR_CHANNEL_SIZE, + database_filepath, } } @@ -197,8 +205,15 @@ impl NtxBuilderConfig { /// - The mempool subscription fails (after retries) /// - The store contains no blocks (not bootstrapped) pub async fn build(self) -> anyhow::Result { + // Set up the database (bootstrap + connection pool). + let db = Db::setup(self.database_filepath.clone()).await?; + + // Purge inflight state from previous run. + db.purge_inflight().await.context("failed to purge inflight state")?; + let script_cache = LruCache::new(self.script_cache_size); - let coordinator = Coordinator::new(self.max_concurrent_txs, self.actor_channel_size); + let coordinator = + Coordinator::new(self.max_concurrent_txs, self.actor_channel_size, db.clone()); let store = StoreClient::new(self.store_url.clone()); let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); @@ -227,6 +242,11 @@ impl NtxBuilderConfig { } }; + // Store the chain tip in the DB. 
+ db.upsert_chain_state(chain_tip_header.block_num(), chain_tip_header.clone()) + .await + .context("failed to upsert chain state")?; + let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); let actor_context = AccountActorContext { @@ -238,12 +258,14 @@ impl NtxBuilderConfig { script_cache, max_notes_per_tx: self.max_notes_per_tx, max_note_attempts: self.max_note_attempts, + db: db.clone(), }; Ok(NetworkTransactionBuilder::new( self, coordinator, store, + db, chain_state, actor_context, mempool_events, From c6ad6be7c84297cf82a936f788126c6af500183c Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 20 Feb 2026 08:51:10 +0200 Subject: [PATCH 46/77] refactor(prover): combine proxy and worker (#1688) --- CHANGELOG.md | 3 +- Cargo.lock | 1265 ++--------------- Cargo.toml | 8 + Makefile | 2 +- bin/remote-prover/.env | 36 +- bin/remote-prover/Cargo.toml | 68 +- bin/remote-prover/README.md | 344 +---- bin/remote-prover/grafana_dashboard.json | 1082 -------------- bin/remote-prover/prometheus.yml | 16 - bin/remote-prover/src/api/mod.rs | 25 - bin/remote-prover/src/api/prover.rs | 352 ----- bin/remote-prover/src/commands/mod.rs | 125 -- bin/remote-prover/src/commands/proxy.rs | 129 -- .../src/commands/update_workers.rs | 126 -- bin/remote-prover/src/commands/worker.rs | 81 -- bin/remote-prover/src/error.rs | 27 - .../src/generated/conversions.rs | 90 -- bin/remote-prover/src/generated/mod.rs | 4 +- bin/remote-prover/src/lib.rs | 6 - bin/remote-prover/src/main.rs | 16 +- bin/remote-prover/src/proxy/health_check.rs | 70 - bin/remote-prover/src/proxy/metrics.rs | 97 -- bin/remote-prover/src/proxy/mod.rs | 772 ---------- bin/remote-prover/src/proxy/update_workers.rs | 152 -- bin/remote-prover/src/proxy/worker.rs | 419 ------ bin/remote-prover/src/server/mod.rs | 103 ++ bin/remote-prover/src/server/proof_kind.rs | 35 + bin/remote-prover/src/server/prover.rs | 122 ++ 
bin/remote-prover/src/server/service.rs | 88 ++ .../src/{api => server}/status.rs | 17 +- bin/remote-prover/src/server/tests.rs | 372 +++++ bin/remote-prover/src/utils.rs | 178 --- .../prover-proxy/miden-prover-proxy.service | 17 - packaging/prover-proxy/postinst | 28 - packaging/prover-proxy/postrm | 9 - packaging/prover/miden-prover.service | 3 +- packaging/prover/postinst | 20 +- packaging/prover/postrm | 2 - 38 files changed, 961 insertions(+), 5348 deletions(-) delete mode 100644 bin/remote-prover/grafana_dashboard.json delete mode 100644 bin/remote-prover/prometheus.yml delete mode 100644 bin/remote-prover/src/api/mod.rs delete mode 100644 bin/remote-prover/src/api/prover.rs delete mode 100644 bin/remote-prover/src/commands/mod.rs delete mode 100644 bin/remote-prover/src/commands/proxy.rs delete mode 100644 bin/remote-prover/src/commands/update_workers.rs delete mode 100644 bin/remote-prover/src/commands/worker.rs delete mode 100644 bin/remote-prover/src/error.rs delete mode 100644 bin/remote-prover/src/generated/conversions.rs delete mode 100644 bin/remote-prover/src/lib.rs delete mode 100644 bin/remote-prover/src/proxy/health_check.rs delete mode 100644 bin/remote-prover/src/proxy/metrics.rs delete mode 100644 bin/remote-prover/src/proxy/mod.rs delete mode 100644 bin/remote-prover/src/proxy/update_workers.rs delete mode 100644 bin/remote-prover/src/proxy/worker.rs create mode 100644 bin/remote-prover/src/server/mod.rs create mode 100644 bin/remote-prover/src/server/proof_kind.rs create mode 100644 bin/remote-prover/src/server/prover.rs create mode 100644 bin/remote-prover/src/server/service.rs rename bin/remote-prover/src/{api => server}/status.rs (51%) create mode 100644 bin/remote-prover/src/server/tests.rs delete mode 100644 bin/remote-prover/src/utils.rs delete mode 100644 packaging/prover-proxy/miden-prover-proxy.service delete mode 100644 packaging/prover-proxy/postinst delete mode 100644 packaging/prover-proxy/postrm diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 49d044e4c..171a649fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,8 @@ - Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). - Add #[track_caller] to tracing/logging helpers ([#1651](https://github.com/0xMiden/miden-node/pull/1651)). - Improved tracing span fields ([#1650](https://github.com/0xMiden/miden-node/pull/1650)) -- Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/miden-node/pull/1662)). + - Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/miden-node/pull/1662)). +- [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/miden-node/pull/1688)). 
## v0.13.5 (TBD) diff --git a/Cargo.lock b/Cargo.lock index c8a82122c..925fc1725 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,19 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" -dependencies = [ - "cfg-if", - "getrandom 0.3.4", - "once_cell", - "version_check", - "zerocopy", -] - [[package]] name = "aho-corasick" version = "1.1.4" @@ -49,21 +36,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "alloc-no-stdlib" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" -dependencies = [ - "alloc-no-stdlib", -] - [[package]] name = "allocator-api2" version = "0.2.21" @@ -144,15 +116,6 @@ dependencies = [ "backtrace", ] -[[package]] -name = "arc-swap" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" -dependencies = [ - "rustversion", -] - [[package]] name = "arrayref" version = "0.3.9" @@ -206,17 +169,6 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.5.0" @@ -348,7 +300,7 @@ version = "0.72.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.10.0", + "bitflags", "cexpr", "clang-sys", "itertools 0.10.5", @@ -375,27 +327,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - [[package]] name = "bitflags" version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" -[[package]] -name = "blake2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" -dependencies = [ - "digest", -] - [[package]] name = "blake3" version = "1.8.3" @@ -419,27 +356,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "brotli" -version = "3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "2.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - [[package]] name = "bumpalo" version = "3.19.1" @@ -501,34 +417,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cf-rustracing" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f85c3824e4191621dec0551e3cef3d511f329da9a8990bf3e450a85651d97e" -dependencies = [ - 
"backtrace", - "rand 0.8.5", - "tokio", - "trackable", -] - -[[package]] -name = "cf-rustracing-jaeger" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a5f80d44c257c3300a7f45ada676c211e64bbbac591bbec19344a8f61fbcab" -dependencies = [ - "cf-rustracing", - "hostname", - "local-ip-address", - "percent-encoding", - "rand 0.9.2", - "thrift_codec", - "tokio", - "trackable", -] - [[package]] name = "cfg-if" version = "1.0.4" @@ -621,23 +509,6 @@ dependencies = [ "libloading", ] -[[package]] -name = "clap" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" -dependencies = [ - "atty", - "bitflags 1.3.2", - "clap_derive 3.2.25", - "clap_lex 0.2.4", - "indexmap 1.9.3", - "once_cell", - "strsim 0.10.0", - "termcolor", - "textwrap", -] - [[package]] name = "clap" version = "4.5.55" @@ -645,7 +516,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e34525d5bbbd55da2bb745d34b36121baac88d07619a9a09cfcf4a6c0832785" dependencies = [ "clap_builder", - "clap_derive 4.5.55", + "clap_derive", ] [[package]] @@ -656,21 +527,8 @@ checksum = "59a20016a20a3da95bef50ec7238dbd09baeef4311dcdd38ec15aba69812fb61" dependencies = [ "anstream", "anstyle", - "clap_lex 0.7.7", - "strsim 0.11.1", -] - -[[package]] -name = "clap_derive" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" -dependencies = [ - "heck 0.4.1", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", + "clap_lex", + "strsim", ] [[package]] @@ -679,36 +537,18 @@ version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "syn 2.0.114", ] 
-[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - [[package]] name = "clap_lex" version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" -[[package]] -name = "cmake" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" -dependencies = [ - "cc", -] - [[package]] name = "colorchoice" version = "1.0.4" @@ -762,15 +602,6 @@ dependencies = [ "libc", ] -[[package]] -name = "crc32fast" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" -dependencies = [ - "cfg-if", -] - [[package]] name = "criterion" version = "0.5.1" @@ -780,7 +611,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.55", + "clap", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -826,15 +657,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "crossbeam-queue" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "crossbeam-utils" version = "0.8.21" @@ -897,47 +719,14 @@ dependencies = [ "syn 2.0.114", ] -[[package]] -name = "daemonize" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8bfdaacb3c887a54d41bdf48d3af8873b3f5566469f8ba21b92057509f116e" -dependencies = [ - "libc", -] - -[[package]] -name = "darling" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" -dependencies = [ - "darling_core 0.20.11", - "darling_macro 0.20.11", -] - [[package]] name = "darling" version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ - "darling_core 0.21.3", - "darling_macro 0.21.3", -] - -[[package]] -name = "darling_core" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.11.1", - "syn 2.0.114", + "darling_core", + "darling_macro", ] [[package]] @@ -950,18 +739,7 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim 0.11.1", - "syn 2.0.114", -] - -[[package]] -name = "darling_macro" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" -dependencies = [ - "darling_core 0.20.11", - "quote", + "strsim", "syn 2.0.114", ] @@ -971,7 +749,7 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ - "darling_core 0.21.3", + "darling_core", "quote", "syn 2.0.114", ] @@ -1036,48 +814,6 @@ dependencies = [ "powerfmt", ] -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_builder" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" -dependencies = [ - 
"derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" -dependencies = [ - "darling 0.20.11", - "proc-macro2", - "quote", - "syn 2.0.114", -] - -[[package]] -name = "derive_builder_macro" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" -dependencies = [ - "derive_builder_core", - "syn 2.0.114", -] - [[package]] name = "derive_more" version = "2.1.1" @@ -1196,9 +932,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd122633e4bef06db27737f21d3738fb89c8f6d5360d6d9d7635dda142a7757e" dependencies = [ - "darling 0.21.3", + "darling", "either", - "heck 0.5.0", + "heck", "proc-macro2", "quote", "syn 2.0.114", @@ -1375,17 +1111,6 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" -[[package]] -name = "flate2" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" -dependencies = [ - "crc32fast", - "libz-ng-sys", - "miniz_oxide", -] - [[package]] name = "flume" version = "0.11.1" @@ -1404,12 +1129,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foldhash" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" - [[package]] name = "foldhash" version = "0.2.0" @@ -1597,18 +1316,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "getset" -version = "0.1.6" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" -dependencies = [ - "proc-macro-error2", - "proc-macro2", - "quote", - "syn 2.0.114", -] - [[package]] name = "gimli" version = "0.32.3" @@ -1644,7 +1351,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.13.0", + "indexmap", "slab", "tokio", "tokio-util", @@ -1662,23 +1369,6 @@ dependencies = [ "zerocopy", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hashbrown" -version = "0.15.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" -dependencies = [ - "allocator-api2", - "equivalent", - "foldhash 0.1.5", -] - [[package]] name = "hashbrown" version = "0.16.1" @@ -1687,33 +1377,18 @@ checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ "allocator-api2", "equivalent", - "foldhash 0.2.0", + "foldhash", "rayon", "serde", "serde_core", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.5.2" @@ -1744,17 +1419,6 @@ dependencies = [ "digest", ] -[[package]] -name = "hostname" -version = "0.4.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" -dependencies = [ - "cfg-if", - "libc", - "windows-link", -] - [[package]] name = "http" version = "1.4.0" @@ -2038,16 +1702,6 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.13.0" @@ -2055,7 +1709,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", - "hashbrown 0.16.1", + "hashbrown", ] [[package]] @@ -2095,7 +1749,7 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ - "hermit-abi 0.5.2", + "hermit-abi", "libc", "windows-sys 0.61.2", ] @@ -2286,16 +1940,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "libz-ng-sys" -version = "1.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bf914b7dd154ca9193afec311d8e39345c1bd93b48b3faa77329f0db8f553c0" -dependencies = [ - "cmake", - "libc", -] - [[package]] name = "libz-sys" version = "1.1.23" @@ -2307,12 +1951,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -2331,17 +1969,6 @@ version = "0.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" -[[package]] -name = "local-ip-address" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92488bc8a0f99ee9f23577bdd06526d49657df8bd70504c61f812337cdad01ab" -dependencies = [ - "libc", - "neli", - "windows-sys 0.61.2", -] - [[package]] name = "lock_api" version = "0.4.14" @@ -2404,15 +2031,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "lru" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8cc7106155f10bdf99a6f379688f543ad6596a415375b36a59a054ceda1198" -dependencies = [ - "hashbrown 0.15.5", -] - [[package]] name = "lru" version = "0.16.3" @@ -2456,15 +2074,6 @@ version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" -[[package]] -name = "memoffset" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg", -] - [[package]] name = "miden-agglayer" version = "0.14.0" @@ -2489,7 +2098,7 @@ checksum = "ab2f1db9cdbd5da3eaf07fa0a8122d27b575f96b0699388c98f6c0e468cb9c1f" dependencies = [ "miden-core", "miden-utils-indexing", - "thiserror 2.0.18", + "thiserror", "winter-air", "winter-prover", ] @@ -2506,7 +2115,7 @@ dependencies = [ "miden-core", "miden-mast-package", "smallvec", - "thiserror 2.0.18", + "thiserror", ] [[package]] @@ -2530,7 +2139,7 @@ dependencies = [ "rustc_version 0.4.1", "semver 1.0.27", "smallvec", - "thiserror 2.0.18", + "thiserror", ] [[package]] @@ -2539,7 +2148,7 @@ version = "0.14.0" source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-protocol", - "thiserror 2.0.18", + "thiserror", ] 
[[package]] @@ -2559,7 +2168,7 @@ dependencies = [ "num-traits", "proptest", "proptest-derive", - "thiserror 2.0.18", + "thiserror", "winter-math", "winter-utils", ] @@ -2578,7 +2187,7 @@ dependencies = [ "miden-processor", "miden-utils-sync", "sha2", - "thiserror 2.0.18", + "thiserror", ] [[package]] @@ -2594,14 +2203,14 @@ dependencies = [ "ed25519-dalek", "flume", "glob", - "hashbrown 0.16.1", + "hashbrown", "hkdf", "k256", "miden-crypto-derive", "num", "num-complex", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rand_core 0.9.5", "rand_hc", "rayon", @@ -2609,7 +2218,7 @@ dependencies = [ "sha2", "sha3", "subtle", - "thiserror 2.0.18", + "thiserror", "winter-crypto", "winter-math", "winter-utils", @@ -2641,7 +2250,7 @@ dependencies = [ "paste", "serde", "serde_spanned 1.0.4", - "thiserror 2.0.18", + "thiserror", ] [[package]] @@ -2662,7 +2271,7 @@ dependencies = [ "derive_more", "miden-assembly-syntax", "miden-core", - "thiserror 2.0.18", + "thiserror", ] [[package]] @@ -2691,7 +2300,7 @@ dependencies = [ "syn 2.0.114", "terminal_size 0.3.0", "textwrap", - "thiserror 2.0.18", + "thiserror", "trybuild", "unicode-width 0.1.14", ] @@ -2713,7 +2322,7 @@ version = "0.14.0" dependencies = [ "anyhow", "axum", - "clap 4.5.55", + "clap", "hex", "humantime", "miden-node-proto", @@ -2723,8 +2332,8 @@ dependencies = [ "miden-standards", "miden-testing", "miden-tx", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "reqwest", "serde", "serde_json", @@ -2741,7 +2350,7 @@ name = "miden-node" version = "0.14.0" dependencies = [ "anyhow", - "clap 4.5.55", + "clap", "figment", "fs-err", "hex", @@ -2778,12 +2387,12 @@ dependencies = [ "miden-tx", "miden-tx-batch-prover", "pretty_assertions", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rstest", "serial_test", "tempfile", - "thiserror 2.0.18", + "thiserror", "tokio", "tokio-stream", "tonic", @@ -2803,7 +2412,7 @@ dependencies = [ "deadpool-sync", "diesel", "miden-protocol", 
- "thiserror 2.0.18", + "thiserror", "tracing", ] @@ -2833,10 +2442,10 @@ dependencies = [ "miden-standards", "miden-tx", "prost", - "rand_chacha 0.9.0", + "rand_chacha", "rstest", "tempfile", - "thiserror 2.0.18", + "thiserror", "tokio", "tokio-stream", "tokio-util", @@ -2863,7 +2472,7 @@ dependencies = [ "miette", "proptest", "prost", - "thiserror 2.0.18", + "thiserror", "tonic", "tonic-prost", "tonic-prost-build", @@ -2904,7 +2513,7 @@ dependencies = [ "rstest", "semver 1.0.27", "tempfile", - "thiserror 2.0.18", + "thiserror", "tokio", "tokio-stream", "tonic", @@ -2930,7 +2539,7 @@ dependencies = [ "fs-err", "futures", "hex", - "indexmap 2.13.0", + "indexmap", "libsqlite3-sys", "miden-block-prover", "miden-crypto", @@ -2944,12 +2553,12 @@ dependencies = [ "miden-remote-prover-client", "miden-standards", "pretty_assertions", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "regex", "serde", "termtree", - "thiserror 2.0.18", + "thiserror", "tokio", "tokio-stream", "toml 0.9.11+spec-1.1.0", @@ -2964,7 +2573,7 @@ dependencies = [ name = "miden-node-stress-test" version = "0.14.0" dependencies = [ - "clap 4.5.55", + "clap", "fs-err", "futures", "miden-air", @@ -2975,7 +2584,7 @@ dependencies = [ "miden-node-utils", "miden-protocol", "miden-standards", - "rand 0.9.2", + "rand", "rayon", "tokio", "tonic", @@ -3000,15 +2609,15 @@ dependencies = [ "http", "http-body-util", "itertools 0.14.0", - "lru 0.16.3", + "lru", "miden-node-rocksdb-cxx-linkage-fix", "miden-protocol", "opentelemetry", "opentelemetry-otlp", "opentelemetry_sdk", - "rand 0.9.2", + "rand", "serde", - "thiserror 2.0.18", + "thiserror", "tokio", "tonic", "tower-http", @@ -3032,7 +2641,7 @@ dependencies = [ "miden-node-utils", "miden-protocol", "miden-tx", - "thiserror 2.0.18", + "thiserror", "tokio", "tokio-stream", "tonic", @@ -3055,7 +2664,7 @@ dependencies = [ "miden-utils-indexing", "paste", "rayon", - "thiserror 2.0.18", + "thiserror", "tokio", "tracing", "winter-prover", @@ -3079,13 
+2688,13 @@ dependencies = [ "miden-protocol-macros", "miden-utils-sync", "miden-verifier", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rand_xoshiro", "regex", "semver 1.0.27", "serde", - "thiserror 2.0.18", + "thiserror", "toml 0.9.11+spec-1.1.0", "walkdir", "winter-rand-utils", @@ -3121,9 +2730,7 @@ version = "0.14.0" dependencies = [ "anyhow", "async-trait", - "axum", - "bytes", - "clap 4.5.55", + "clap", "http", "humantime", "miden-block-prover", @@ -3138,28 +2745,17 @@ dependencies = [ "miden-tx-batch-prover", "miette", "opentelemetry", - "pingora", - "pingora-core", - "pingora-limits", - "pingora-proxy", - "prometheus 0.14.0", "prost", - "reqwest", - "semver 1.0.27", - "serde", - "serde_qs", - "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", "tonic-health", "tonic-prost", "tonic-prost-build", + "tonic-reflection", "tonic-web", "tower-http", "tracing", - "tracing-opentelemetry", - "uuid", ] [[package]] @@ -3173,7 +2769,7 @@ dependencies = [ "miden-tx", "miette", "prost", - "thiserror 2.0.18", + "thiserror", "tokio", "tonic", "tonic-prost", @@ -3193,9 +2789,9 @@ dependencies = [ "miden-core-lib", "miden-processor", "miden-protocol", - "rand 0.9.2", + "rand", "regex", - "thiserror 2.0.18", + "thiserror", "walkdir", ] @@ -3215,9 +2811,9 @@ dependencies = [ "miden-standards", "miden-tx", "miden-tx-batch-prover", - "rand 0.9.2", - "rand_chacha 0.9.0", - "thiserror 2.0.18", + "rand", + "rand_chacha", + "thiserror", "winterfell", ] @@ -3231,7 +2827,7 @@ dependencies = [ "miden-prover", "miden-standards", "miden-verifier", - "thiserror 2.0.18", + "thiserror", ] [[package]] @@ -3273,7 +2869,7 @@ version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57046b5c263b78e7fa5a6e328ca852e6319cf844faa26fbdcbb128ec555deb2a" dependencies = [ - "thiserror 2.0.18", + "thiserror", ] [[package]] @@ -3295,7 +2891,7 @@ checksum = "fe033af062937938ded511e5238db3bf8e0c1a30205850d62fb23271b3c96f85" dependencies = [ 
"miden-air", "miden-core", - "thiserror 2.0.18", + "thiserror", "tracing", "winter-verifier", ] @@ -3308,7 +2904,7 @@ checksum = "9d4cfab04baffdda3fb9eafa5f873604059b89a1699aa95e4f1057397a69f0b5" dependencies = [ "miden-formatting", "smallvec", - "thiserror 2.0.18", + "thiserror", ] [[package]] @@ -3381,7 +2977,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", - "simd-adler32", ] [[package]] @@ -3427,53 +3022,12 @@ dependencies = [ "tempfile", ] -[[package]] -name = "neli" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" -dependencies = [ - "bitflags 2.10.0", - "byteorder", - "derive_builder", - "getset", - "libc", - "log", - "neli-proc-macros", - "parking_lot", -] - -[[package]] -name = "neli-proc-macros" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609" -dependencies = [ - "either", - "proc-macro2", - "quote", - "serde", - "syn 2.0.114", -] - [[package]] name = "new_debug_unreachable" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" -[[package]] -name = "nix" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", - "memoffset", -] - [[package]] name = "nom" version = "7.1.3" @@ -3590,7 +3144,7 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.5.2", + "hermit-abi", "libc", ] @@ -3633,7 +3187,7 
@@ version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ - "bitflags 2.10.0", + "bitflags", "cfg-if", "foreign-types", "libc", @@ -3687,7 +3241,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.18", + "thiserror", "tracing", ] @@ -3702,7 +3256,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost", - "thiserror 2.0.18", + "thiserror", "tokio", "tonic", ] @@ -3731,18 +3285,12 @@ dependencies = [ "futures-util", "opentelemetry", "percent-encoding", - "rand 0.9.2", - "thiserror 2.0.18", + "rand", + "thiserror", "tokio", "tokio-stream", ] -[[package]] -name = "os_str_bytes" -version = "6.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" - [[package]] name = "owo-colors" version = "4.2.3" @@ -3814,7 +3362,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.13.0", + "indexmap", ] [[package]] @@ -3843,265 +3391,20 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pingora" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a1f02a6347e81953ab831fdcf090a028db12d67ec3badf47831d1299dac6e20" -dependencies 
= [ - "pingora-core", - "pingora-http", - "pingora-load-balancing", - "pingora-proxy", - "pingora-timeout", -] - -[[package]] -name = "pingora-cache" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" -dependencies = [ - "ahash", - "async-trait", - "blake2", - "bytes", - "cf-rustracing", - "cf-rustracing-jaeger", - "hex", - "http", - "httparse", - "httpdate", - "indexmap 1.9.3", - "log", - "lru 0.14.0", - "once_cell", - "parking_lot", - "pingora-core", - "pingora-error", - "pingora-header-serde", - "pingora-http", - "pingora-lru", - "pingora-timeout", - "rand 0.8.5", - "regex", - "rmp", - "rmp-serde", - "serde", - "strum", - "tokio", -] - -[[package]] -name = "pingora-core" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" -dependencies = [ - "ahash", - "async-trait", - "brotli", - "bytes", - "chrono", - "clap 3.2.25", - "daemonize", - "derivative", - "flate2", - "futures", - "h2", - "http", - "httparse", - "httpdate", - "libc", - "log", - "nix", - "once_cell", - "openssl-probe 0.1.6", - "parking_lot", - "percent-encoding", - "pingora-error", - "pingora-http", - "pingora-pool", - "pingora-runtime", - "pingora-timeout", - "prometheus 0.13.4", - "rand 0.8.5", - "regex", - "serde", - "serde_yaml", - "sfv", - "socket2", - "strum", - "strum_macros", - "tokio", - "tokio-test", - "unicase", - "windows-sys 0.59.0", - "zstd", -] - -[[package]] -name = "pingora-error" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52119570d3f4644e09654ad24df2b7d851bf12eaa8c4148b4674c7f90916598e" - -[[package]] -name = "pingora-header-serde" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "252a16def05c7adbbdda776e87b2be36e9481c8a77249207a2f3b563e8933b35" -dependencies = [ 
- "bytes", - "http", - "httparse", - "pingora-error", - "pingora-http", - "thread_local", - "zstd", - "zstd-safe", -] - -[[package]] -name = "pingora-http" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3542fd0fd0a83212882c5066ae739ba51804f20d624ff7e12ec85113c5c89a" -dependencies = [ - "bytes", - "http", - "pingora-error", -] - -[[package]] -name = "pingora-ketama" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f5dd8546b1874d5cfca594375c1cfb852c3dffd4f060428fa031a6e790dea18" -dependencies = [ - "crc32fast", -] - -[[package]] -name = "pingora-limits" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" -dependencies = [ - "ahash", -] - -[[package]] -name = "pingora-load-balancing" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b5bb0314830a64b73b50b3782f3089f87947b61b4324c804d6f8d4ff9ce1c70" -dependencies = [ - "arc-swap", - "async-trait", - "derivative", - "fnv", - "futures", - "http", - "log", - "pingora-core", - "pingora-error", - "pingora-http", - "pingora-ketama", - "pingora-runtime", - "rand 0.8.5", - "tokio", -] - -[[package]] -name = "pingora-lru" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" -dependencies = [ - "arrayvec", - "hashbrown 0.16.1", - "parking_lot", - "rand 0.8.5", -] - -[[package]] -name = "pingora-pool" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "996c574f30a6e1ad10b47ac1626a86e0e47d5075953dd049d60df16ba5f7076e" -dependencies = [ - "crossbeam-queue", - "log", - "lru 0.14.0", - "parking_lot", - "pingora-timeout", - "thread_local", - "tokio", -] - -[[package]] -name = "pingora-proxy" -version = "0.6.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c4097fd2639905bf5b81f3618551cd826d5e03aac063e17fd7a4137f19c1a5b" -dependencies = [ - "async-trait", - "bytes", - "clap 3.2.25", - "futures", - "h2", - "http", - "log", - "once_cell", - "pingora-cache", - "pingora-core", - "pingora-error", - "pingora-http", - "rand 0.8.5", - "regex", - "tokio", -] - -[[package]] -name = "pingora-runtime" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccc165021cf55a39b9e760121b22c4260b17a0b2c530d5b93092fc5bc765b94" -dependencies = [ - "once_cell", - "rand 0.8.5", - "thread_local", - "tokio", + "syn 2.0.114", ] [[package]] -name = "pingora-timeout" -version = "0.6.0" +name = "pin-project-lite" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "548cd21d41611c725827677937e68f2cd008bbfa09f3416d3fbad07e1e42f6d7" -dependencies = [ - "once_cell", - "parking_lot", - "pin-project-lite", - "thread_local", - "tokio", -] +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" @@ -4232,52 +3535,6 @@ dependencies = [ "toml_edit 0.23.10+spec-1.0.0", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - 
-[[package]] -name = "proc-macro-error-attr2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" -dependencies = [ - "proc-macro2", - "quote", -] - -[[package]] -name = "proc-macro-error2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" -dependencies = [ - "proc-macro-error-attr2", - "proc-macro2", - "quote", - "syn 2.0.114", -] - [[package]] name = "proc-macro2" version = "1.0.106" @@ -4300,36 +3557,6 @@ dependencies = [ "yansi", ] -[[package]] -name = "prometheus" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" -dependencies = [ - "cfg-if", - "fnv", - "lazy_static", - "memchr", - "parking_lot", - "protobuf 2.28.0", - "thiserror 1.0.69", -] - -[[package]] -name = "prometheus" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ca5326d8d0b950a9acd87e6a3f94745394f62e4dae1b1ee22b2bc0c394af43a" -dependencies = [ - "cfg-if", - "fnv", - "lazy_static", - "memchr", - "parking_lot", - "protobuf 3.7.2", - "thiserror 2.0.18", -] - [[package]] name = "proptest" version = "1.9.0" @@ -4338,10 +3565,10 @@ checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.10.0", + "bitflags", "num-traits", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rand_xorshift", "regex-syntax", "rusty-fork", @@ -4376,7 +3603,7 @@ version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ - "heck 0.5.0", + "heck", "itertools 0.14.0", "log", "multimap", @@ -4426,32 +3653,6 @@ dependencies = [ "prost", ] 
-[[package]] -name = "protobuf" -version = "2.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" - -[[package]] -name = "protobuf" -version = "3.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4" -dependencies = [ - "once_cell", - "protobuf-support", - "thiserror 1.0.69", -] - -[[package]] -name = "protobuf-support" -version = "3.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6" -dependencies = [ - "thiserror 1.0.69", -] - [[package]] name = "protox" version = "0.9.1" @@ -4464,7 +3665,7 @@ dependencies = [ "prost-reflect", "prost-types", "protox-parse", - "thiserror 2.0.18", + "thiserror", ] [[package]] @@ -4476,7 +3677,7 @@ dependencies = [ "logos", "miette", "prost-types", - "thiserror 2.0.18", + "thiserror", ] [[package]] @@ -4485,7 +3686,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ - "bitflags 2.10.0", + "bitflags", "memchr", "unicase", ] @@ -4520,37 +3721,16 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - [[package]] name = "rand" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ - "rand_chacha 0.9.0", + "rand_chacha", 
"rand_core 0.9.5", ] -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", -] - [[package]] name = "rand_chacha" version = "0.9.0" @@ -4632,7 +3812,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.10.0", + "bitflags", ] [[package]] @@ -4734,25 +3914,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rmp" -version = "0.8.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ba8be72d372b2c9b35542551678538b562e7cf86c3315773cae48dfbfe7790c" -dependencies = [ - "num-traits", -] - -[[package]] -name = "rmp-serde" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f81bee8c8ef9b577d1681a70ebbc962c232461e397b22c208c43c04b67a155" -dependencies = [ - "rmp", - "serde", -] - [[package]] name = "rocksdb" version = "0.24.0" @@ -4769,8 +3930,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" dependencies = [ - "hashbrown 0.16.1", - "thiserror 2.0.18", + "hashbrown", + "thiserror", ] [[package]] @@ -4802,16 +3963,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "rust_decimal" -version = "1.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" -dependencies = [ - "arrayvec", - "num-traits", -] - [[package]] name = "rustc-demangle" version = "0.1.27" @@ -4848,7 +3999,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.10.0", + "bitflags", "errno", "libc", "linux-raw-sys 0.4.15", @@ -4861,7 +4012,7 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ - "bitflags 2.10.0", + "bitflags", "errno", "libc", "linux-raw-sys 0.11.0", @@ -5004,7 +4155,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.10.0", + "bitflags", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -5017,7 +4168,7 @@ version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags 2.10.0", + "bitflags", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -5113,17 +4264,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "serde_qs" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3faaf9e727533a19351a43cc5a8de957372163c7d35cc48c90b75cdda13c352" -dependencies = [ - "percent-encoding", - "serde", - "thiserror 2.0.18", -] - [[package]] name = "serde_spanned" version = "0.6.9" @@ -5154,18 +4294,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_yaml" -version = "0.8.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" -dependencies = [ - "indexmap 1.9.3", - "ryu", - "serde", - "yaml-rust", -] - [[package]] name = "serial_test" version = "3.3.1" @@ -5192,17 +4320,6 @@ dependencies = [ "syn 2.0.114", ] -[[package]] -name = "sfv" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3fa1f336066b758b7c9df34ed049c0e693a426afe2b27ff7d5b14f410ab1a132" -dependencies = [ - "base64", - "indexmap 2.13.0", - "rust_decimal", -] - [[package]] name = "sha2" version = "0.10.9" @@ -5259,12 +4376,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "simd-adler32" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" - [[package]] name = "siphasher" version = "1.0.2" @@ -5357,40 +4468,12 @@ dependencies = [ "vte", ] -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" -[[package]] -name = "strum" -version = "0.26.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.26.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" -dependencies = [ - "heck 0.5.0", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.114", -] - [[package]] name = "subtle" version = "2.6.1" @@ -5466,7 +4549,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.10.0", + "bitflags", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -5555,33 +4638,13 @@ dependencies = [ "unicode-width 0.2.2", ] -[[package]] -name = "thiserror" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" -dependencies = [ - "thiserror-impl 1.0.69", -] - [[package]] name = "thiserror" version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl 2.0.18", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", + "thiserror-impl", ] [[package]] @@ -5604,16 +4667,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "thrift_codec" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d957f535b242b91aa9f47bde08080f9a6fef276477e55b0079979d002759d5" -dependencies = [ - "byteorder", - "trackable", -] - [[package]] name = "time" version = "0.3.47" @@ -5725,17 +4778,6 @@ dependencies = [ "tokio-util", ] -[[package]] -name = "tokio-test" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6d24790a10a7af737693a3e8f1d03faef7e6ca0cc99aae5066f533766de545" -dependencies = [ - "futures-core", - "tokio", - "tokio-stream", -] - [[package]] name = "tokio-util" version = "0.7.18" @@ -5767,7 +4809,7 @@ version = "0.9.11+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" dependencies = [ - "indexmap 2.13.0", + "indexmap", "serde_core", "serde_spanned 1.0.4", "toml_datetime 0.7.5+spec-1.1.0", @@ -5800,7 +4842,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.13.0", + "indexmap", "serde", "serde_spanned 0.6.9", "toml_datetime 
0.6.11", @@ -5814,7 +4856,7 @@ version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.13.0", + "indexmap", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", @@ -5972,7 +5014,7 @@ dependencies = [ "httparse", "js-sys", "pin-project", - "thiserror 2.0.18", + "thiserror", "tonic", "tower-service", "wasm-bindgen", @@ -5989,7 +5031,7 @@ checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", - "indexmap 2.13.0", + "indexmap", "pin-project-lite", "slab", "sync_wrapper", @@ -6006,7 +5048,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags 2.10.0", + "bitflags", "bytes", "futures-util", "http", @@ -6073,7 +5115,7 @@ checksum = "3298fe855716711a00474eceb89cc7dc254bbe67f6bc4afafdeec5f0c538771c" dependencies = [ "chrono", "smallvec", - "thiserror 2.0.18", + "thiserror", "tracing", "tracing-subscriber", ] @@ -6136,25 +5178,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "trackable" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15bd114abb99ef8cee977e517c8f37aee63f184f2d08e3e6ceca092373369ae" -dependencies = [ - "trackable_derive", -] - -[[package]] -name = "trackable_derive" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "try-lock" version = "0.2.5" @@ -6275,17 +5298,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" -[[package]] -name = "uuid" 
-version = "1.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" -dependencies = [ - "getrandom 0.3.4", - "js-sys", - "wasm-bindgen", -] - [[package]] name = "valuable" version = "0.1.1" @@ -6448,22 +5460,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - [[package]] name = "winapi-util" version = "0.1.11" @@ -6473,12 +5469,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - [[package]] name = "windows-core" version = "0.62.2" @@ -6865,7 +5855,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4ff3b651754a7bd216f959764d0a5ab6f4b551c9a3a08fb9ccecbed594b614a" dependencies = [ - "rand 0.9.2", + "rand", "winter-utils", ] @@ -6924,15 +5914,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "yansi" version = "1.0.1" @@ -7047,31 +6028,3 @@ name = "zmij" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" - -[[package]] -name = "zstd" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "7.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" -dependencies = [ - "zstd-sys", -] - -[[package]] -name = "zstd-sys" -version = "2.0.16+zstd.1.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" -dependencies = [ - "cc", - "pkg-config", -] diff --git a/Cargo.toml b/Cargo.toml index 116e3548c..219fd51b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,10 @@ version = "0.14.0" [profile.test.package.miden-crypto] opt-level = 2 +# Avoid running the expensive debug assertion in winter-prover +[profile.test.package.winter-prover] +debug-assertions = false + [profile.release] debug = true @@ -132,3 +136,7 @@ must_use_candidate = "allow" # This marks many fn's which isn't helpfu needless_for_each = "allow" # Context dependent if that's useful. should_panic_without_expect = "allow" # We don't care about the specific panic message. # End of pedantic lints. + +# Configure `cargo-typos` +[workspace.metadata.typos] +files.extend-exclude = ["*.svg"] # Ignore SVG files. 
diff --git a/Makefile b/Makefile index fd1408f70..8eb443544 100644 --- a/Makefile +++ b/Makefile @@ -107,7 +107,7 @@ install-node: ## Installs node .PHONY: install-remote-prover install-remote-prover: ## Install remote prover's CLI - $(BUILD_PROTO) cargo install --path bin/remote-prover --bin miden-remote-prover --features concurrent --locked + $(BUILD_PROTO) cargo install --path bin/remote-prover --bin miden-remote-prover --locked .PHONY: stress-test-smoke stress-test: ## Runs stress-test benchmarks diff --git a/bin/remote-prover/.env b/bin/remote-prover/.env index 05593e698..b7191203d 100644 --- a/bin/remote-prover/.env +++ b/bin/remote-prover/.env @@ -1,32 +1,6 @@ -# For more info use -h on the relevant commands: -# miden-remote-prover start-worker -h -# miden-remote-prover start-proxy -h +# For more info consult the help output: `miden-remote-prover --help` -# Proxy ############################ -# Port of the proxy -MRP_PORT=8082 -# Port to add / remove workers -MRP_CONTROL_PORT=8083 -# Uncomment the following line to enable Prometheus metrics on port 6192 -# MRP_METRICS_PORT=6192 -MRP_TIMEOUT=100s -MRP_CONNECTION_TIMEOUT=10s -MRP_MAX_QUEUE_ITEMS=10 -MRP_MAX_RETRIES_PER_REQUEST=1 -MRP_MAX_REQ_PER_SEC=5 -MRP_AVAILABLE_WORKERS_POLLING_INTERVAL=20ms -MRP_HEALTH_CHECK_INTERVAL=1s -MRP_ENABLE_METRICS=false -MRP_PROOF_TYPE=transaction -MRP_PROXY_WORKERS_LIST=127.0.0.1:50051 -MRP_GRACE_PERIOD=20s -MRP_GRACEFUL_SHUTDOWN_TIMEOUT=5s -RUST_LOG=info -#################################### - -# Worker ########################### -# Use 127.0.0.1 instead of 0.0.0.0 -MRP_WORKER_LOCALHOST=false -MRP_WORKER_PORT=50051 -MRP_WORKER_PROOF_TYPE=transaction -#################################### +MIDEN_PROVER_PORT=8082 +MIDEN_PROVER_KIND=transaction +MIDEN_PROVER_TIMEOUT=100s +MIDEN_PROVER_CAPACITY=10 diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index adb60f7a8..7a3b6a059 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -1,6 
+1,6 @@ [package] authors.workspace = true -description = "Miden blockchain remote prover" +description = "Miden remote prover" edition.workspace = true homepage.workspace = true keywords = ["miden", "prover", "remote"] @@ -11,53 +11,33 @@ repository.workspace = true rust-version.workspace = true version.workspace = true -[[bin]] -name = "miden-remote-prover" -path = "src/main.rs" - -[features] -concurrent = ["miden-tx/concurrent"] -default = ["concurrent"] - [lints] workspace = true [dependencies] -anyhow = { workspace = true } -async-trait = { version = "0.1" } -axum = { version = "0.8" } -bytes = { version = "1.0" } -clap = { features = ["env"], workspace = true } -http = { workspace = true } -humantime = { workspace = true } -miden-block-prover = { workspace = true } -miden-node-proto = { workspace = true } -miden-node-utils = { workspace = true } -miden-protocol = { features = ["std"], workspace = true } -miden-tx = { features = ["std"], workspace = true } -miden-tx-batch-prover = { features = ["std"], workspace = true } -opentelemetry = { version = "0.31" } -pingora = { features = ["lb"], version = "0.6" } -pingora-core = { version = "0.6" } -pingora-limits = { version = "0.6" } -pingora-proxy = { version = "0.6" } -prometheus = { version = "0.14" } -prost = { default-features = false, features = ["derive"], workspace = true } -reqwest = { version = "0.12" } -semver = { version = "1.0" } -serde = { features = ["derive"], version = "1.0" } -serde_qs = { version = "0.15" } -thiserror = { workspace = true } -tokio = { features = ["full"], workspace = true } -tokio-stream = { features = ["net"], version = "0.1" } -tonic = { default-features = false, features = ["codegen", "router", "transport"], version = "0.14" } -tonic-health = { version = "0.14" } -tonic-prost = { workspace = true } -tonic-web = { version = "0.14" } -tower-http = { features = ["trace"], workspace = true } -tracing = { workspace = true } -tracing-opentelemetry = { version = "0.32" } -uuid = { 
features = ["v4"], version = "1.16" } +anyhow = { workspace = true } +async-trait = { version = "0.1" } +clap = { features = ["env"], workspace = true } +http = { workspace = true } +humantime = { workspace = true } +miden-block-prover = { workspace = true } +miden-node-proto = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-utils = { workspace = true } +miden-protocol = { features = ["std"], workspace = true } +miden-tx = { features = ["concurrent", "std"], workspace = true } +miden-tx-batch-prover = { features = ["std"], workspace = true } +opentelemetry = { version = "0.31" } +prost = { default-features = false, features = ["derive"], workspace = true } +tokio = { features = ["full"], workspace = true } +tokio-stream = { features = ["net"], version = "0.1" } +tonic = { default-features = false, features = ["codegen", "router", "transport"], version = "0.14" } +tonic-health = { version = "0.14" } +tonic-prost = { workspace = true } +tonic-reflection = { workspace = true } +tonic-web = { version = "0.14" } +tower-http = { features = ["trace"], workspace = true } +tracing = { workspace = true } [dev-dependencies] miden-protocol = { features = ["testing"], workspace = true } diff --git a/bin/remote-prover/README.md b/bin/remote-prover/README.md index 476e2293f..364cfd56b 100644 --- a/bin/remote-prover/README.md +++ b/bin/remote-prover/README.md @@ -1,14 +1,42 @@ # Miden remote prover -A service for generating Miden proofs on-demand. The binary enables spawning workers and a proxy for Miden's remote prover. It currently supports proving individual transactions, transaction batches, and blocks. +A gRPC server which provides a service for proving either transactions, batches or blocks for the Miden blockchain. -A worker is a gRPC service that can receive transaction witnesses, proposed batches, or proposed blocks, prove them, and return the generated proofs. 
It can handle only one request at a time and will return an error if it is already in use. Each worker is specialized on startup to handle exactly one type of proof requests - transactions, batches, or blocks. +This enables weaker devices to offload the proof generation to a beefy remote server running this service. -The proxy uses [Cloudflare's Pingora crate](https://crates.io/crates/pingora), which provides features to create a modular proxy. It is meant to handle multiple workers with a queue, assigning a worker to each request and retrying if the worker is not available. Further information about Pingora and its features can be found in the [official GitHub repository](https://github.com/cloudflare/pingora). +The implementation provides a configurable request queue and proves one request at a time in FIFO order. This is not intended to cover +complex proxy setups nor load-balancing, but can instead be used as a starting point for more advanced setups. -## Debian Installation +The gRPC specification can be found in the [Miden repository](https://github.com/0xMiden/miden-node/blob/main/proto/proto/remote_prover.proto). +Ensure you are viewing the appropriate version tag or commit. -#### Prover +## Quick start + +```bash +# Install the binary. +cargo install miden-remote-prover --locked + +# and start as a transaction prover. +miden-remote-prover \ + --kind transaction \ # Specify the kind of proof to generate (transaction, batch, or block) + --port 50051 +``` + +In a separate terminal, inspect the available services using grpcurl and reflection. + +```bash +grpcurl -plaintext localhost:50051 list +``` + +or query the status of the prover. 
+ +```bash +grpcurl -plaintext localhost:50051 remote_prover.WorkerStatusApi/Status +``` + +## Installation + +### Debian package Install the Debian package: @@ -33,32 +61,7 @@ sudo systemctl enable miden-prover sudo systemctl start miden-prover ``` -#### Prover Proxy - -```bash -set -e - -sudo wget https://github.com/0xMiden/miden-node/releases/download/v0.8/miden-prover-proxy-v0.8-arm64.deb -O prover-proxy.deb -sudo wget -q -O - https://github.com/0xMiden/miden-node/releases/download/v0.8/miden-prover-proxy-v0.8-arm64.deb.checksum | awk '{print $1}' | sudo tee prover-proxy.checksum -sudo sha256sum prover-proxy.deb | awk '{print $1}' > prover-proxy.sha256 -sudo diff prover-proxy.sha256 prover-proxy.checksum -sudo dpkg -i prover-proxy.deb -sudo rm prover-proxy.deb -``` - -Edit the configuration file `/lib/systemd/system/miden-prover-proxy.service.env` - -Edit the service file to specify workers `/lib/systemd/system/miden-prover-proxy.service` - -Run the service: - -```bash -sudo systemctl daemon-reload -sudo systemctl enable miden-prover-proxy -sudo systemctl start miden-prover-proxy -``` - -## Source Installation +### From source To build the service from a local version, from the root of the workspace you can run: @@ -68,274 +71,73 @@ make install-remote-prover The CLI can be installed from the source code using specific git revisions with `cargo install` or from crates.io with `cargo install miden-remote-prover`. -## Worker - -To start the worker service you will need to run: - -```bash -miden-remote-prover start-worker --port 8082 --prover-type transaction -``` - -This will spawn a worker using the port defined in the command option. The host will be 0.0.0.0 by default, or 127.0.0.1 if the --localhost flag is used. In case that the port is not provided, it will default to `50051`. This command will start a worker that can handle transaction and batch proving requests. 
- -The `--prover-type` flag is required and specifies which type of proof the worker will handle. The available options are: - -- `transaction`: For transaction proofs -- `batch`: For batch proofs -- `block`: For block proofs - -Each worker can only handle one type of proof. If you need to handle multiple proof types, you should start multiple workers, each with a different proof type. Additionally, you can use the `--localhost` flag to bind to 127.0.0.1 instead of 0.0.0.0. - -### Worker Configuration - -The worker can be configured using the following environment variables: - -| Variable | Description | Default | -|---------------------------|---------------------------------|---------------| -| `MRP_WORKER_LOCALHOST` | Use localhost (127.0.0.1) | `false` | -| `MRP_WORKER_PORT` | The port number for the worker | `50051` | -| `MRP_WORKER_PROOF_TYPE` | The supported prover type | `transaction` | +## Configuration -For example: +Quick start: ```bash -export MRP_WORKER_LOCALHOST="true" -export MRP_WORKER_PORT="8082" -export MRP_WORKER_PROOF_TYPE="block" -miden-remote-prover start-worker +miden-remote-prover --kind transaction ``` -## Proxy - -To start the proxy service, you will need to run: - -```bash -miden-remote-prover start-proxy --prover-type transaction --workers [worker1],[worker2],...,[workerN] -``` - -For example: +The prover can be further configured from the command line or using environment variables as per the help message: ```bash -miden-remote-prover start-proxy --prover-type transaction --workers 0.0.0.0:8084,0.0.0.0:8085 -``` +> miden-remote-prover --help -This command will start the proxy using the workers passed as arguments. The workers should be in the format `host:port`. Another way to specify the workers is by using the `MRP_PROXY_WORKERS_LIST` environment variable, which can be set to a comma-separated list of worker addresses. 
For example: +Usage: miden-remote-prover [OPTIONS] --kind -```bash -export MRP_PROXY_WORKERS_LIST="0.0.0.0:8084,0.0.0.0:8085" -``` +Options: + --port + The port the gRPC server will be hosted on -If no workers are passed, the proxy will start without any workers and will not be able to handle any requests until one is added through the `miden-remote-prover add-worker` command. + [env: MIDEN_PROVER_PORT=] + [default: 50051] -The `--prover-type` flag is required and specifies which type of proof the proxy will handle. The available options are: + --kind + The proof type that the prover will be handling -- `transaction`: For transaction proofs -- `batch`: For batch proofs -- `block`: For block proofs + [env: MIDEN_PROVER_KIND=] + [possible values: transaction, batch, block] -The proxy can only handle one type of proof at a time. When you add workers to the proxy, it will check their supported proof type. Workers that support a different proof type than the proxy will be marked as unhealthy and will not be used for proving requests. + --timeout + Maximum time allowed for a proof request to complete. Once exceeded, the request is aborted -For example, if you start a proxy with `--prover-type transaction` and add these workers: + [env: MIDEN_PROVER_TIMEOUT=] + [default: 60s] -- Worker 1: Transaction proofs (Healthy) -- Worker 2: Batch proofs (Unhealthy - incompatible proof type) -- Worker 3: Block proofs (Unhealthy - incompatible proof type) + --capacity + Maximum number of concurrent proof requests that the prover will allow. -Only Worker 1 will be used for proving requests, while Workers 2 and 3 will be marked as unhealthy due to incompatible proof types. + Note that the prover only proves one request at a time; the rest are queued. + This capacity is used to limit the number of requests that can be queued at any given time, + and includes the one request that is currently being processed. -You can customize the proxy service by setting environment variables. 
Possible customizations can be found by running `miden-remote-prover start-proxy --help`. + [env: MIDEN_PROVER_CAPACITY=] + [default: 1] -An example `.env` file is provided in the crate's root directory. To use the variables from a file in any Unix-like operating system, you can run `source `. - -At the moment, when a worker added to the proxy stops working and can not connect to it for a request, the connection is marked as retriable meaning that the proxy will try reaching another worker. The number of retries is configurable via the `MRP_MAX_RETRIES_PER_REQUEST` environmental variable. - -## Updating workers on a running proxy - -To update the workers on a running proxy, two commands are provided: `add-workers` and `remove-workers`. These commands will update the workers on the proxy and will not require a restart. To use these commands, you will need to run: - -```bash -miden-remote-prover add-workers --control-port [worker1],[worker2],...,[workerN] -miden-remote-prover remove-workers --control-port [worker1],[worker2],...,[workerN] + -h, --help + Print help (see a summary with '-h') ``` -For example: +## Status, health and monitoring -```bash -# To add 0.0.0.0:8085 and 200.58.70.4:50051 to the workers list: -miden-remote-prover add-workers --control-port 8083 0.0.0.0:8085,200.58.70.4:50051 -# To remove 158.12.12.3:8080 and 122.122.6.6:50051 from the workers list: -miden-remote-prover remove-workers --control-port 8083 158.12.12.3:8080,122.122.6.6:50051 -``` +The server implements the following health and status related gRPC services: -These commands can receive the list of workers to update as a comma-separated list of addresses through the `MRP_PROXY_WORKERS_LIST` environment variable, or as command-line arguments: +- [gRPC Health Check](https://grpc.io/docs/guides/health-checking/) +- [gRPC Reflection](https://grpc.io/docs/guides/reflection/) +- [WorkerStatusApi](https://github.com/0xMiden/miden-node/blob/main/proto/proto/remote_prover.proto) -```bash 
-export MRP_PROXY_WORKERS_LIST="0.0.0.0:8085,200.58.70.4:50051" -miden-remote-prover add-workers --control-port 8083 -miden-remote-prover remove-workers --control-port 8083 -``` +The server supports OpenTelemetry traces which can be configured using the environment variables specified in the OpenTelemetry documentation. -The `--control-port` flag is required to specify the port where the proxy is listening for updates. The workers are passed as arguments in the format `host:port`. The port can be specified via the `MRP_CONTROL_PORT` environment variable. For example: +For example, to send the traces to [HoneyComb](https://www.honeycomb.io/): ```bash -export MRP_CONTROL_PORT="8083" -miden-remote-prover add-workers 0.0.0.0:8085 +OTEL_SERVICE_NAME=miden-remote-prover +OTEL_EXPORTER_OTLP_ENDPOINT=https://api.honeycomb.io +OTEL_EXPORTER_OTLP_HEADERS=x-honeycomb-team= ``` -Note that, in order to update the workers, the proxy must be running in the same computer as the command is being executed because it will check if the client address is localhost to avoid any security issues. - -### Health check - -The worker service implements the [gRPC Health Check](https://grpc.io/docs/guides/health-checking/) standard, and includes the methods described in this [official proto file](https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto). - -The proxy service uses this health check to determine if a worker is available to receive requests. If a worker is not available, it will be removed from the set of workers that the proxy can use to send requests. - -### Status check - -The worker service implements a custom status check that returns information about the worker's current state and supported proof type. The proxy service uses this status check to determine if a worker is available to receive requests and if it supports the required proof type. 
If a worker is not available or doesn't support the required proof type, it will be removed from the set of workers that the proxy can use to send requests. - -The status check returns: - -- Whether the worker is ready to process requests -- The type of proofs the worker supports (transaction, batch, or block proofs) -- The version of the worker - -### Proxy Status Endpoint - -The proxy service exposes a gRPC status endpoint that provides information about the current state of the proxy and its workers. This endpoint implements the `ProxyStatusApi` service defined in `proxy_status.proto`. - -#### gRPC Service Definition - -The status service provides the following method: - -- `Status(ProxyStatusRequest) -> ProxyStatusResponse`: Returns the current status of the proxy and all its workers - -#### Response Format - -The gRPC response includes the following information: - -- `version`: The version of the proxy -- `supported_proof_type`: The type of proof that the proxy supports (`TRANSACTION`, `BATCH`, or `BLOCK`) -- `workers`: A list of workers with their status information - -Each worker status includes: - -- `address`: The worker's network address -- `version`: The worker's version -- `status`: The worker's health status (`UNKNOWN`, `HEALTHY`, or `UNHEALTHY`) - -#### Example Usage - -You can query the status endpoint using a gRPC client. For example, using `grpcurl`: - -```bash -# Assuming the proxy is running on port 8084 -grpcurl -plaintext -import-path ./proto -proto proxy_status.proto \ - -d '{}' localhost:8084 proxy_status.ProxyStatusApi.Status -``` - -Example response: - -```json -{ - "version": "0.8.0", - "supported_proof_type": "TRANSACTION", - "workers": [ - { - "address": "0.0.0.0:50051", - "version": "0.8.0", - "status": "UNHEALTHY" - }, - { - "address": "0.0.0.0:50052", - "version": "0.8.0", - "status": "HEALTHY" - } - ] -} -``` - -The status endpoint is integrated into the main proxy service and uses the same port as the proxy. 
The status information is automatically updated during health checks, ensuring it reflects the current state of all workers. - -## Logging and Tracing - -The service uses the [`tracing`](https://docs.rs/tracing/latest/tracing/) crate for both logging and distributed tracing, providing structured, high-performance logs and trace data. - -By default, logs are written to `stdout` and the default logging level is `info`. This can be changed via the `RUST_LOG` environment variable. For example: - -``` -export RUST_LOG=debug -``` - -For tracing, we use OpenTelemetry protocol. By default, traces are exported to the endpoint specified by `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable. To consume and visualize these traces we can use Jaeger or any other OpenTelemetry compatible consumer. - -The simplest way to install Jaeger is by using a [Docker](https://www.docker.com/) container. To do so, run: - -```bash -docker run -d -p4317:4317 -p16686:16686 jaegertracing/all-in-one:latest -``` - -Then access the Jaeger UI at `http://localhost:16686/`. - -If Docker is not an option, Jaeger can also be set up directly on your machine or hosted in the cloud. See the [Jaeger documentation](https://www.jaegertracing.io/docs/) for alternative installation methods. - -## Metrics - -The proxy includes a service that can optionally expose metrics to be consumed by [Prometheus](https://prometheus.io/docs/introduction/overview/). This service is enabled by specifying a metrics port. - -### Enabling Prometheus Metrics - -To enable Prometheus metrics, simply specify a port on which to expose the metrics. This can be done via environment variables or command-line arguments. - -#### Using Environment Variables - -Set the following environment variable: - -```bash -export MRP_METRICS_PORT=6192 # Set to enable metrics on port 6192 -``` - -To disable metrics, simply don't set the MRP_METRICS_PORT environment variable. 
- -#### Using Command-Line Arguments - -Specify a metrics port using the `--metrics-port` flag when starting the proxy: - -```bash -miden-remote-prover start-proxy --metrics-port 6192 [worker1] [worker2] ... [workerN] -``` - -If you don't specify a metrics port, metrics will be disabled. - -When enabled, the Prometheus metrics will be available at `http://0.0.0.0:` (e.g., `http://0.0.0.0:6192`). - -The metrics architecture works by having the proxy expose metrics at an endpoint (`/metrics`) in a format Prometheus can read. Prometheus periodically scrapes this endpoint, adds timestamps to the metrics, and stores them in its time-series database. Then, we can use tools like Grafana to query Prometheus and visualize these metrics in configurable dashboards. - -The simplest way to install Prometheus and Grafana is by using Docker containers. To do so, run: - -```bash -docker run \ - -d \ - -p 9090:9090 \ - -v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml \ - prom/prometheus - -docker run -d -p 3000:3000 --name grafana grafana/grafana-enterprise:latest -``` - -In case that Docker is not an option, Prometheus and Grafana can also be set up directly on your machine or hosted in the cloud. See the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/getting_started/) and [Grafana documentation](https://grafana.com/docs/grafana/latest/setup-grafana/) for alternative installation methods. - -A prometheus configuration file is provided in this repository, you will need to modify the `scrape_configs` section to include the URL of the proxy service (e.g., `http://0.0.0.0:6192`). - -Then, to add the new Prometheus collector as a datasource for Grafana, you can [follow this tutorial](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/existing-datasource/). 
A Grafana dashboard under the name `proxy_grafana_dashboard.json` is provided, see this [link](https://grafana.com/docs/grafana/latest/dashboards/build-dashboards/import-dashboards/) to import it. Otherwise, you can [create your own dashboard](https://grafana.com/docs/grafana/latest/getting-started/build-first-dashboard/) using the metrics provided by the proxy and export it by following this [link](https://grafana.com/docs/grafana/latest/dashboards/share-dashboards-panels/#export-a-dashboard-as-json). - -## Features - -Description of this crate's feature: - -| Features | Description | -| ------------ | ------------------------------------------------------ | -| `concurrent` | Enables concurrent code to speed up runtime execution. | +A self-hosted alternative is [Jaeger](https://www.jaegertracing.io/). ## License diff --git a/bin/remote-prover/grafana_dashboard.json b/bin/remote-prover/grafana_dashboard.json deleted file mode 100644 index bc391feba..000000000 --- a/bin/remote-prover/grafana_dashboard.json +++ /dev/null @@ -1,1082 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": 1, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 18, - "panels": [], - "title": "Requests", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "red", - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, 
- "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "reqpm" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Total requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "blue", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Failed requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "red", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Accepted requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "green", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 9, - "x": 0, - "y": 1 - }, - "id": 10, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "sum(rate(request_count[1m]))", - "hide": false, - "instant": false, - "legendFormat": "Total requests", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "sum(rate(request_count[1m])) - sum(rate(rate_limited_requests[1m])) - sum(rate(queue_drop_count[1m]))", - "hide": false, - "instant": false, - "legendFormat": "Accepted requests", - 
"range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "sum(rate(request_failure_count[1m]))", - "legendFormat": "Failed requests", - "range": true, - "refId": "A" - } - ], - "title": "Requests", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqpm" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Rate limited requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "orange", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Queue overflow requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "purple", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 9, - "x": 9, - "y": 1 - }, - "id": 16, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - 
"uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(rate_limited_requests[1m])", - "hide": false, - "instant": false, - "legendFormat": "Rate limited requests", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(queue_drop_count[1m])", - "hide": false, - "instant": false, - "legendFormat": "Queue overflow requests", - "range": true, - "refId": "C" - } - ], - "title": "Rejected requests", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-YlRd" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "reqpm" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 5, - "x": 18, - "y": 1 - }, - "id": 17, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(request_retries[1m])", - "legendFormat": "Retry rate", 
- "range": true, - "refId": "A" - } - ], - "title": "Request retry rate", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 9 - }, - "id": 13, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "(1 - rate(request_failure_count[1m]) / rate(request_count[1m])) * 100", - "legendFormat": "Success rate over time", - "range": true, - "refId": "A" - } - ], - "title": "Success rate", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - 
"axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 8, - "y": 9 - }, - "id": 11, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(request_latency_sum[1m]) / rate(request_latency_count[1m])", - "legendFormat": "Average request latency", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(queue_latency_sum[1m]) / rate(queue_latency_count[1m])", - "hide": false, - "instant": false, - "legendFormat": "Average queue latency", - "range": true, - "refId": "B" - } - ], - "title": "Latency", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 17 - }, - "id": 19, - "panels": [], - "title": "Workers", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, 
- "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 0, - "y": 18 - }, - "id": 1, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "worker_count", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Total workers", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "worker_busy", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Busy workers", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Workers", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "red", - "mode": "fixed" - }, - "custom": { - "axisBorderShow": false, - 
"axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMax": 3, - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 7, - "y": 18 - }, - "id": 21, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "worker_unhealthy", - "legendFormat": "{{worker_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Unhealthy workers", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "auto", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 14, - "y": 18 - }, - "id": 12, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": 
false - }, - "frameIndex": 0, - "showHeader": true - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(worker_request_count[1m])", - "legendFormat": "{{worker_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Requests per worker", - "type": "table" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 26 - }, - "id": 20, - "panels": [], - "title": "Queue", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 15 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 0, - "y": 27 - }, - "id": 3, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "queue_size", - "fullMetaSearch": false, - 
"includeNullMetadata": true, - "legendFormat": "Queue size", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Queue size", - "type": "timeseries" - } - ], - "preload": false, - "refresh": "5s", - "schemaVersion": 40, - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "2025-03-31T19:02:51.110Z", - "to": "2025-03-31T19:04:03.015Z" - }, - "timepicker": {}, - "timezone": "browser", - "title": "tx_prover", - "uid": "be7bobzl5fr40f", - "version": 6, - "weekStart": "" -} diff --git a/bin/remote-prover/prometheus.yml b/bin/remote-prover/prometheus.yml deleted file mode 100644 index 817e92f24..000000000 --- a/bin/remote-prover/prometheus.yml +++ /dev/null @@ -1,16 +0,0 @@ -global: - scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. - evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. - # scrape_timeout is set to the global default (10s). - -# A scrape configuration containing exactly one endpoint to scrape: -scrape_configs: - # The job name is a label that is used to group targets in the Prometheus UI. - # It can be any string. - - job_name: "remote_prover" - # Here you need to specify the address of the Prometheus service endpoint in the proxy - # We use the default port for Prometheus, but it need to be changed if you use a different host - # or port. In case of using Prometheus in a docker container, you can use the - # `host.docker.internal` address to access the host machine. 
- static_configs: - - targets: ["127.0.0.1:6192"] diff --git a/bin/remote-prover/src/api/mod.rs b/bin/remote-prover/src/api/mod.rs deleted file mode 100644 index 4aee8807b..000000000 --- a/bin/remote-prover/src/api/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use tokio::net::TcpListener; - -use crate::generated::api_server::ApiServer; -use crate::generated::worker_status_api_server::WorkerStatusApiServer; - -pub(crate) mod prover; -mod status; - -pub use prover::{ProofType, ProverRpcApi}; - -pub struct RpcListener { - pub api_service: ApiServer, - pub status_service: WorkerStatusApiServer, - pub listener: TcpListener, -} - -impl RpcListener { - pub fn new(listener: TcpListener, proof_type: ProofType) -> Self { - let prover_rpc_api = ProverRpcApi::new(proof_type); - let status_rpc_api = status::StatusRpcApi::new(proof_type); - let api_service = ApiServer::new(prover_rpc_api); - let status_service = WorkerStatusApiServer::new(status_rpc_api); - Self { api_service, status_service, listener } - } -} diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs deleted file mode 100644 index 9af8f8eb3..000000000 --- a/bin/remote-prover/src/api/prover.rs +++ /dev/null @@ -1,352 +0,0 @@ -use miden_block_prover::LocalBlockProver; -use miden_node_proto::BlockProofRequest; -use miden_node_utils::ErrorReport; -use miden_protocol::MIN_PROOF_SECURITY_LEVEL; -use miden_protocol::batch::ProposedBatch; -use miden_protocol::transaction::TransactionInputs; -use miden_protocol::utils::Serializable; -use miden_tx::LocalTransactionProver; -use miden_tx_batch_prover::LocalBatchProver; -use serde::{Deserialize, Serialize}; -use tokio::sync::Mutex; -use tonic::{Request, Response, Status}; -use tracing::{info, instrument}; - -use crate::COMPONENT; -use crate::generated::api_server::Api as ProverApi; -use crate::generated::{self as proto}; - -/// Specifies the type of proof supported by the remote prover. 
-#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)] -pub enum ProofType { - #[default] - Transaction, - Batch, - Block, -} - -impl std::fmt::Display for ProofType { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ProofType::Transaction => write!(f, "transaction"), - ProofType::Batch => write!(f, "batch"), - ProofType::Block => write!(f, "block"), - } - } -} - -impl std::str::FromStr for ProofType { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "transaction" => Ok(ProofType::Transaction), - "batch" => Ok(ProofType::Batch), - "block" => Ok(ProofType::Block), - _ => Err(format!("Invalid proof type: {s}")), - } - } -} - -/// The prover for the remote prover. -/// -/// This enum is used to store the prover for the remote prover. -/// Only one prover is enabled at a time. -enum Prover { - Transaction(Mutex), - Batch(Mutex), - Block(Mutex), -} - -impl Prover { - fn new(proof_type: ProofType) -> Self { - match proof_type { - ProofType::Transaction => { - info!(target: COMPONENT, proof_type = ?proof_type, "Transaction prover initialized"); - Self::Transaction(Mutex::new(LocalTransactionProver::default())) - }, - ProofType::Batch => { - info!(target: COMPONENT, proof_type = ?proof_type, security_level = MIN_PROOF_SECURITY_LEVEL, "Batch prover initialized"); - Self::Batch(Mutex::new(LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL))) - }, - ProofType::Block => { - info!(target: COMPONENT, proof_type = ?proof_type, security_level = MIN_PROOF_SECURITY_LEVEL, "Block prover initialized"); - Self::Block(Mutex::new(LocalBlockProver::new(MIN_PROOF_SECURITY_LEVEL))) - }, - } - } -} - -pub struct ProverRpcApi { - prover: Prover, -} - -impl ProverRpcApi { - pub fn new(proof_type: ProofType) -> Self { - let prover = Prover::new(proof_type); - - Self { prover } - } - - #[instrument( - target = COMPONENT, - name = "remote_prover.prove_tx", - skip_all, - ret(level = "debug"), - 
fields(request_id = %request_id, transaction_id = tracing::field::Empty), - err - )] - pub async fn prove_tx( - &self, - tx_inputs: TransactionInputs, - request_id: &str, - ) -> Result, tonic::Status> { - let Prover::Transaction(prover) = &self.prover else { - return Err(Status::unimplemented("Transaction prover is not enabled")); - }; - - let locked_prover = prover - .try_lock() - .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))?; - - // Add a small delay to simulate longer proving time for testing - #[cfg(test)] - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - - let proof = locked_prover.prove(tx_inputs).map_err(internal_error)?; - - // Record the transaction_id in the current tracing span - let transaction_id = proof.id(); - tracing::Span::current().record("transaction_id", tracing::field::display(&transaction_id)); - - Ok(Response::new(proto::remote_prover::Proof { payload: proof.to_bytes() })) - } - - #[instrument( - target = COMPONENT, - name = "remote_prover.prove_batch", - skip_all, - ret(level = "debug"), - fields(request_id = %request_id, batch_id = tracing::field::Empty), - err - )] - pub fn prove_batch( - &self, - proposed_batch: ProposedBatch, - request_id: &str, - ) -> Result, tonic::Status> { - let Prover::Batch(prover) = &self.prover else { - return Err(Status::unimplemented("Batch prover is not enabled")); - }; - - let proven_batch = prover - .try_lock() - .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? 
- .prove(proposed_batch) - .map_err(internal_error)?; - - // Record the batch_id in the current tracing span - let batch_id = proven_batch.id(); - tracing::Span::current().record("batch_id", tracing::field::display(&batch_id)); - - Ok(Response::new(proto::remote_prover::Proof { payload: proven_batch.to_bytes() })) - } - - #[instrument( - target = COMPONENT, - name = "remote_prover.prove_block", - skip_all, - ret(level = "debug"), - fields(request_id = %request_id, block_id = tracing::field::Empty), - err - )] - pub fn prove_block( - &self, - proof_request: BlockProofRequest, - request_id: &str, - ) -> Result, tonic::Status> { - let Prover::Block(prover) = &self.prover else { - return Err(Status::unimplemented("Block prover is not enabled")); - }; - let BlockProofRequest { tx_batches, block_header, block_inputs } = proof_request; - - // Record the commitment of the block in the current tracing span. - let block_id = block_header.commitment(); - tracing::Span::current().record("block_id", tracing::field::display(&block_id)); - - let block_proof = prover - .try_lock() - .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? 
- .prove(tx_batches, &block_header, block_inputs) - .map_err(internal_error)?; - - Ok(Response::new(proto::remote_prover::Proof { payload: block_proof.to_bytes() })) - } -} - -#[async_trait::async_trait] -impl ProverApi for ProverRpcApi { - #[instrument( - target = COMPONENT, - name = "remote_prover.prove", - skip_all, - ret(level = "debug"), - fields(request_id = tracing::field::Empty), - err - )] - async fn prove( - &self, - request: Request, - ) -> Result, tonic::Status> { - // Extract X-Request-ID header for trace correlation - let request_id = request - .metadata() - .get("x-request-id") - .and_then(|v| v.to_str().ok()) - .unwrap_or("unknown") - .to_string(); // Convert to owned string to avoid lifetime issues - - // Record the request_id in the current tracing span - tracing::Span::current().record("request_id", &request_id); - - // Extract the proof type and payload - let proof_request = request.into_inner(); - let proof_type = proof_request.proof_type(); - - match proof_type { - proto::remote_prover::ProofType::Transaction => { - let tx_inputs = proof_request.try_into().map_err(invalid_argument)?; - self.prove_tx(tx_inputs, &request_id).await - }, - proto::remote_prover::ProofType::Batch => { - let proposed_batch = proof_request.try_into().map_err(invalid_argument)?; - self.prove_batch(proposed_batch, &request_id) - }, - proto::remote_prover::ProofType::Block => { - let proof_request = proof_request.try_into().map_err(invalid_argument)?; - self.prove_block(proof_request, &request_id) - }, - } - } -} - -// UTILITIES -// ================================================================================================ - -fn internal_error(err: E) -> Status { - Status::internal(err.as_report()) -} - -fn invalid_argument(err: E) -> Status { - Status::invalid_argument(err.as_report()) -} - -// TESTS -// ================================================================================================ - -#[cfg(test)] -mod test { - use std::time::Duration; - - use 
miden_node_utils::cors::cors_for_grpc_web_layer; - use miden_protocol::asset::{Asset, FungibleAsset}; - use miden_protocol::note::NoteType; - use miden_protocol::testing::account_id::{ - ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, - ACCOUNT_ID_SENDER, - }; - use miden_protocol::transaction::ProvenTransaction; - use miden_testing::{Auth, MockChainBuilder}; - use miden_tx::utils::Serializable; - use tokio::net::TcpListener; - use tonic::Request; - use tonic_web::GrpcWebLayer; - - use crate::api::ProverRpcApi; - use crate::generated::api_client::ApiClient; - use crate::generated::api_server::ApiServer; - use crate::generated::{self as proto}; - - #[tokio::test(flavor = "multi_thread", worker_threads = 3)] - async fn test_prove_transaction() { - // Start the server in the background - let listener = TcpListener::bind("127.0.0.1:50052").await.unwrap(); - - let proof_type = proto::remote_prover::ProofType::Transaction; - - let api_service = ApiServer::new(ProverRpcApi::new(proof_type.into())); - - // Spawn the server as a background task - tokio::spawn(async move { - tonic::transport::Server::builder() - .accept_http1(true) - .layer(cors_for_grpc_web_layer()) - .layer(GrpcWebLayer::new()) - .add_service(api_service) - .serve_with_incoming(tokio_stream::wrappers::TcpListenerStream::new(listener)) - .await - .unwrap(); - }); - - // Give the server some time to start - tokio::time::sleep(Duration::from_secs(1)).await; - - // Set up a gRPC client to send the request - let mut client = ApiClient::connect("http://127.0.0.1:50052").await.unwrap(); - let mut client_2 = ApiClient::connect("http://127.0.0.1:50052").await.unwrap(); - - // Create a mock transaction to send to the server - let mut mock_chain_builder = MockChainBuilder::new(); - let account = mock_chain_builder.add_existing_wallet(Auth::BasicAuth).unwrap(); - - let fungible_asset_1: Asset = - FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) - .unwrap() - .into(); - let note_1 = mock_chain_builder 
- .add_p2id_note( - ACCOUNT_ID_SENDER.try_into().unwrap(), - account.id(), - &[fungible_asset_1], - NoteType::Private, - ) - .unwrap(); - - let mock_chain = mock_chain_builder.build().unwrap(); - - let tx_context = mock_chain - .build_tx_context(account.id(), &[note_1.id()], &[]) - .unwrap() - .build() - .unwrap(); - - let executed_transaction = Box::pin(tx_context.execute()).await.unwrap(); - let tx_inputs = executed_transaction.tx_inputs(); - - let request_1 = Request::new(proto::remote_prover::ProofRequest { - proof_type: proto::remote_prover::ProofType::Transaction.into(), - payload: tx_inputs.to_bytes(), - }); - - let request_2 = Request::new(proto::remote_prover::ProofRequest { - proof_type: proto::remote_prover::ProofType::Transaction.into(), - payload: tx_inputs.to_bytes(), - }); - - // Send both requests concurrently - let (response_1, response_2) = - tokio::join!(client.prove(request_1), client_2.prove(request_2)); - - // Check the success response - assert!(response_1.is_ok() || response_2.is_ok()); - - // Check the failure response - assert!(response_1.is_err() || response_2.is_err()); - - let response_success = response_1.or(response_2).unwrap(); - - // Cast into a ProvenTransaction - let _proven_transaction: ProvenTransaction = - response_success.into_inner().try_into().expect("Failed to convert response"); - } -} diff --git a/bin/remote-prover/src/commands/mod.rs b/bin/remote-prover/src/commands/mod.rs deleted file mode 100644 index 13b21d8a5..000000000 --- a/bin/remote-prover/src/commands/mod.rs +++ /dev/null @@ -1,125 +0,0 @@ -use std::time::Duration; - -use clap::Parser; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::ProofType; -use proxy::StartProxy; -use tracing::instrument; -use update_workers::{AddWorkers, RemoveWorkers, UpdateWorkers}; -use worker::StartWorker; - -pub mod proxy; -pub mod update_workers; -pub mod worker; - -pub(crate) const PROXY_HOST: &str = "0.0.0.0"; - -#[derive(Debug, Parser)] -pub(crate) struct 
ProxyConfig { - /// Interval at which the system polls for available workers to assign new - /// tasks. - #[arg(long, default_value = "20ms", env = "MRP_AVAILABLE_WORKERS_POLLING_INTERVAL", value_parser = humantime::parse_duration)] - pub(crate) available_workers_polling_interval: Duration, - /// Maximum time to establish a connection. - #[arg(long, default_value = "10s", env = "MRP_CONNECTION_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) connection_timeout: Duration, - /// Health check interval. - #[arg(long, default_value = "10s", env = "MRP_HEALTH_CHECK_INTERVAL", value_parser = humantime::parse_duration)] - pub(crate) health_check_interval: Duration, - /// Maximum number of items in the queue. - #[arg(long, default_value = "10", env = "MRP_MAX_QUEUE_ITEMS")] - pub(crate) max_queue_items: usize, - /// Maximum number of requests per second per IP address. - #[arg(long, default_value = "5", env = "MRP_MAX_REQ_PER_SEC")] - pub(crate) max_req_per_sec: isize, - /// Maximum number of retries per request. - #[arg(long, default_value = "1", env = "MRP_MAX_RETRIES_PER_REQUEST")] - pub(crate) max_retries_per_request: usize, - /// Metrics configurations. - #[command(flatten)] - pub(crate) metrics_config: MetricsConfig, - /// Port of the proxy. - #[arg(long, default_value = "8082", env = "MRP_PORT")] - pub(crate) port: u16, - /// Maximum time allowed for a request to complete. Once exceeded, the request is - /// aborted. - #[arg(long, default_value = "100s", env = "MRP_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) timeout: Duration, - /// Control port. - /// - /// Port used to add and remove workers from the proxy. - #[arg(long, default_value = "8083", env = "MRP_CONTROL_PORT")] - pub(crate) control_port: u16, - /// Supported proof type. - /// - /// The type of proof the proxy will handle. Only workers that support the same proof type - /// will be able to connect to the proxy. 
- #[arg(long, default_value = "transaction", env = "MRP_PROOF_TYPE")] - pub(crate) proof_type: ProofType, - /// Grace period before starting the final step of the graceful shutdown after - /// signaling shutdown. - #[arg(long, default_value = "20s", env = "MRP_GRACE_PERIOD", value_parser = humantime::parse_duration)] - pub(crate) grace_period: std::time::Duration, - /// Timeout of the final step for the graceful shutdown. - #[arg(long, default_value = "5s", env = "MRP_GRACEFUL_SHUTDOWN_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) graceful_shutdown_timeout: std::time::Duration, -} - -#[derive(Debug, Clone, clap::Parser)] -pub struct MetricsConfig { - /// Port for Prometheus-compatible metrics - /// If specified, metrics will be enabled on this port. If not specified, metrics will be - /// disabled. - #[arg(long, env = "MRP_METRICS_PORT")] - pub metrics_port: Option, -} - -/// Root CLI struct -#[derive(Parser, Debug)] -#[command( - name = "miden-remote-prover", - about = "A stand-alone service for proving Miden transactions.", - version, - rename_all = "kebab-case" -)] -pub struct Cli { - #[command(subcommand)] - action: Command, -} - -/// CLI actions -#[derive(Debug, Parser)] -pub enum Command { - /// Starts the workers with the configuration defined in the command. - StartWorker(StartWorker), - /// Starts the proxy. - StartProxy(StartProxy), - /// Adds workers to the proxy. - /// - /// This command will make a request to the proxy to add the specified workers. - AddWorkers(AddWorkers), - /// Removes workers from the proxy. - /// - /// This command will make a request to the proxy to remove the specified workers. 
- RemoveWorkers(RemoveWorkers), -} - -/// CLI entry point -impl Cli { - #[instrument(target = COMPONENT, name = "cli.execute", skip_all, ret(level = "info"), err)] - pub async fn execute(&self) -> anyhow::Result<()> { - match &self.action { - // For the `StartWorker` command, we need to create a new runtime and run the worker - Command::StartWorker(worker_init) => worker_init.execute().await, - Command::StartProxy(proxy_init) => proxy_init.execute().await, - Command::AddWorkers(update_workers) => { - let update_workers: UpdateWorkers = update_workers.clone().into(); - update_workers.execute().await - }, - Command::RemoveWorkers(update_workers) => { - let update_workers: UpdateWorkers = update_workers.clone().into(); - update_workers.execute().await - }, - } - } -} diff --git a/bin/remote-prover/src/commands/proxy.rs b/bin/remote-prover/src/commands/proxy.rs deleted file mode 100644 index e9266c948..000000000 --- a/bin/remote-prover/src/commands/proxy.rs +++ /dev/null @@ -1,129 +0,0 @@ -use clap::Parser; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::error::RemoteProverError; -use pingora::apps::HttpServerOptions; -use pingora::prelude::{Opt, background_service}; -use pingora::server::Server; -use pingora::server::configuration::ServerConf; -use pingora::services::listening::Service; -use pingora_proxy::http_proxy_service; -use tracing::{info, warn}; - -use super::ProxyConfig; -use crate::commands::PROXY_HOST; -use crate::proxy::update_workers::LoadBalancerUpdateService; -use crate::proxy::{LoadBalancer, LoadBalancerState}; -use crate::utils::check_port_availability; - -/// Starts the proxy. -/// -/// Example: `miden-remote-prover start-proxy --workers 0.0.0.0:8080,127.0.0.1:9090` -#[derive(Debug, Parser)] -pub struct StartProxy { - /// List of workers as host:port strings. - /// - /// Example: `127.0.0.1:8080,192.168.1.1:9090` - #[arg(long, env = "MRP_PROXY_WORKERS_LIST", value_delimiter = ',')] - workers: Vec, - /// Proxy configurations. 
- #[command(flatten)] - proxy_config: ProxyConfig, -} - -impl StartProxy { - /// Starts the proxy using the configuration defined in the command. - /// - /// This method will start a proxy with each worker passed as command argument as a backend, - /// using the configurations passed as options for the commands or the equivalent environmental - /// variables. - /// - /// # Errors - /// Returns an error in the following cases: - /// - The backend cannot be created. - /// - The Pingora configuration fails. - /// - The server cannot be started. - #[tracing::instrument(target = COMPONENT, name = "proxy.execute")] - pub async fn execute(&self) -> anyhow::Result<()> { - // Check if all required ports are available - check_port_availability(self.proxy_config.port, "Proxy")?; - check_port_availability(self.proxy_config.control_port, "Control")?; - - // First, check if the metrics port is specified (metrics enabled) - if let Some(metrics_port) = self.proxy_config.metrics_config.metrics_port { - check_port_availability(metrics_port, "Metrics")?; - } - - let mut conf = ServerConf::new().ok_or(RemoteProverError::PingoraConfigFailed( - "Failed to create server conf".to_string(), - ))?; - conf.grace_period_seconds = Some(self.proxy_config.grace_period.as_secs()); - conf.graceful_shutdown_timeout_seconds = - Some(self.proxy_config.graceful_shutdown_timeout.as_secs()); - - let mut server = Server::new_with_opt_and_conf(Some(Opt::default()), conf); - - server.bootstrap(); - - if self.workers.is_empty() { - warn!(target: COMPONENT, "Starting proxy without any workers"); - } else { - info!(target: COMPONENT, - worker_count = %self.workers.len(), - workers = ?self.workers, - "Proxy starting with workers" - ); - } - - let worker_lb = LoadBalancerState::new(self.workers.clone(), &self.proxy_config).await?; - - let health_check_service = background_service("health_check", worker_lb); - - let worker_lb = health_check_service.task(); - - let updater_service = 
LoadBalancerUpdateService::new(worker_lb.clone()); - - let mut update_workers_service = - Service::new("update_workers".to_string(), updater_service); - update_workers_service - .add_tcp(format!("{}:{}", PROXY_HOST, self.proxy_config.control_port).as_str()); - - // Set up the load balancer - let mut lb = http_proxy_service(&server.configuration, LoadBalancer(worker_lb.clone())); - - lb.add_tcp(format!("{}:{}", PROXY_HOST, self.proxy_config.port).as_str()); - info!(target: COMPONENT, - endpoint = %format!("{}:{}", PROXY_HOST, self.proxy_config.port), - "Proxy service listening" - ); - let logic = lb - .app_logic_mut() - .ok_or(RemoteProverError::PingoraConfigFailed("app logic not found".to_string()))?; - let mut http_server_options = HttpServerOptions::default(); - - // Enable HTTP/2 for plaintext - http_server_options.h2c = true; - logic.server_options = Some(http_server_options); - - // Enable Prometheus metrics if metrics_port is specified - if let Some(metrics_port) = self.proxy_config.metrics_config.metrics_port { - let metrics_addr = format!("{PROXY_HOST}:{metrics_port}"); - info!(target: COMPONENT, - endpoint = %metrics_addr, - "Metrics service initialized" - ); - let mut prometheus_service = - pingora::services::listening::Service::prometheus_http_service(); - prometheus_service.add_tcp(&metrics_addr); - server.add_service(prometheus_service); - } else { - info!(target: COMPONENT, "Metrics service disabled"); - } - - server.add_service(health_check_service); - server.add_service(update_workers_service); - server.add_service(lb); - tokio::task::spawn_blocking(|| server.run_forever()).await?; - - Ok(()) - } -} diff --git a/bin/remote-prover/src/commands/update_workers.rs b/bin/remote-prover/src/commands/update_workers.rs deleted file mode 100644 index c661a39dd..000000000 --- a/bin/remote-prover/src/commands/update_workers.rs +++ /dev/null @@ -1,126 +0,0 @@ -use anyhow::Context; -use clap::Parser; -use reqwest::Client; -use serde::{Deserialize, Serialize}; - 
-use crate::commands::PROXY_HOST; - -// ADD WORKERS -// ================================================================================================ - -/// Add workers to the proxy -#[derive(Debug, Parser, Clone, Serialize, Deserialize)] -pub struct AddWorkers { - /// Workers to be added to the proxy. - /// - /// The workers are passed as host:port strings. - #[arg(value_name = "WORKERS", env = "MRP_PROXY_WORKERS_LIST", value_delimiter = ',')] - workers: Vec, - /// Port of the proxy endpoint to update workers. - #[arg(long, default_value = "8083", env = "MRP_CONTROL_PORT")] - control_port: u16, -} - -// REMOVE WORKERS -// ================================================================================================ - -/// Remove workers from the proxy -#[derive(Debug, Parser, Clone, Serialize, Deserialize)] -pub struct RemoveWorkers { - /// Workers to be removed from the proxy. - /// - /// The workers are passed as host:port strings. - #[arg(value_name = "WORKERS", env = "MRP_PROXY_WORKERS_LIST", value_delimiter = ',')] - workers: Vec, - /// Port of the proxy endpoint to update workers. - #[arg(long, default_value = "8083", env = "MRP_CONTROL_PORT")] - control_port: u16, -} - -// UPDATE WORKERS -// ================================================================================================ - -/// Action to perform on the workers -#[derive(clap::ValueEnum, Clone, Debug, Serialize, Deserialize)] -pub enum Action { - Add, - Remove, -} - -/// Update workers in the proxy performing the specified [`Action`] -#[derive(Debug, Parser, Clone, Serialize, Deserialize)] -pub struct UpdateWorkers { - pub action: Action, - pub workers: Vec, - pub control_port: u16, -} - -impl UpdateWorkers { - /// Makes a requests to the update workers endpoint to update the workers. - /// - /// It works by sending a GET request to the proxy with the query parameters. The query - /// parameters are serialized from the struct fields. 
- /// - /// It uses the URL defined in the env vars or passed as parameter for the proxy. - /// - /// The request will return the new number of workers in the X-Worker-Count header. - /// - /// # Errors - /// - If the query parameters cannot be serialized. - /// - If the request fails. - /// - If the status code is not successful. - /// - If the X-Worker-Count header is missing. - pub async fn execute(&self) -> anyhow::Result<()> { - let query_params = serde_qs::to_string(&self)?; - - println!("Action: {:?}, with workers: {:?}", self.action, self.workers); - - // Create the full URL with fixed host "0.0.0.0" - let url = format!("http://{}:{}?{}", PROXY_HOST, self.control_port, query_params); - - // Create an HTTP/2 client - let client = Client::builder().http2_prior_knowledge().build()?; - - // Make the request - let response = client.get(url).send().await?; - - // Check status code - if !response.status().is_success() { - anyhow::bail!("Request failed with status code: {}", response.status()); - } - - // Read the X-Worker-Count header - let workers_count = response - .headers() - .get("X-Worker-Count") - .context("Missing X-Worker-Count header")? 
- .to_str()?; - - println!("New number of workers: {workers_count}"); - - Ok(()) - } -} - -// CONVERSIONS -// ================================================================================================ - -impl From for UpdateWorkers { - fn from(remove_workers: RemoveWorkers) -> Self { - UpdateWorkers { - action: Action::Remove, - workers: remove_workers.workers, - control_port: remove_workers.control_port, - } - } -} - -impl From for UpdateWorkers { - fn from(add_workers: AddWorkers) -> Self { - UpdateWorkers { - action: Action::Add, - workers: add_workers.workers, - control_port: add_workers.control_port, - } - } -} diff --git a/bin/remote-prover/src/commands/worker.rs b/bin/remote-prover/src/commands/worker.rs deleted file mode 100644 index 1417e5baa..000000000 --- a/bin/remote-prover/src/commands/worker.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::time::Duration; - -use clap::Parser; -use miden_node_utils::cors::cors_for_grpc_web_layer; -use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; -use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::{ProofType, RpcListener}; -use miden_remote_prover::generated::api_server::ApiServer; -use tokio::net::TcpListener; -use tokio_stream::wrappers::TcpListenerStream; -use tonic_health::server::health_reporter; -use tonic_web::GrpcWebLayer; -use tower_http::trace::TraceLayer; -use tracing::{info, instrument}; - -/// Starts a worker. -#[derive(Debug, Parser)] -pub struct StartWorker { - /// Use localhost (127.0.0.1) instead of 0.0.0.0 - #[arg(long, env = "MRP_WORKER_LOCALHOST")] - localhost: bool, - /// The port of the worker - #[arg(long, default_value = "50051", env = "MRP_WORKER_PORT")] - port: u16, - /// The type of proof that the worker will be handling - #[arg(long, env = "MRP_WORKER_PROOF_TYPE")] - proof_type: ProofType, - /// Maximum time allowed for a request to complete. Once exceeded, the request is - /// aborted. 
- #[arg(long, default_value = "60s", env = "MRP_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) timeout: Duration, -} - -impl StartWorker { - /// Starts a worker. - /// - /// This method receives the port from the CLI and starts a worker on that port. - /// The host will be 127.0.0.1 if --localhost is specified, otherwise 0.0.0.0. - /// In case that the port is not provided, it will default to `50051`. - /// - /// The worker includes a health reporter that will mark the service as serving, following the - /// [gRPC health checking protocol]( - /// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto). - #[instrument(target = COMPONENT, name = "worker.execute")] - pub async fn execute(&self) -> anyhow::Result<()> { - let host = if self.localhost { "127.0.0.1" } else { "0.0.0.0" }; - let worker_addr = format!("{}:{}", host, self.port); - let rpc = RpcListener::new(TcpListener::bind(&worker_addr).await?, self.proof_type); - - let server_addr = rpc.listener.local_addr()?; - info!(target: COMPONENT, - endpoint = %server_addr, - proof_type = ?self.proof_type, - host = %host, - port = %self.port, - "Worker server initialized and listening" - ); - - // Create a health reporter - let (health_reporter, health_service) = health_reporter(); - - // Mark the service as serving - health_reporter.set_serving::>().await; - - tonic::transport::Server::builder() - .accept_http1(true) - .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) - .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) - .layer(cors_for_grpc_web_layer()) - .layer(GrpcWebLayer::new()) - .timeout(self.timeout) - .add_service(rpc.api_service) - .add_service(rpc.status_service) - .add_service(health_service) - .serve_with_incoming(TcpListenerStream::new(rpc.listener)) - .await?; - - Ok(()) - } -} diff --git a/bin/remote-prover/src/error.rs b/bin/remote-prover/src/error.rs deleted file mode 100644 index 16638c04c..000000000 --- a/bin/remote-prover/src/error.rs +++ 
/dev/null @@ -1,27 +0,0 @@ -use axum::http::uri::InvalidUri; -use thiserror::Error; - -// TX PROVER SERVICE ERROR -// ================================================================================================ - -#[derive(Debug, Error)] -pub enum RemoteProverError { - #[error("invalid uri {1}")] - InvalidURI(#[source] InvalidUri, String), - #[error("failed to connect to worker {1}")] - ConnectionFailed(#[source] tonic::transport::Error, String), - #[error("failed to create backend for worker")] - BackendCreationFailed(#[source] Box), - #[error("failed to setup pingora: {0}")] - PingoraConfigFailed(String), - #[error("failed to parse int: {0}")] - ParseError(#[from] std::num::ParseIntError), - #[error("port {1} is already in use: {0}")] - PortAlreadyInUse(#[source] std::io::Error, u16), -} - -impl From for String { - fn from(err: RemoteProverError) -> Self { - err.to_string() - } -} diff --git a/bin/remote-prover/src/generated/conversions.rs b/bin/remote-prover/src/generated/conversions.rs deleted file mode 100644 index e1bdc6406..000000000 --- a/bin/remote-prover/src/generated/conversions.rs +++ /dev/null @@ -1,90 +0,0 @@ -// CONVERSIONS -// ================================================================================================ - -use miden_node_proto::BlockProofRequest; -use miden_protocol::batch::ProposedBatch; -use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; -use miden_tx::utils::{Deserializable, DeserializationError, Serializable}; - -use crate::api::ProofType; -use crate::generated as proto; - -impl From for proto::Proof { - fn from(value: ProvenTransaction) -> Self { - proto::Proof { payload: value.to_bytes() } - } -} - -impl TryFrom for ProvenTransaction { - type Error = DeserializationError; - - fn try_from(response: proto::Proof) -> Result { - ProvenTransaction::read_from_bytes(&response.payload) - } -} - -impl TryFrom for TransactionInputs { - type Error = DeserializationError; - - fn try_from(request: 
proto::ProofRequest) -> Result { - TransactionInputs::read_from_bytes(&request.payload) - } -} - -impl TryFrom for ProposedBatch { - type Error = DeserializationError; - - fn try_from(request: proto::ProofRequest) -> Result { - ProposedBatch::read_from_bytes(&request.payload) - } -} - -impl TryFrom for BlockProofRequest { - type Error = DeserializationError; - - fn try_from(request: proto::ProofRequest) -> Result { - BlockProofRequest::read_from_bytes(&request.payload) - } -} - -impl From for proto::ProofType { - fn from(value: ProofType) -> Self { - match value { - ProofType::Transaction => proto::ProofType::Transaction, - ProofType::Batch => proto::ProofType::Batch, - ProofType::Block => proto::ProofType::Block, - } - } -} - -impl From for ProofType { - fn from(value: proto::ProofType) -> Self { - match value { - proto::ProofType::Transaction => ProofType::Transaction, - proto::ProofType::Batch => ProofType::Batch, - proto::ProofType::Block => ProofType::Block, - } - } -} - -impl TryFrom for ProofType { - type Error = String; - fn try_from(value: i32) -> Result { - match value { - 0 => Ok(ProofType::Transaction), - 1 => Ok(ProofType::Batch), - 2 => Ok(ProofType::Block), - _ => Err(format!("unknown ProverType value: {value}")), - } - } -} - -impl From for i32 { - fn from(value: ProofType) -> Self { - match value { - ProofType::Transaction => 0, - ProofType::Batch => 1, - ProofType::Block => 2, - } - } -} diff --git a/bin/remote-prover/src/generated/mod.rs b/bin/remote-prover/src/generated/mod.rs index 830c3a508..f2af60274 100644 --- a/bin/remote-prover/src/generated/mod.rs +++ b/bin/remote-prover/src/generated/mod.rs @@ -2,7 +2,5 @@ #![allow(clippy::allow_attributes, reason = "generated by build.rs and tonic")] #[rustfmt::skip] -pub mod remote_prover; -mod conversions; - +mod remote_prover; pub use remote_prover::*; diff --git a/bin/remote-prover/src/lib.rs b/bin/remote-prover/src/lib.rs deleted file mode 100644 index 0388ae685..000000000 --- 
a/bin/remote-prover/src/lib.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub mod api; -pub mod error; -pub mod generated; - -/// Component identifier for structured logging and tracing -pub const COMPONENT: &str = "miden-remote-prover"; diff --git a/bin/remote-prover/src/main.rs b/bin/remote-prover/src/main.rs index d4fc42f6d..e445d80f1 100644 --- a/bin/remote-prover/src/main.rs +++ b/bin/remote-prover/src/main.rs @@ -1,22 +1,20 @@ +use anyhow::Context; use clap::Parser; use miden_node_utils::logging::{OpenTelemetry, setup_tracing}; -use miden_remote_prover::COMPONENT; use tracing::info; -use crate::commands::Cli; +mod generated; +mod server; -pub(crate) mod commands; -pub(crate) mod proxy; -pub(crate) mod utils; +const COMPONENT: &str = "miden-prover"; #[tokio::main] async fn main() -> anyhow::Result<()> { let _otel_guard = setup_tracing(OpenTelemetry::Enabled)?; info!(target: COMPONENT, "Tracing initialized"); - // read command-line args - let cli = Cli::parse(); + let (handle, _port) = + server::Server::parse().spawn().await.context("failed to spawn server")?; - // execute cli action - cli.execute().await + handle.await.context("proof server panicked").flatten() } diff --git a/bin/remote-prover/src/proxy/health_check.rs b/bin/remote-prover/src/proxy/health_check.rs deleted file mode 100644 index b583c0982..000000000 --- a/bin/remote-prover/src/proxy/health_check.rs +++ /dev/null @@ -1,70 +0,0 @@ -use miden_remote_prover::COMPONENT; -use pingora::prelude::sleep; -use pingora::server::ShutdownWatch; -use pingora::services::background::BackgroundService; -use tonic::async_trait; -use tracing::{debug_span, error}; - -use super::LoadBalancerState; - -/// Implement the [`BackgroundService`] trait for the [`LoadBalancerState`]. -/// -/// A [`BackgroundService`] can be run as part of a Pingora application to add supporting logic that -/// exists outside of the request/response lifecycle. 
-/// -/// We use this implementation to periodically check the health of the workers and update the list -/// of available workers. -#[async_trait] -impl BackgroundService for LoadBalancerState { - /// Starts the health check background service. - /// - /// This function is called when the Pingora server tries to start all the services. The - /// background service can return at anytime or wait for the `shutdown` signal. - /// - /// The health check background service will periodically check the health of the workers - /// using the gRPC status endpoint. If a worker is not healthy, it will be removed from - /// the list of available workers. - /// - /// # Errors - /// - If the worker has an invalid URI. - async fn start(&self, shutdown: ShutdownWatch) { - Box::pin(async move { - loop { - // Check if the shutdown signal has been received - { - if *shutdown.borrow() { - break; - } - } - - // Create a new spawn to perform the health check - let span = debug_span!(target: COMPONENT, "proxy.health_check"); - let _guard = span.enter(); - { - let mut workers = self.workers.write().await; - - for worker in workers.iter_mut() { - let status_result = worker.check_status(self.supported_proof_type).await; - - if let Err(ref reason) = status_result { - error!( - err = %reason, - worker.name = worker.name(), - "Worker failed health check" - ); - } - - worker.update_status(status_result); - } - } - - // Update the status cache with current worker status - self.update_status_cache().await; - - // Sleep for the defined interval before the next health check - sleep(self.health_check_interval).await; - } - }) - .await; - } -} diff --git a/bin/remote-prover/src/proxy/metrics.rs b/bin/remote-prover/src/proxy/metrics.rs deleted file mode 100644 index 9b5c579d9..000000000 --- a/bin/remote-prover/src/proxy/metrics.rs +++ /dev/null @@ -1,97 +0,0 @@ -use std::sync::LazyLock; - -use prometheus::{ - Histogram, - IntCounter, - IntCounterVec, - IntGauge, - register_histogram, - 
register_int_counter, - register_int_counter_vec, - register_int_gauge, -}; - -// SAFETY: The `unwrap` calls here are safe because: -// 1. The metrics being registered (gauges, counters, histograms) use hardcoded names and -// descriptions, which are guaranteed not to conflict within the application. -// 2. Registration errors occur only if there is a naming conflict, which is not possible in this -// context due to controlled metric definitions. -// 3. Any changes to metric names or types should be carefully reviewed to avoid conflicts. - -// QUEUE METRICS -// ================================================================================================ - -pub static QUEUE_SIZE: LazyLock = - LazyLock::new(|| register_int_gauge!("queue_size", "Number of requests in the queue").unwrap()); -pub static QUEUE_LATENCY: LazyLock = LazyLock::new(|| { - register_histogram!( - "queue_latency", - "Time (in seconds) requests spend in the queue", - vec![0.1, 0.5, 1.0, 2.0, 5.0, 10.0] - ) - .unwrap() -}); -pub static QUEUE_DROP_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter!("queue_drop_count", "Number of requests dropped due to a full queue") - .unwrap() -}); - -// WORKER METRICS -// ================================================================================================ - -pub static WORKER_COUNT: LazyLock = - LazyLock::new(|| register_int_gauge!("worker_count", "Total number of workers").unwrap()); -pub static WORKER_UNHEALTHY: LazyLock = LazyLock::new(|| { - register_int_counter_vec!( - "worker_unhealthy", - "Number of times that each worker was registered as unhealthy", - &["worker_id"] - ) - .unwrap() -}); -pub static WORKER_BUSY: LazyLock = - LazyLock::new(|| register_int_gauge!("worker_busy", "Number of busy workers").unwrap()); -pub static WORKER_REQUEST_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter_vec!( - "worker_request_count", - "Number of requests processed by each worker", - &["worker_id"] - ) - .unwrap() -}); - -// REQUEST 
METRICS -// ================================================================================================ - -pub static REQUEST_FAILURE_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter!("request_failure_count", "Number of failed requests").unwrap() -}); -pub static REQUEST_RETRIES: LazyLock = LazyLock::new(|| { - register_int_counter!("request_retries", "Number of request retries").unwrap() -}); -pub static REQUEST_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter!("request_count", "Number of requests processed").unwrap() -}); -pub static REQUEST_LATENCY: LazyLock = LazyLock::new(|| { - register_histogram!( - "request_latency", - "Time (in seconds) requests take to process", - vec![0.1, 0.5, 1.0, 2.0, 5.0, 10.0] - ) - .unwrap() -}); - -// RATE LIMITING METRICS -// ================================================================================================ - -pub static RATE_LIMITED_REQUESTS: LazyLock = LazyLock::new(|| { - register_int_counter!( - "rate_limited_requests", - "Number of requests blocked due to rate limiting" - ) - .unwrap() -}); -pub static RATE_LIMIT_VIOLATIONS: LazyLock = LazyLock::new(|| { - register_int_counter!("rate_limit_violations", "Number of rate limit violations by clients") - .unwrap() -}); diff --git a/bin/remote-prover/src/proxy/mod.rs b/bin/remote-prover/src/proxy/mod.rs deleted file mode 100644 index e543022ac..000000000 --- a/bin/remote-prover/src/proxy/mod.rs +++ /dev/null @@ -1,772 +0,0 @@ -use std::collections::VecDeque; -use std::sync::{Arc, LazyLock}; -use std::time::{Duration, Instant}; - -use async_trait::async_trait; -use bytes::Bytes; -use metrics::{ - QUEUE_LATENCY, - QUEUE_SIZE, - RATE_LIMIT_VIOLATIONS, - RATE_LIMITED_REQUESTS, - REQUEST_COUNT, - REQUEST_FAILURE_COUNT, - REQUEST_LATENCY, - REQUEST_RETRIES, - WORKER_BUSY, - WORKER_COUNT, - WORKER_REQUEST_COUNT, -}; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::ProofType; -use miden_remote_prover::error::RemoteProverError; 
-use miden_remote_prover::generated::remote_prover::{ProxyStatus, ProxyWorkerStatus}; -use pingora::http::ResponseHeader; -use pingora::prelude::*; -use pingora::protocols::Digest; -use pingora::upstreams::peer::{ALPN, Peer}; -use pingora_core::Result; -use pingora_core::upstreams::peer::HttpPeer; -use pingora_limits::rate::Rate; -use pingora_proxy::{FailToProxy, ProxyHttp, Session}; -use tokio::sync::RwLock; -use tracing::{Span, debug, error, info, info_span, warn}; -use uuid::Uuid; -use worker::Worker; - -use crate::commands::ProxyConfig; -use crate::commands::update_workers::{Action, UpdateWorkers}; -use crate::utils::{ - create_queue_full_response, - create_response_with_error_message, - create_too_many_requests_response, - write_grpc_response_to_session, -}; - -mod health_check; -pub mod metrics; -pub(crate) mod update_workers; -pub(crate) mod worker; - -// CONSTANTS -// ================================================================================================ - -const PROXY_STATUS_PATH: &str = "/remote_prover.ProxyStatusApi/Status"; - -// LOAD BALANCER STATE -// ================================================================================================ - -/// Load balancer that uses a round robin strategy -#[derive(Debug)] -pub struct LoadBalancerState { - workers: Arc>>, - timeout: Duration, - connection_timeout: Duration, - max_queue_items: usize, - max_retries_per_request: usize, - max_req_per_sec: isize, - available_workers_polling_interval: Duration, - health_check_interval: Duration, - supported_proof_type: ProofType, - status_cache_sender: tokio::sync::watch::Sender, - status_cache_receiver: tokio::sync::watch::Receiver, -} - -impl LoadBalancerState { - /// Create a new load balancer - /// - /// # Errors - /// Returns an error if: - /// - The worker cannot be created. 
- #[tracing::instrument(target = COMPONENT, name = "proxy.new_load_balancer", skip(initial_workers))] - pub(crate) async fn new( - initial_workers: Vec, - config: &ProxyConfig, - ) -> core::result::Result { - let mut workers: Vec = Vec::with_capacity(initial_workers.len()); - - let connection_timeout = config.connection_timeout; - let total_timeout = config.timeout; - - for worker_addr in initial_workers { - match Worker::new(worker_addr, connection_timeout, total_timeout).await { - Ok(w) => workers.push(w), - Err(e) => { - error!("Failed to create worker: {}", e); - }, - } - } - - info!("Workers created: {:?}", workers); - - WORKER_COUNT.set(i64::try_from(workers.len()).expect("worker count greater than i64::MAX")); - RATE_LIMIT_VIOLATIONS.reset(); - RATE_LIMITED_REQUESTS.reset(); - REQUEST_RETRIES.reset(); - - let workers = Arc::new(RwLock::new(workers)); - let supported_proof_type = config.proof_type; - - // Build initial status for the cache - let initial_status = { - let workers_guard = workers.read().await; - build_proxy_status_response(&workers_guard, supported_proof_type) - }; - - // Create the status cache channel - let (status_cache_sender, status_cache_receiver) = - tokio::sync::watch::channel(initial_status); - - Ok(Self { - workers, - timeout: total_timeout, - connection_timeout, - max_queue_items: config.max_queue_items, - max_retries_per_request: config.max_retries_per_request, - max_req_per_sec: config.max_req_per_sec, - available_workers_polling_interval: config.available_workers_polling_interval, - health_check_interval: config.health_check_interval, - supported_proof_type, - status_cache_sender, - status_cache_receiver, - }) - } - - /// Gets an available worker and marks it as unavailable. - /// - /// If no worker is available, it will return None. 
- pub async fn pop_available_worker(&self) -> Option { - let mut available_workers = self.workers.write().await; - available_workers.iter_mut().find(|w| w.is_available()).map(|w| { - w.set_availability(false); - WORKER_BUSY.inc(); - w.clone() - }) - } - - /// Marks the given worker as available and moves it to the end of the list. - /// - /// If the worker is not in the list, it won't be added. - /// The worker is moved to the end of the list to avoid overloading since the selection of the - /// worker is done in order, causing the workers at the beginning of the list to be selected - /// more often. - pub async fn add_available_worker(&self, worker: Worker) { - let mut workers = self.workers.write().await; - if let Some(pos) = workers.iter().position(|w| *w == worker) { - // Remove the worker from its current position - let mut w = workers.remove(pos); - // Mark it as available - w.set_availability(true); - // Add it to the end of the list - workers.push(w); - } - } - - /// Updates the list of available workers based on the given action ("add" or "remove"). - /// - /// # Behavior - /// - /// ## Add Action - /// - If the worker exists in the current workers list, do nothing. - /// - Otherwise, add it and mark it as available. - /// - /// ## Remove Action - /// - If the worker exists in the current workers list, remove it. - /// - Otherwise, do nothing. - /// - /// # Errors - /// - If the worker cannot be created. 
- pub async fn update_workers( - &self, - update_workers: UpdateWorkers, - ) -> std::result::Result<(), RemoteProverError> { - let mut workers = self.workers.write().await; - info!("Current workers: {:?}", workers); - - let mut native_workers = Vec::new(); - - for worker_addr in update_workers.workers { - native_workers - .push(Worker::new(worker_addr, self.connection_timeout, self.timeout).await?); - } - - match update_workers.action { - Action::Add => { - for worker in native_workers { - if !workers.iter().any(|w| w == &worker) { - workers.push(worker); - } - } - }, - Action::Remove => { - for worker in native_workers { - workers.retain(|w| w != &worker); - } - }, - } - - info!("Workers updated: {:?}", workers); - WORKER_COUNT.set(i64::try_from(workers.len()).expect("worker count greater than i64::MAX")); - - Ok(()) - } - - /// Get the total number of current workers. - pub async fn num_workers(&self) -> usize { - self.workers.read().await.len() - } - - /// Get the number of busy workers. 
- pub async fn num_busy_workers(&self) -> usize { - self.workers.read().await.iter().filter(|w| !w.is_available()).count() - } - - /// Get the cached status response - pub fn get_cached_status(&self) -> ProxyStatus { - self.status_cache_receiver.borrow().clone() - } - - /// Update the status cache with current worker status - pub async fn update_status_cache(&self) { - let workers = self.workers.read().await; - let new_status = build_proxy_status_response(&workers, self.supported_proof_type); - self.status_cache_sender.send(new_status).expect("Failed to send new status"); - } -} - -// UTILS -// ================================================================================================ - -/// Rate limiter -static RATE_LIMITER: LazyLock = LazyLock::new(|| Rate::new(Duration::from_secs(1))); - -// REQUEST QUEUE -// ================================================================================================ - -/// Request queue holds the list of requests that are waiting to be processed by the workers and -/// the time they were enqueued. -/// It is used to keep track of the order of the requests to then assign them to the workers. 
-pub struct RequestQueue { - queue: RwLock>, -} - -impl RequestQueue { - /// Create a new empty request queue - pub fn new() -> Self { - QUEUE_SIZE.set(0); - Self { queue: RwLock::new(VecDeque::new()) } - } - - /// Get the length of the queue - pub async fn len(&self) -> usize { - self.queue.read().await.len() - } - - /// Enqueue a request - pub async fn enqueue(&self, request_id: Uuid) { - QUEUE_SIZE.inc(); - let mut queue = self.queue.write().await; - queue.push_back((request_id, Instant::now())); - } - - /// Dequeue a request - pub async fn dequeue(&self) -> Option { - let mut queue = self.queue.write().await; - // If the queue was empty, the queue size does not change - if let Some((request_id, queued_time)) = queue.pop_front() { - QUEUE_SIZE.dec(); - QUEUE_LATENCY.observe(queued_time.elapsed().as_secs_f64()); - Some(request_id) - } else { - None - } - } - - /// Peek at the first request in the queue - pub async fn peek(&self) -> Option { - let queue = self.queue.read().await; - queue.front().copied().map(|(request_id, _)| request_id) - } -} - -/// Shared state. It keeps track of the order of the requests to then assign them to the workers. -static QUEUE: LazyLock = LazyLock::new(RequestQueue::new); - -// OPENTELEMETRY CONTEXT INJECTION -// ================================================================================================ - -/// Pingora `RequestHeader` injector for OpenTelemetry trace context propagation. -/// -/// This allows the proxy to inject trace context into headers that will be forwarded -/// to worker nodes, enabling proper parent-child trace relationships. 
-struct PingoraHeaderInjector<'a>(&'a mut pingora::http::RequestHeader); - -impl opentelemetry::propagation::Injector for PingoraHeaderInjector<'_> { - /// Set a key and value in the `RequestHeader` using pingora's API - fn set(&mut self, key: &str, value: String) { - // Use pingora's insert_header method which handles the proper header insertion - // Convert key to owned string to satisfy lifetime requirements - if let Err(e) = self.0.insert_header(key.to_string(), value) { - // Log error but don't fail the request if header injection fails - tracing::warn!(target: COMPONENT, header = %key, err = %e, "Failed to inject OpenTelemetry header"); - } - } -} - -// REQUEST CONTEXT -// ================================================================================================ - -/// Custom context for the request/response lifecycle -/// -/// We use this context to keep track of the number of tries for a request, the unique ID for the -/// request, the worker that will process the request, a span that will be used for traces along -/// the transaction execution, and a timer to track how long the request took. 
-#[derive(Debug)] -pub struct RequestContext { - /// Number of tries for the request - tries: usize, - /// Unique ID for the request - request_id: Uuid, - /// Worker that will process the request - worker: Option, - /// Parent span for the request - parent_span: Span, - /// Time when the request was created - created_at: Instant, -} - -impl RequestContext { - /// Create a new request context - fn new() -> Self { - let request_id = Uuid::new_v4(); - Self { - tries: 0, - request_id, - worker: None, - parent_span: info_span!(target: COMPONENT, "proxy.new_request", request_id = request_id.to_string()), - created_at: Instant::now(), - } - } - - /// Set the worker that will process the request - fn set_worker(&mut self, worker: Worker) { - WORKER_REQUEST_COUNT.with_label_values(&[&worker.name()]).inc(); - self.worker = Some(worker); - } -} - -// LOAD BALANCER -// ================================================================================================ - -/// Wrapper around the load balancer that implements the [`ProxyHttp`] trait -/// -/// This wrapper is used to implement the [`ProxyHttp`] trait for [`Arc`]. -/// This is necessary because we want to share the load balancer between the proxy server and the -/// health check background service. -#[derive(Debug)] -pub struct LoadBalancer(pub Arc); - -/// Implements load-balancing of incoming requests across a pool of workers. -/// -/// At the backend-level, a request lifecycle works as follows: -/// - When a new requests arrives, [`LoadBalancer::request_filter()`] method is called. In this -/// method we apply IP-based rate-limiting to the request and check if the request queue is full. -/// In this method we also handle the special case update workers request. -/// - Next, the [`Self::upstream_peer()`] method is called. We use it to figure out which worker -/// will process the request. Inside `upstream_peer()`, we add the request to the queue of -/// requests. 
Once the request gets to the front of the queue, we forward it to an available -/// worker. This step is also in charge of setting the SNI, timeouts, and enabling HTTP/2. -/// Finally, we establish a connection with the worker. -/// - Before sending the request to the upstream server and if the connection succeed, the -/// [`Self::upstream_request_filter()`] method is called. In this method, we ensure that the -/// correct headers are forwarded for gRPC requests. -/// - If the connection fails, the [`Self::fail_to_connect()`] method is called. In this method, we -/// retry the request [`self.max_retries_per_request`] times. -/// - Once the worker processes the request (either successfully or with a failure), -/// [`Self::logging()`] method is called. In this method, we log the request lifecycle and set the -/// worker as available. -#[async_trait] -impl ProxyHttp for LoadBalancer { - type CTX = RequestContext; - fn new_ctx(&self) -> Self::CTX { - RequestContext::new() - } - - /// Decide whether to filter the request or not. Also, handle the special case of the update - /// workers request or the proxy status request. - /// - /// The proxy status request is handled separately because it is used by the health check - /// service to check the status of the proxy and returns immediate response. - /// - /// Here we apply IP-based rate-limiting to the request. We also check if the queue is full. - /// - /// If the request is rate-limited, we return a 429 response. Otherwise, we return false. 
- #[tracing::instrument(name = "proxy.request_filter", parent = &ctx.parent_span, skip(session))] - async fn request_filter(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result - where - Self::CTX: Send + Sync, - { - // Extract the client address early - let client_addr = match session.client_addr() { - Some(addr) => addr.to_string(), - None => { - return create_response_with_error_message( - session.as_downstream_mut(), - "No socket address".to_string(), - ) - .await - .map(|_| true); - }, - }; - - Span::current().record("client_addr", client_addr.clone()); - - let path = session.downstream_session.req_header().uri.path(); - Span::current().record("path", path); - - // Check if the request is a grpc proxy status request by checking the path - if path == PROXY_STATUS_PATH { - let status = self.0.get_cached_status(); - return write_grpc_response_to_session(session, status).await.map(|_| true); - } - - // Increment the request count - REQUEST_COUNT.inc(); - - let user_id = Some(client_addr); - - // Retrieve the current window requests - let curr_window_requests = RATE_LIMITER.observe(&user_id, 1); - - // Rate limit the request - if curr_window_requests > self.0.max_req_per_sec { - RATE_LIMITED_REQUESTS.inc(); - - // Only count a violation the first time in a given window - if curr_window_requests == self.0.max_req_per_sec + 1 { - RATE_LIMIT_VIOLATIONS.inc(); - } - - return create_too_many_requests_response(session, self.0.max_req_per_sec) - .await - .map(|_| true); - } - - let queue_len = QUEUE.len().await; - - info!("New request with ID: {}", ctx.request_id); - info!("Queue length: {}", queue_len); - - // Check if the queue is full - if queue_len >= self.0.max_queue_items { - return create_queue_full_response(session).await.map(|_| true); - } - - Ok(false) - } - - /// Returns [`HttpPeer`] corresponding to the worker that will handle the current request. 
- /// - /// Here we enqueue the request and wait for it to be at the front of the queue and a worker - /// becomes available, then we dequeue the request and process it. We then set the SNI, - /// timeouts, and enable HTTP/2. - /// - /// Note that the request will be assigned a worker here, and the worker will be removed from - /// the list of available workers once it reaches the [`Self::logging`] method. - #[tracing::instrument(name = "proxy.upstream_peer", parent = &ctx.parent_span, skip(_session))] - async fn upstream_peer( - &self, - _session: &mut Session, - ctx: &mut Self::CTX, - ) -> Result> { - let request_id = ctx.request_id; - - // Add the request to the queue. - QUEUE.enqueue(request_id).await; - - // Wait for the request to be at the front of the queue - loop { - // The request is at the front of the queue. - if QUEUE.peek().await.expect("Queue should not be empty") != request_id { - continue; - } - - // Check if there is an available worker - if let Some(worker) = self.0.pop_available_worker().await { - debug!("Worker {} picked up the request with ID: {}", worker.name(), request_id); - ctx.set_worker(worker); - break; - } - debug!("All workers are busy"); - tokio::time::sleep(self.0.available_workers_polling_interval).await; - } - - // Remove the request from the queue - QUEUE.dequeue().await; - - // Set SNI - let mut http_peer = HttpPeer::new( - ctx.worker.clone().expect("Failed to get worker").name(), - false, - String::new(), - ); - let peer_opts = - http_peer.get_mut_peer_options().ok_or(Error::new(ErrorType::InternalError))?; - - // Timeout settings - peer_opts.total_connection_timeout = Some(self.0.timeout); - peer_opts.connection_timeout = Some(self.0.connection_timeout); - - // Enable HTTP/2 - peer_opts.alpn = ALPN::H2; - - let peer = Box::new(http_peer); - Ok(peer) - } - - /// Applies the necessary filters to the request before sending it to the upstream server. 
- /// - /// Here we ensure that the correct headers are forwarded for gRPC requests and inject - /// the X-Request-ID header and OpenTelemetry trace context for trace correlation between proxy - /// and worker. - /// - /// This method is called right after [`Self::upstream_peer()`] returns a [`HttpPeer`] and a - /// connection is established with the worker. - #[tracing::instrument(name = "proxy.upstream_request_filter", parent = &_ctx.parent_span, skip(_session))] - async fn upstream_request_filter( - &self, - _session: &mut Session, - upstream_request: &mut RequestHeader, - _ctx: &mut Self::CTX, - ) -> Result<()> - where - Self::CTX: Send + Sync, - { - // Check if it's a gRPC request - if let Some(content_type) = upstream_request.headers.get("content-type") - && content_type == "application/grpc" - { - // Ensure the correct host and gRPC headers are forwarded - upstream_request.insert_header("content-type", "application/grpc")?; - } - - // Always inject X-Request-ID header for trace correlation - // This allows the worker traces to be correlated with the proxy traces - upstream_request.insert_header("x-request-id", _ctx.request_id.to_string())?; - - // Inject OpenTelemetry trace context for proper trace propagation - // This allows the worker trace to be a child of the proxy trace - { - use tracing_opentelemetry::OpenTelemetrySpanExt; - let ctx = tracing::Span::current().context(); - opentelemetry::global::get_text_map_propagator(|propagator| { - propagator.inject_context(&ctx, &mut PingoraHeaderInjector(upstream_request)); - }); - } - - Ok(()) - } - - /// Retry the request if the connection fails. 
- #[tracing::instrument(name = "proxy.fail_to_connect", parent = &ctx.parent_span, skip(_session))] - fn fail_to_connect( - &self, - _session: &mut Session, - peer: &HttpPeer, - ctx: &mut Self::CTX, - mut e: Box, - ) -> Box { - if ctx.tries > self.0.max_retries_per_request { - return e; - } - REQUEST_RETRIES.inc(); - ctx.tries += 1; - e.set_retry(true); - e - } - - /// Logs the request lifecycle in case that an error happened and sets the worker as available. - /// - /// This method is the last one in the request lifecycle, no matter if the request was - /// processed or not. - #[tracing::instrument(name = "proxy.logging", parent = &ctx.parent_span, skip(_session))] - async fn logging(&self, _session: &mut Session, e: Option<&Error>, ctx: &mut Self::CTX) - where - Self::CTX: Send + Sync, - { - if let Some(e) = e { - REQUEST_FAILURE_COUNT.inc(); - error!("Error: {:?}", e); - } - - // Mark the worker as available - if let Some(worker) = ctx.worker.take() { - self.0.add_available_worker(worker).await; - } - - REQUEST_LATENCY.observe(ctx.created_at.elapsed().as_secs_f64()); - - // Update the number of busy workers - WORKER_BUSY.set( - i64::try_from(self.0.num_busy_workers().await) - .expect("busy worker count greater than i64::MAX"), - ); - } - - // The following methods are a copy of the default implementation defined in the trait, but - // with tracing instrumentation. - // Pingora calls these methods to handle the request/response lifecycle internally and since - // the trait is defined in a different crate, we cannot add the tracing instrumentation there. - // We use the default implementation by implementing the method for our specific type, adding - // the tracing instrumentation and internally calling `ProxyHttp` methods. 
- // ============================================================================================ - #[tracing::instrument(name = "proxy.early_request_filter", parent = &ctx.parent_span, skip(_session))] - async fn early_request_filter( - &self, - _session: &mut Session, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl.early_request_filter(_session, &mut ()).await - } - - #[tracing::instrument(name = "proxy.connected_to_upstream", parent = &ctx.parent_span, skip(_session, _sock, _reused, _peer, _fd, _digest))] - async fn connected_to_upstream( - &self, - _session: &mut Session, - _reused: bool, - _peer: &HttpPeer, - #[cfg(unix)] _fd: std::os::unix::io::RawFd, - #[cfg(windows)] _sock: std::os::windows::io::RawSocket, - _digest: Option<&Digest>, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl - .connected_to_upstream(_session, _reused, _peer, _fd, _digest, &mut ()) - .await - } - - #[tracing::instrument(name = "proxy.request_body_filter", parent = &ctx.parent_span, skip(session, body))] - async fn request_body_filter( - &self, - session: &mut Session, - body: &mut Option, - end_of_stream: bool, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl - .request_body_filter(session, body, end_of_stream, &mut ()) - .await - } - - #[tracing::instrument(name = "proxy.upstream_response_filter", parent = &ctx.parent_span, skip(session, upstream_response))] - fn upstream_response_filter( - &self, - session: &mut Session, - upstream_response: &mut ResponseHeader, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl.upstream_response_filter(session, upstream_response, &mut ()) - } - - #[tracing::instrument(name = "proxy.response_filter", parent = &ctx.parent_span, skip(session, upstream_response))] - async fn response_filter( - &self, - session: &mut Session, - upstream_response: &mut ResponseHeader, - ctx: &mut Self::CTX, - ) -> Result<()> - where - Self::CTX: Send + Sync, - { - 
ProxyHttpDefaultImpl.response_filter(session, upstream_response, &mut ()).await - } - - #[tracing::instrument(name = "proxy.upstream_response_body_filter", parent = &ctx.parent_span, skip(session, body))] - fn upstream_response_body_filter( - &self, - session: &mut Session, - body: &mut Option, - end_of_stream: bool, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl.upstream_response_body_filter(session, body, end_of_stream, &mut ()) - } - - #[tracing::instrument(name = "proxy.response_body_filter", parent = &ctx.parent_span, skip(session, body))] - fn response_body_filter( - &self, - session: &mut Session, - body: &mut Option, - end_of_stream: bool, - ctx: &mut Self::CTX, - ) -> Result> - where - Self::CTX: Send + Sync, - { - ProxyHttpDefaultImpl.response_body_filter(session, body, end_of_stream, &mut ()) - } - - #[tracing::instrument(name = "proxy.fail_to_proxy", parent = &ctx.parent_span, skip(session))] - async fn fail_to_proxy( - &self, - session: &mut Session, - e: &Error, - ctx: &mut Self::CTX, - ) -> FailToProxy - where - Self::CTX: Send + Sync, - { - ProxyHttpDefaultImpl.fail_to_proxy(session, e, &mut ()).await - } - - #[tracing::instrument(name = "proxy.error_while_proxy", parent = &ctx.parent_span, skip(session))] - fn error_while_proxy( - &self, - peer: &HttpPeer, - session: &mut Session, - e: Box, - ctx: &mut Self::CTX, - client_reused: bool, - ) -> Box { - ProxyHttpDefaultImpl.error_while_proxy(peer, session, e, &mut (), client_reused) - } -} - -// PROXY HTTP DEFAULT IMPLEMENTATION -// ================================================================================================ - -/// Default implementation of the [`ProxyHttp`] trait. -/// -/// It is used to provide the default methods of the trait in order for the [`LoadBalancer`] to -/// implement the trait adding tracing instrumentation but without having to copy all default -/// implementations. 
-struct ProxyHttpDefaultImpl; - -#[async_trait] -impl ProxyHttp for ProxyHttpDefaultImpl { - type CTX = (); - fn new_ctx(&self) {} - - /// This method is the only one that does not have a default implementation in the trait. - async fn upstream_peer( - &self, - _session: &mut Session, - _ctx: &mut Self::CTX, - ) -> Result> { - unimplemented!("This is a dummy implementation, should not be called") - } -} - -// HELPERS -// ================================================================================================ - -/// Builds a `ProxyStatusResponse` from a list of workers and a supported proof type. -fn build_proxy_status_response(workers: &[Worker], supported_proof_type: ProofType) -> ProxyStatus { - let worker_statuses: Vec = - workers.iter().map(ProxyWorkerStatus::from).collect(); - ProxyStatus { - version: env!("CARGO_PKG_VERSION").to_string(), - supported_proof_type: supported_proof_type.into(), - workers: worker_statuses, - } -} diff --git a/bin/remote-prover/src/proxy/update_workers.rs b/bin/remote-prover/src/proxy/update_workers.rs deleted file mode 100644 index 320ac5a67..000000000 --- a/bin/remote-prover/src/proxy/update_workers.rs +++ /dev/null @@ -1,152 +0,0 @@ -use core::fmt; -use std::sync::Arc; - -use miden_node_utils::ErrorReport; -use miden_remote_prover::COMPONENT; -use pingora::apps::{HttpServerApp, HttpServerOptions, ReusedHttpStream}; -use pingora::http::ResponseHeader; -use pingora::protocols::http::ServerSession; -use pingora::server::ShutdownWatch; -use tonic::async_trait; -use tracing::{error, info}; - -use super::LoadBalancerState; -use crate::commands::update_workers::UpdateWorkers; -use crate::utils::create_response_with_error_message; - -/// The Load Balancer Updater Service. -/// -/// This service is responsible for updating the list of workers in the load balancer. -pub(crate) struct LoadBalancerUpdateService { - lb_state: Arc, - server_opts: HttpServerOptions, -} - -/// Manually implement Debug for `LoadBalancerUpdateService`. 
-/// [`HttpServerOptions`] does not implement Debug, so we cannot derive Debug for -/// [`LoadBalancerUpdateService`], which is needed for the tracing instrumentation. -impl fmt::Debug for LoadBalancerUpdateService { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LBUpdaterService") - .field("lb_state", &self.lb_state) - .finish_non_exhaustive() - } -} - -impl LoadBalancerUpdateService { - pub(crate) fn new(lb_state: Arc) -> Self { - let mut server_opts = HttpServerOptions::default(); - server_opts.h2c = true; - - Self { lb_state, server_opts } - } -} - -#[async_trait] -impl HttpServerApp for LoadBalancerUpdateService { - /// Handles the update workers request. - /// - /// # Behavior - /// - Reads the HTTP request from the session. - /// - If query parameters are present, attempts to parse them as an `UpdateWorkers` object. - /// - If the parsing fails, returns an error response. - /// - If successful, updates the list of workers by calling `update_workers`. - /// - If the update is successful, returns the count of available workers. - /// - /// # Errors - /// - If the HTTP request cannot be read. - /// - If the query parameters cannot be parsed. - /// - If the workers cannot be updated. - /// - If the response cannot be created. 
- #[tracing::instrument(target = COMPONENT, name = "lb_updater_service.process_new_http", skip(http))] - async fn process_new_http( - self: &Arc, - mut http: ServerSession, - _shutdown: &ShutdownWatch, - ) -> Option { - match http.read_request().await { - Ok(res) => { - if !res { - error!("Failed to read request header"); - create_response_with_error_message( - &mut http, - "Failed to read request header".to_string(), - ) - .await - .ok(); - return None; - } - }, - Err(e) => { - error!("HTTP server fails to read from downstream: {e}"); - create_response_with_error_message( - &mut http, - format!("HTTP server fails to read from downstream: {e}"), - ) - .await - .ok(); - return None; - }, - } - - info!("Successfully get a new request to update workers"); - - // Extract and parse query parameters, if there are not any, return early. - let Some(query_params) = http.req_header().as_ref().uri.query() else { - let error_message = "No query parameters provided".to_string(); - error!("{}", error_message); - create_response_with_error_message(&mut http, error_message).await.ok(); - return None; - }; - - let update_workers: Result = serde_qs::from_str(query_params); - let update_workers = match update_workers { - Ok(workers) => workers, - Err(err) => { - let error_message = err.as_report_context("failed to parse query parameters"); - error!("{}", error_message); - create_response_with_error_message(&mut http, error_message).await.ok(); - return None; - }, - }; - - // Update workers and handle potential errors. 
- if let Err(err) = self.lb_state.update_workers(update_workers).await { - let error_message = err.as_report_context("failed to update workers"); - error!("{}", error_message); - create_response_with_error_message(&mut http, error_message).await.ok(); - return None; - } - - create_workers_updated_response(&mut http, self.lb_state.num_workers().await) - .await - .ok(); - - info!("Successfully updated workers"); - - None - } - - /// Provide HTTP server options used to override default behavior. This function will be called - /// every time a new connection is processed. - fn server_options(&self) -> Option<&HttpServerOptions> { - Some(&self.server_opts) - } -} - -// HELPERS -// ================================================================================================ - -/// Create a 200 response for updated workers -/// -/// It will set the X-Worker-Count header to the number of workers. -async fn create_workers_updated_response( - session: &mut ServerSession, - workers: usize, -) -> pingora_core::Result { - let mut header = ResponseHeader::build(200, None)?; - header.insert_header("X-Worker-Count", workers.to_string())?; - session.set_keepalive(None); - session.write_response_header(Box::new(header)).await?; - Ok(true) -} diff --git a/bin/remote-prover/src/proxy/worker.rs b/bin/remote-prover/src/proxy/worker.rs deleted file mode 100644 index ffa8f708e..000000000 --- a/bin/remote-prover/src/proxy/worker.rs +++ /dev/null @@ -1,419 +0,0 @@ -use std::sync::LazyLock; -use std::time::{Duration, Instant}; - -use anyhow::Context; -use miden_node_utils::ErrorReport; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::ProofType; -use miden_remote_prover::error::RemoteProverError; -use miden_remote_prover::generated::ProxyWorkerStatus; -use miden_remote_prover::generated::remote_prover::worker_status_api_client::WorkerStatusApiClient; -use pingora::lb::Backend; -use semver::{Version, VersionReq}; -use serde::Serialize; -use tonic::transport::Channel; -use 
tracing::{error, info}; - -use super::metrics::WORKER_UNHEALTHY; - -/// The maximum exponent for the backoff. -/// -/// The maximum backoff is 2^[`MAX_BACKOFF_EXPONENT`] seconds. -const MAX_BACKOFF_EXPONENT: usize = 9; - -/// The version of the proxy. -/// -/// This is the version of the proxy that is used to check the version of the worker. -const MRP_PROXY_VERSION: &str = env!("CARGO_PKG_VERSION"); - -/// The version requirement for the worker. -/// -/// This is the version requirement for the worker that is used to check the version of the worker. -static WORKER_VERSION_REQUIREMENT: LazyLock = LazyLock::new(|| { - let current = - Version::parse(MRP_PROXY_VERSION).expect("Proxy version should be valid at this point"); - VersionReq::parse(&format!("~{}.{}", current.major, current.minor)) - .expect("Version should be valid at this point") -}); - -// WORKER -// ================================================================================================ - -/// A worker used for processing of requests. -/// -/// The worker is used to process requests. -/// It has a backend, a status client, a health status, and a version. -/// The backend is used to send requests to the worker. -/// The status client is used to check the status of the worker. -/// The health status is used to determine if the worker is healthy or unhealthy. -/// The version is used to check if the worker is compatible with the proxy. -/// The `is_available` is used to determine if the worker is available to process requests. -/// The `connection_timeout` is used to set the timeout for the connection to the worker. -/// The `total_timeout` is used to set the timeout for the total request. -#[derive(Debug, Clone)] -pub struct Worker { - backend: Backend, - status_client: Option>, - is_available: bool, - health_status: WorkerHealthStatus, - version: String, - connection_timeout: Duration, - total_timeout: Duration, -} - -/// The health status of a worker. 
-/// -/// A worker can be either healthy or unhealthy. -/// If the worker is unhealthy, it will have a number of failed attempts. -/// The number of failed attempts is incremented each time the worker is unhealthy. -#[derive(Debug, Clone, PartialEq, Serialize)] -pub enum WorkerHealthStatus { - /// The worker is healthy. - Healthy, - /// The worker is unhealthy. - Unhealthy { - /// The number of failed attempts. - num_failed_attempts: usize, - /// The timestamp of the first failure. - #[serde(skip_serializing)] - first_fail_timestamp: Instant, - /// The reason for the failure. - reason: String, - }, - /// The worker status is unknown. - Unknown, -} - -impl Worker { - // CONSTRUCTOR - // -------------------------------------------------------------------------------------------- - - /// Creates a new worker and a gRPC status client for the given worker address. - /// - /// # Errors - /// - Returns [`RemoteProverError::BackendCreationFailed`] if the worker address is invalid. - pub async fn new( - worker_addr: String, - connection_timeout: Duration, - total_timeout: Duration, - ) -> Result { - let backend = - Backend::new(&worker_addr).map_err(RemoteProverError::BackendCreationFailed)?; - - let (status_client, health_status) = - match create_status_client(&worker_addr, connection_timeout, total_timeout).await { - Ok(client) => (Some(client), WorkerHealthStatus::Unknown), - Err(err) => { - error!("Failed to create status client for worker {}: {}", worker_addr, err); - ( - None, - WorkerHealthStatus::Unhealthy { - num_failed_attempts: 1, - first_fail_timestamp: Instant::now(), - reason: err.as_report_context("failed to create status client"), - }, - ) - }, - }; - - Ok(Self { - backend, - is_available: health_status == WorkerHealthStatus::Unknown, - status_client, - health_status, - version: String::new(), - connection_timeout, - total_timeout, - }) - } - - // MUTATORS - // -------------------------------------------------------------------------------------------- - - 
/// Attempts to recreate the status client for this worker. - /// - /// This method will try to create a new gRPC status client using the worker's address - /// and timeout configurations. If successful, it will update the worker's `status_client` - /// field. - /// - /// # Returns - /// - `Ok(())` if the client was successfully created - /// - `Err(RemoteProverError)` if the client creation failed - async fn recreate_status_client(&mut self) -> Result<(), RemoteProverError> { - let name = self.name(); - match create_status_client(&name, self.connection_timeout, self.total_timeout).await { - Ok(client) => { - self.status_client = Some(client); - Ok(()) - }, - Err(err) => { - error!("Failed to recreate status client for worker {}: {}", name, err); - Err(err) - }, - } - } - - /// Checks the current status of the worker and returns the result without updating worker - /// state. - /// - /// Returns `Ok(())` if the worker is healthy and compatible, or `Err(reason)` if there's an - /// issue. The caller should use `update_status` to apply the result to the worker's health - /// status. 
- #[tracing::instrument(target = COMPONENT, name = "worker.check_status")] - pub async fn check_status(&mut self, supported_proof_type: ProofType) -> Result<(), String> { - if !self.should_do_health_check() { - return Ok(()); - } - - // If we don't have a status client, try to recreate it - if self.status_client.is_none() { - match self.recreate_status_client().await { - Ok(()) => { - info!("Successfully recreated status client for worker {}", self.name()); - }, - Err(err) => { - return Err(err.as_report_context("failed to recreate status client")); - }, - } - } - - let worker_status = match self.status_client.as_mut().unwrap().status(()).await { - Ok(response) => response.into_inner(), - Err(e) => { - error!("Failed to check worker status ({}): {}", self.name(), e); - return Err(e.message().to_string()); - }, - }; - - if worker_status.version.is_empty() { - return Err("Worker version is empty".to_string()); - } - - if !is_valid_version(&WORKER_VERSION_REQUIREMENT, &worker_status.version).unwrap_or(false) { - return Err(format!("Worker version is invalid ({})", worker_status.version)); - } - - self.version = worker_status.version; - - let worker_supported_proof_type = ProofType::try_from(worker_status.supported_proof_type) - .inspect_err(|err| { - error!(%err, name=%self.name(), "Failed to convert worker supported proof type"); - })?; - - if supported_proof_type != worker_supported_proof_type { - return Err(format!("Unsupported proof type: {supported_proof_type}")); - } - - Ok(()) - } - - /// Updates the worker's health status based on the result from `check_status`. - /// - /// If the result is `Ok(())`, the worker is marked as healthy. - /// If the result is `Err(reason)`, the worker is marked as unhealthy with the failure reason. 
- #[tracing::instrument(target = COMPONENT, name = "worker.update_status")] - pub fn update_status(&mut self, check_result: Result<(), String>) { - match check_result { - Ok(()) => { - self.set_health_status(WorkerHealthStatus::Healthy); - }, - Err(reason) => { - let failed_attempts = self.num_failures(); - self.set_health_status(WorkerHealthStatus::Unhealthy { - num_failed_attempts: failed_attempts + 1, - first_fail_timestamp: match &self.health_status { - WorkerHealthStatus::Unhealthy { first_fail_timestamp, .. } => { - *first_fail_timestamp - }, - _ => Instant::now(), - }, - reason, - }); - }, - } - } - - /// Sets the worker availability. - pub fn set_availability(&mut self, is_available: bool) { - self.is_available = is_available; - } - - // PUBLIC ACCESSORS - // -------------------------------------------------------------------------------------------- - - /// Returns the number of failures the worker has had. - pub fn num_failures(&self) -> usize { - match &self.health_status { - WorkerHealthStatus::Healthy | WorkerHealthStatus::Unknown => 0, - WorkerHealthStatus::Unhealthy { - num_failed_attempts: failed_attempts, - first_fail_timestamp: _, - reason: _, - } => *failed_attempts, - } - } - - /// Returns the health status of the worker. - pub fn health_status(&self) -> &WorkerHealthStatus { - &self.health_status - } - - /// Returns the version of the worker. - pub fn version(&self) -> &str { - &self.version - } - - /// Returns the worker availability. - /// - /// A worker is available if it is healthy and ready to process requests. - pub fn is_available(&self) -> bool { - self.is_available - } - - /// Returns the worker name. - pub fn name(&self) -> String { - self.backend.addr.to_string() - } - - /// Returns whether the worker is healthy. - /// - /// This function will return `true` if the worker is healthy or the health status is unknown. - /// Otherwise, it will return `false`. 
- pub fn is_healthy(&self) -> bool { - !matches!(self.health_status, WorkerHealthStatus::Unhealthy { .. }) - } - - // PRIVATE HELPERS - // -------------------------------------------------------------------------------------------- - - /// Returns whether the worker should do a health check. - /// - /// A worker should do a health check if it is healthy or if the time since the first failure - /// is greater than the time since the first failure power of 2. - /// - /// The maximum exponent is [`MAX_BACKOFF_EXPONENT`], which corresponds to a backoff of - /// 2^[`MAX_BACKOFF_EXPONENT`] seconds. - fn should_do_health_check(&self) -> bool { - match self.health_status { - WorkerHealthStatus::Healthy | WorkerHealthStatus::Unknown => true, - WorkerHealthStatus::Unhealthy { - num_failed_attempts: failed_attempts, - first_fail_timestamp, - reason: _, - } => { - let time_since_first_failure = first_fail_timestamp.elapsed(); - time_since_first_failure - > Duration::from_secs( - 2u64.pow(failed_attempts.min(MAX_BACKOFF_EXPONENT) as u32), - ) - }, - } - } - - /// Sets the health status of the worker. - /// - /// This function will update the health status of the worker and update the worker availability - /// based on the new health status. - fn set_health_status(&mut self, health_status: WorkerHealthStatus) { - let was_healthy = self.is_healthy(); - self.health_status = health_status; - match &self.health_status { - WorkerHealthStatus::Healthy | WorkerHealthStatus::Unknown => { - if !was_healthy { - self.is_available = true; - } - }, - WorkerHealthStatus::Unhealthy { .. 
} => { - WORKER_UNHEALTHY.with_label_values(&[&self.name()]).inc(); - self.is_available = false; - }, - } - } -} - -// PARTIAL EQUALITY -// ================================================================================================ - -impl PartialEq for Worker { - fn eq(&self, other: &Self) -> bool { - self.backend == other.backend - } -} - -// CONVERSIONS -// ================================================================================================ - -/// Conversion from a Worker reference to a `WorkerStatus` proto message. -impl From<&Worker> for ProxyWorkerStatus { - fn from(worker: &Worker) -> Self { - use miden_remote_prover::generated::remote_prover::WorkerHealthStatus as ProtoWorkerHealthStatus; - Self { - name: worker.name(), - version: worker.version().to_string(), - status: match worker.health_status() { - WorkerHealthStatus::Healthy => ProtoWorkerHealthStatus::Healthy, - WorkerHealthStatus::Unhealthy { .. } => ProtoWorkerHealthStatus::Unhealthy, - WorkerHealthStatus::Unknown => ProtoWorkerHealthStatus::Unknown, - } as i32, - } - } -} - -// HELPER FUNCTIONS -// ================================================================================================ - -/// Create a gRPC [`StatusApiClient`] for the given worker address. -/// -/// # Errors -/// - [`RemoteProverError::InvalidURI`] if the worker address is invalid. -/// - [`RemoteProverError::ConnectionFailed`] if the connection to the worker fails. -async fn create_status_client( - address: &str, - connection_timeout: Duration, - total_timeout: Duration, -) -> Result, RemoteProverError> { - let channel = Channel::from_shared(format!("http://{address}")) - .map_err(|err| RemoteProverError::InvalidURI(err, address.to_string()))? 
- .connect_timeout(connection_timeout) - .timeout(total_timeout) - .connect() - .await - .map_err(|err| RemoteProverError::ConnectionFailed(err, address.to_string()))?; - - Ok(WorkerStatusApiClient::new(channel)) -} - -/// Returns true if the version has major and minor versions match that of the required version. -/// Returns false otherwise. -/// -/// # Errors -/// Returns an error if either of the versions is malformed. -fn is_valid_version(version_req: &VersionReq, version: &str) -> anyhow::Result { - let received = Version::parse(version).context("Invalid worker version: {err}")?; - - Ok(version_req.matches(&received)) -} - -// TESTS -// ================================================================================================ - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_is_valid_version() { - let version_req = VersionReq::parse("~1.0").unwrap(); - assert!(is_valid_version(&version_req, "1.0.0").unwrap()); - assert!(is_valid_version(&version_req, "1.0.1").unwrap()); - assert!(is_valid_version(&version_req, "1.0.12").unwrap()); - assert!(is_valid_version(&version_req, "1.0").is_err()); - assert!(!is_valid_version(&version_req, "2.0.0").unwrap()); - assert!(!is_valid_version(&version_req, "1.1.0").unwrap()); - assert!(!is_valid_version(&version_req, "0.9.0").unwrap()); - assert!(!is_valid_version(&version_req, "0.9.1").unwrap()); - assert!(!is_valid_version(&version_req, "0.10.0").unwrap()); - assert!(is_valid_version(&version_req, "miden").is_err()); - assert!(is_valid_version(&version_req, "1.miden.12").is_err()); - } -} diff --git a/bin/remote-prover/src/server/mod.rs b/bin/remote-prover/src/server/mod.rs new file mode 100644 index 000000000..2ca74f539 --- /dev/null +++ b/bin/remote-prover/src/server/mod.rs @@ -0,0 +1,103 @@ +use std::num::NonZeroUsize; + +use anyhow::Context; +use miden_node_utils::cors::cors_for_grpc_web_layer; +use miden_node_utils::panic::catch_panic_layer_fn; +use 
miden_node_utils::tracing::grpc::grpc_trace_fn; +use proof_kind::ProofKind; +use tokio::net::TcpListener; +use tokio::task::JoinHandle; +use tokio_stream::wrappers::TcpListenerStream; +use tonic_web::GrpcWebLayer; +use tower_http::catch_panic::CatchPanicLayer; +use tower_http::trace::TraceLayer; + +use crate::generated::api_server::ApiServer; +use crate::server::service::ProverService; + +mod proof_kind; +mod prover; +mod service; +mod status; + +#[cfg(test)] +mod tests; + +/// A gRPC server providing a proving service for the Miden blockchain. +#[derive(clap::Parser)] +pub struct Server { + /// The port the gRPC server will be hosted on. + #[arg(long, default_value = "50051", env = "MIDEN_PROVER_PORT")] + port: u16, + /// The proof type that the prover will be handling. + #[arg(long, value_enum, env = "MIDEN_PROVER_KIND")] + kind: ProofKind, + /// Maximum time allowed for a proof request to complete. Once exceeded, the request is + /// aborted. + #[arg(long, default_value = "60s", env = "MIDEN_PROVER_TIMEOUT", value_parser = humantime::parse_duration)] + timeout: std::time::Duration, + /// Maximum number of concurrent proof requests that the prover will allow. + /// + /// Note that the prover only proves one request at a time; the rest are queued. This capacity + /// is used to limit the number of requests that can be queued at any given time, and includes + /// the one request that is currently being processed. + #[arg(long, default_value_t = NonZeroUsize::new(1).unwrap(), env = "MIDEN_PROVER_CAPACITY")] + capacity: NonZeroUsize, +} + +impl Server { + /// Spawns the prover server, returning its handle and the port it is listening on. + pub async fn spawn(&self) -> anyhow::Result<(JoinHandle>, u16)> { + let listener = TcpListener::bind(format!("0.0.0.0:{}", self.port)) + .await + .context("failed to bind to gRPC port")?; + + // We do this to get the actual port if configured with `self.port=0`. 
+ let port = listener + .local_addr() + .expect("local address should exist for a tcp listener") + .port(); + + tracing::info!( + server.timeout=%humantime::Duration::from(self.timeout), + server.capacity=self.capacity, + proof.kind = %self.kind, + server.port = port, + "proof server listening" + ); + + let status_service = status::StatusService::new(self.kind); + let prover_service = ProverService::with_capacity(self.kind, self.capacity); + let prover_service = ApiServer::new(prover_service); + + let reflection_service = tonic_reflection::server::Builder::configure() + .register_file_descriptor_set(miden_node_proto_build::remote_prover_api_descriptor()) + .register_encoded_file_descriptor_set(tonic_health::pb::FILE_DESCRIPTOR_SET) + .build_v1() + .context("failed to build reflection service")?; + + // Create a gRPC health reporter. + let (health_reporter, health_service) = tonic_health::server::health_reporter(); + + // Mark the service as serving + health_reporter.set_serving::>().await; + + let server = tonic::transport::Server::builder() + .accept_http1(true) + .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) + .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) + .layer(cors_for_grpc_web_layer()) + .layer(GrpcWebLayer::new()) + .timeout(self.timeout) + .add_service(prover_service) + .add_service(status_service) + .add_service(health_service) + .add_service(reflection_service) + .serve_with_incoming(TcpListenerStream::new(listener)); + + let server = + tokio::spawn(async move { server.await.context("failed while serving proof server") }); + + Ok((server, port)) + } +} diff --git a/bin/remote-prover/src/server/proof_kind.rs b/bin/remote-prover/src/server/proof_kind.rs new file mode 100644 index 000000000..ccd72ca30 --- /dev/null +++ b/bin/remote-prover/src/server/proof_kind.rs @@ -0,0 +1,35 @@ +use crate::generated as proto; + +/// Specifies the type of proof supported by the remote prover. 
+#[derive(Debug, Clone, Copy, PartialEq, clap::ValueEnum)] +pub enum ProofKind { + Transaction, + Batch, + Block, +} + +impl From for ProofKind { + fn from(value: proto::ProofType) -> Self { + match value { + proto::ProofType::Transaction => ProofKind::Transaction, + proto::ProofType::Batch => ProofKind::Batch, + proto::ProofType::Block => ProofKind::Block, + } + } +} + +impl std::fmt::Display for ProofKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ProofKind::Transaction => write!(f, "transaction"), + ProofKind::Batch => write!(f, "batch"), + ProofKind::Block => write!(f, "block"), + } + } +} + +impl miden_node_utils::tracing::ToValue for ProofKind { + fn to_value(&self) -> opentelemetry::Value { + self.to_string().into() + } +} diff --git a/bin/remote-prover/src/server/prover.rs b/bin/remote-prover/src/server/prover.rs new file mode 100644 index 000000000..3a163a190 --- /dev/null +++ b/bin/remote-prover/src/server/prover.rs @@ -0,0 +1,122 @@ +use miden_block_prover::LocalBlockProver; +use miden_node_proto::BlockProofRequest; +use miden_node_utils::ErrorReport; +use miden_node_utils::tracing::OpenTelemetrySpanExt; +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::batch::{ProposedBatch, ProvenBatch}; +use miden_protocol::block::BlockProof; +use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; +use miden_tx::LocalTransactionProver; +use miden_tx_batch_prover::LocalBatchProver; +use tracing::instrument; + +use crate::COMPONENT; +use crate::generated::{self as proto}; +use crate::server::proof_kind::ProofKind; + +/// An enum representing the different types of provers available. +pub enum Prover { + Transaction(LocalTransactionProver), + Batch(LocalBatchProver), + Block(LocalBlockProver), +} + +impl Prover { + /// Constructs a [`Prover`] of the specified [`ProofKind`]. 
+ pub fn new(proof_type: ProofKind) -> Self { + match proof_type { + ProofKind::Transaction => Self::Transaction(LocalTransactionProver::default()), + ProofKind::Batch => Self::Batch(LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL)), + ProofKind::Block => Self::Block(LocalBlockProver::new(MIN_PROOF_SECURITY_LEVEL)), + } + } + + /// Proves a [`ProofRequest`] using the appropriate prover implementation as specified during + /// construction. + pub fn prove(&self, request: proto::ProofRequest) -> Result { + match self { + Prover::Transaction(prover) => prover.prove_request(request), + Prover::Batch(prover) => prover.prove_request(request), + Prover::Block(prover) => prover.prove_request(request), + } + } +} + +/// This trait abstracts over proof request handling by providing a common interface for our +/// different provers. +/// +/// It standardizes the proving process by providing default implementations for the decoding of +/// requests, and encoding of response. Notably it also standardizes the instrumentation, though +/// implementations should still add attributes that can only be known post-decoding of the request. +/// +/// Implementations of this trait only need to provide the input and outputs types, as well as the +/// proof implementation. +trait ProveRequest { + type Input: miden_protocol::utils::Deserializable; + type Output: miden_protocol::utils::Serializable; + + fn prove(&self, input: Self::Input) -> Result; + + /// Entry-point to the proof request handling. + /// + /// Decodes the request, proves it, and encodes the response. + fn prove_request(&self, request: proto::ProofRequest) -> Result { + Self::decode_request(request) + .and_then(|input| { + // We cannot #[instrument] the trait's prove method because it lacks an + // implementation, so we do it manually. 
+ tracing::info_span!("prove", target = COMPONENT).in_scope(|| { + self.prove(input).inspect_err(|e| tracing::Span::current().set_error(e)) + }) + }) + .map(|output| Self::encode_response(output)) + } + + #[instrument(target=COMPONENT, skip_all, err)] + fn decode_request(request: proto::ProofRequest) -> Result { + use miden_protocol::utils::Deserializable; + + Self::Input::read_from_bytes(&request.payload).map_err(|e| { + tonic::Status::invalid_argument(e.as_report_context("failed to decode request")) + }) + } + + #[instrument(target=COMPONENT, skip_all)] + fn encode_response(output: Self::Output) -> proto::Proof { + use miden_protocol::utils::Serializable; + + proto::Proof { payload: output.to_bytes() } + } +} + +impl ProveRequest for LocalTransactionProver { + type Input = TransactionInputs; + type Output = ProvenTransaction; + + fn prove(&self, input: Self::Input) -> Result { + self.prove(input).map_err(|e| { + tonic::Status::internal(e.as_report_context("failed to prove transaction")) + }) + } +} + +impl ProveRequest for LocalBatchProver { + type Input = ProposedBatch; + type Output = ProvenBatch; + + fn prove(&self, input: Self::Input) -> Result { + self.prove(input) + .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove batch"))) + } +} + +impl ProveRequest for LocalBlockProver { + type Input = BlockProofRequest; + type Output = BlockProof; + + fn prove(&self, input: Self::Input) -> Result { + let BlockProofRequest { tx_batches, block_header, block_inputs } = input; + self.prove(tx_batches, &block_header, block_inputs) + .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove block"))) + } +} diff --git a/bin/remote-prover/src/server/service.rs b/bin/remote-prover/src/server/service.rs new file mode 100644 index 000000000..4a72147a6 --- /dev/null +++ b/bin/remote-prover/src/server/service.rs @@ -0,0 +1,88 @@ +use std::num::NonZeroUsize; + +use miden_node_utils::tracing::OpenTelemetrySpanExt; +use tokio::sync::{Mutex, 
MutexGuard, SemaphorePermit}; +use tracing::instrument; + +use crate::server::proof_kind::ProofKind; +use crate::server::prover::Prover; +use crate::{COMPONENT, generated as proto}; + +pub struct ProverService { + permits: tokio::sync::Semaphore, + prover: tokio::sync::Mutex, + kind: ProofKind, +} + +impl ProverService { + pub fn with_capacity(kind: ProofKind, capacity: NonZeroUsize) -> Self { + let permits = tokio::sync::Semaphore::new(capacity.get()); + let prover = Mutex::new(Prover::new(kind)); + Self { permits, prover, kind } + } + + fn is_supported(&self, kind: ProofKind) -> bool { + self.kind == kind + } + + #[instrument(target=COMPONENT, skip_all, err)] + fn acquire_permit(&self) -> Result, tonic::Status> { + self.permits + .try_acquire() + .map_err(|_| tonic::Status::resource_exhausted("proof queue is full")) + } + + #[instrument(target=COMPONENT, skip_all)] + async fn acquire_prover(&self) -> MutexGuard<'_, Prover> { + self.prover.lock().await + } +} + +#[async_trait::async_trait] +impl proto::api_server::Api for ProverService { + async fn prove( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + // Record X-Request-ID header for trace correlation + let request_id = request + .metadata() + .get("x-request-id") + .and_then(|v| v.to_str().ok()) + .unwrap_or("unknown"); + tracing::Span::current().set_attribute("request.id", request_id); + + // Check that the proof type is supported. + let request = request.into_inner(); + // Protobuf enums return a default value if the enum is set to an unknown value. + // This round trip checks that the value is valid. + if request.proof_type() as i32 != request.proof_type { + return Err(tonic::Status::invalid_argument("unknown proof_type value")); + } + let proof_kind = ProofKind::from(request.proof_type()); + tracing::Span::current().set_attribute("request.kind", proof_kind); + + // Reject unsupported proof types early so they don't clog the queue. 
+ if !self.is_supported(proof_kind) { + return Err(tonic::Status::invalid_argument("unsupported proof type")); + } + + // This semaphore acts like a queue, but with a fixed capacity. + // + // We need to hold this until our request is processed to ensure that the queue capacity is + // not exceeded. + let _permit = self.acquire_permit()?; + + // This mutex is fair and uses FIFO ordering. + let prover = self.acquire_prover().await; + + // Blocking in place is fairly safe since we guarantee that only a single request is + // processed at a time. + // + // This has the downside that requests being proven cannot be cancelled since we are now + // outside the async runtime. This could occur if the server timeout is exceeded, or + // the client cancels the request. A different approach is technically possible, but + // would require more complex logic to handle cancellation in tandem with sync. + tokio::task::block_in_place(|| prover.prove(request)).map(tonic::Response::new) + } +} diff --git a/bin/remote-prover/src/api/status.rs b/bin/remote-prover/src/server/status.rs similarity index 51% rename from bin/remote-prover/src/api/status.rs rename to bin/remote-prover/src/server/status.rs index bb537b804..6922f7616 100644 --- a/bin/remote-prover/src/api/status.rs +++ b/bin/remote-prover/src/server/status.rs @@ -1,25 +1,26 @@ +use proto::worker_status_api_server::WorkerStatusApiServer; use tonic::{Request, Response, Status}; -use crate::api::prover::ProofType; use crate::generated::worker_status_api_server::WorkerStatusApi; use crate::generated::{self as proto}; +use crate::server::proof_kind::ProofKind; -pub struct StatusRpcApi { - proof_type: ProofType, +pub struct StatusService { + kind: ProofKind, } -impl StatusRpcApi { - pub fn new(proof_type: ProofType) -> Self { - Self { proof_type } +impl StatusService { + pub fn new(kind: ProofKind) -> WorkerStatusApiServer { + WorkerStatusApiServer::new(Self { kind }) } } #[async_trait::async_trait] -impl WorkerStatusApi for 
StatusRpcApi { +impl WorkerStatusApi for StatusService { async fn status(&self, _: Request<()>) -> Result, Status> { Ok(Response::new(proto::WorkerStatus { version: env!("CARGO_PKG_VERSION").to_string(), - supported_proof_type: self.proof_type as i32, + supported_proof_type: self.kind as i32, })) } } diff --git a/bin/remote-prover/src/server/tests.rs b/bin/remote-prover/src/server/tests.rs new file mode 100644 index 000000000..8172c344b --- /dev/null +++ b/bin/remote-prover/src/server/tests.rs @@ -0,0 +1,372 @@ +use std::collections::BTreeMap; +use std::num::NonZeroUsize; +use std::sync::Arc; +use std::time::Duration; + +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::batch::{ProposedBatch, ProvenBatch}; +use miden_protocol::note::NoteType; +use miden_protocol::testing::account_id::{ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER}; +use miden_protocol::transaction::{ExecutedTransaction, ProvenTransaction}; +use miden_testing::{Auth, MockChainBuilder}; +use miden_tx::utils::{Deserializable, Serializable}; +use miden_tx::{LocalTransactionProver, TransactionVerifier}; +use miden_tx_batch_prover::LocalBatchProver; + +use crate::generated::api_client::ApiClient; +use crate::generated::{Proof, ProofRequest, ProofType}; +use crate::server::Server; +use crate::server::proof_kind::ProofKind; + +/// A gRPC client with which to interact with the server. +#[derive(Clone)] +struct Client { + inner: ApiClient, +} + +impl Client { + async fn connect(port: u16) -> Self { + let inner = ApiClient::connect(format!("http://127.0.0.1:{port}")) + .await + .expect("client should connect"); + + Self { inner } + } + + async fn submit_request(&mut self, request: ProofRequest) -> Result { + self.inner.prove(request).await.map(tonic::Response::into_inner) + } +} + +impl ProofRequest { + /// Generates a proof request for a transaction using [`MockChain`]. 
+ fn from_tx(tx: &ExecutedTransaction) -> Self { + let tx_inputs = tx.tx_inputs().clone(); + + Self { + proof_type: ProofType::Transaction as i32, + payload: tx_inputs.to_bytes(), + } + } + + fn from_batch(batch: &ProposedBatch) -> Self { + Self { + proof_type: ProofType::Batch as i32, + payload: batch.to_bytes(), + } + } + + async fn mock_tx() -> ExecutedTransaction { + // Create a mock transaction to send to the server + let mut mock_chain_builder = MockChainBuilder::new(); + let account = mock_chain_builder.add_existing_wallet(Auth::BasicAuth).unwrap(); + + let fungible_asset_1: Asset = + FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) + .unwrap() + .into(); + let note_1 = mock_chain_builder + .add_p2id_note( + ACCOUNT_ID_SENDER.try_into().unwrap(), + account.id(), + &[fungible_asset_1], + NoteType::Private, + ) + .unwrap(); + + let mock_chain = mock_chain_builder.build().unwrap(); + + let tx_context = mock_chain + .build_tx_context(account.id(), &[note_1.id()], &[]) + .unwrap() + .disable_debug_mode() + .build() + .unwrap(); + + Box::pin(tx_context.execute()).await.unwrap() + } + + async fn mock_batch() -> ProposedBatch { + // Create a mock transaction to send to the server + let mut mock_chain_builder = MockChainBuilder::new(); + let account = mock_chain_builder.add_existing_wallet(Auth::BasicAuth).unwrap(); + + let fungible_asset_1: Asset = + FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) + .unwrap() + .into(); + let note_1 = mock_chain_builder + .add_p2id_note( + ACCOUNT_ID_SENDER.try_into().unwrap(), + account.id(), + &[fungible_asset_1], + NoteType::Private, + ) + .unwrap(); + + let mock_chain = mock_chain_builder.build().unwrap(); + + let tx = mock_chain + .build_tx_context(account.id(), &[note_1.id()], &[]) + .unwrap() + .disable_debug_mode() + .build() + .unwrap(); + + let tx = Box::pin(tx.execute()).await.unwrap(); + let tx = tokio::task::block_in_place(|| { + 
LocalTransactionProver::default().prove(tx.tx_inputs().clone()).unwrap() + }); + + ProposedBatch::new( + vec![Arc::new(tx)], + mock_chain.latest_block_header(), + mock_chain.latest_partial_blockchain(), + BTreeMap::new(), + ) + .unwrap() + } +} + +// Test helpers for the server. +// +// Note: This is implemented under `#[cfg(test)]`. +impl Server { + /// A server configured with an arbitrary port (i.e. `port=0`) and the given kind. + /// + /// Capacity is set to 10 with a timeout of 60 seconds. + fn with_arbitrary_port(kind: ProofKind) -> Self { + Self { + port: 0, + kind, + timeout: Duration::from_secs(60), + capacity: NonZeroUsize::new(10).unwrap(), + } + } + + /// Overrides the capacity of the server. + /// + /// # Panics + /// + /// Panics if the given capacity is zero. + fn with_capacity(mut self, capacity: usize) -> Self { + self.capacity = NonZeroUsize::new(capacity).unwrap(); + self + } + + /// Overrides the timeout of the server. + fn with_timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } +} + +/// This test ensures that the legacy behaviour can still be configured. +/// +/// The original prover worker refused to process multiple requests concurrently. +/// This test ensures that the redesign behaves the same when limited to a capacity of 1. +/// +/// Create a server with a capacity of one and submit two requests. Ensure +/// that one succeeds and one fails with a resource exhaustion error. 
+#[tokio::test(flavor = "multi_thread")] +async fn legacy_behaviour_with_capacity_1() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .with_capacity(1) + .spawn() + .await + .expect("server should spawn"); + + let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + + let mut client_a = Client::connect(port).await; + let mut client_b = client_a.clone(); + + let a = client_a.submit_request(request.clone()); + let b = client_b.submit_request(request); + + let (first, second) = tokio::join!(a, b); + + // We cannot know which got served and which got rejected. + // We can only assert that one of them is Ok and the other is Err. + assert!(first.is_ok() || second.is_ok()); + assert!(first.is_err() || second.is_err()); + // We also expect that the error is a resource exhaustion error. + let err = first.err().or(second.err()).unwrap(); + assert_eq!(err.code(), tonic::Code::ResourceExhausted); + + server.abort(); +} + +/// Test that multiple requests can be queued and capacity is respected. +/// +/// Create a server with a capacity of two and submit three requests. Ensure +/// that two succeed and one fails with a resource exhaustion error. +#[tokio::test(flavor = "multi_thread")] +async fn capacity_is_respected() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .with_capacity(2) + .spawn() + .await + .expect("server should spawn"); + + let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + let mut client_a = Client::connect(port).await; + let mut client_b = client_a.clone(); + let mut client_c = client_a.clone(); + + let a = client_a.submit_request(request.clone()); + let b = client_b.submit_request(request.clone()); + let c = client_c.submit_request(request); + + let (first, second, third) = tokio::join!(a, b, c); + + // We cannot know which got served and which got rejected. + // We can only assert that two succeeded and one failed. 
+    let mut expected = [true, true, false];
+    let mut result = [first.is_ok(), second.is_ok(), third.is_ok()];
+    expected.sort_unstable();
+    result.sort_unstable();
+    assert_eq!(expected, result);
+
+    // We also expect that the error is a resource exhaustion error.
+    let err = first.err().or(second.err()).or(third.err()).unwrap();
+    assert_eq!(err.code(), tonic::Code::ResourceExhausted);
+
+    server.abort();
+}
+
+/// Ensures that the server request timeout is adhered to.
+///
+/// We cannot actually enforce this for a request that is already being proven, as the proof
+/// is done in a blocking sync task. We can however check that a second queued request is rejected.
+///
+/// This is tricky to test properly because we can't easily control the server's response time.
+/// Instead we configure the server to have a ridiculously short timeout which should hopefully
+/// always timeout.
+#[tokio::test(flavor = "multi_thread")]
+async fn timeout_is_respected() {
+    let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction)
+        .with_timeout(Duration::from_nanos(10))
+        .spawn()
+        .await
+        .expect("server should spawn");
+
+    let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await);
+
+    let mut client_a = Client::connect(port).await;
+    let mut client_b = Client::connect(port).await;
+
+    let a = client_a.submit_request(request.clone());
+    let b = client_b.submit_request(request);
+
+    let (a, b) = tokio::join!(a, b);
+
+    // At least one of the requests should timeout.
+    let err = a.err().or(b.err()).unwrap();
+
+    assert_eq!(err.code(), tonic::Code::Cancelled);
+    assert!(err.message().contains("Timeout expired"));
+
+    server.abort();
+}
+
+/// Ensures that an invalid proof kind is rejected.
+///
+/// The error should be an invalid argument error, but since that is fairly broad we also inspect
+/// the error message for mention of the invalid proof kind. 
This is technically an implementation
+/// detail, but it's the best we have without adding multiple abstraction layers.
+#[tokio::test(flavor = "multi_thread")]
+async fn invalid_proof_kind_is_rejected() {
+    let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction)
+        .spawn()
+        .await
+        .expect("server should spawn");
+
+    let mut request = ProofRequest::from_tx(&ProofRequest::mock_tx().await);
+    request.proof_type = i32::MAX;
+
+    let mut client = Client::connect(port).await;
+    let response = client.submit_request(request).await;
+    let err = response.unwrap_err();
+
+    assert_eq!(err.code(), tonic::Code::InvalidArgument);
+    assert!(err.message().contains("unknown proof_type value"));
+
+    server.abort();
+}
+
+/// Ensures that a valid but unsupported proof kind is rejected.
+///
+/// Aka submit a transaction proof request to a batch proving server.
+///
+/// The error should be an invalid argument error, but since that is fairly broad we also inspect
+/// the error message for mention of the unsupported proof kind. This is technically an
+/// implementation detail, but it's the best we have without adding multiple abstraction layers.
+#[tokio::test(flavor = "multi_thread")]
+async fn unsupported_proof_kind_is_rejected() {
+    let (server, port) = Server::with_arbitrary_port(ProofKind::Batch)
+        .spawn()
+        .await
+        .expect("server should spawn");
+
+    let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await);
+
+    let mut client = Client::connect(port).await;
+    let response = client.submit_request(request).await;
+    let err = response.unwrap_err();
+
+    assert_eq!(err.code(), tonic::Code::InvalidArgument);
+    assert!(err.message().contains("unsupported proof type"));
+
+    server.abort();
+}
+
+/// Checks that a transaction request results in a correct proof.
+///
+/// The proof is verified and the transaction IDs of request and response must correspond. 
+#[tokio::test(flavor = "multi_thread")]
+async fn transaction_proof_is_correct() {
+    let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction)
+        .spawn()
+        .await
+        .expect("server should spawn");
+
+    let tx = ProofRequest::mock_tx().await;
+    let request = ProofRequest::from_tx(&tx);
+
+    let mut client = Client::connect(port).await;
+    let response = client.submit_request(request).await.unwrap();
+    let response = ProvenTransaction::read_from_bytes(&response.payload).unwrap();
+
+    assert_eq!(response.id(), tx.id());
+    TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL).verify(&response).unwrap();
+
+    server.abort();
+}
+
+/// Checks that a batch request results in a correct proof.
+///
+/// The proof is replicated locally, which ensures that the gRPC codec and server code do the
+/// correct thing.
+#[tokio::test(flavor = "multi_thread")]
+async fn batch_proof_is_correct() {
+    let (server, port) = Server::with_arbitrary_port(ProofKind::Batch)
+        .spawn()
+        .await
+        .expect("server should spawn");
+
+    let batch = ProofRequest::mock_batch().await;
+    let request = ProofRequest::from_batch(&batch);
+
+    let mut client = Client::connect(port).await;
+    let response = client.submit_request(request).await.unwrap();
+    let response = ProvenBatch::read_from_bytes(&response.payload).unwrap();
+
+    let expected = tokio::task::block_in_place(|| {
+        LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL).prove(batch).unwrap()
+    });
+    assert_eq!(response, expected);
+
+    server.abort();
+}
diff --git a/bin/remote-prover/src/utils.rs b/bin/remote-prover/src/utils.rs
deleted file mode 100644
index 121491136..000000000
--- a/bin/remote-prover/src/utils.rs
+++ /dev/null
@@ -1,178 +0,0 @@
-use std::net::TcpListener;
-
-use http::{HeaderMap, HeaderName, HeaderValue};
-use miden_remote_prover::error::RemoteProverError;
-use pingora::http::ResponseHeader;
-use pingora::protocols::http::ServerSession;
-use pingora::{Error, ErrorType};
-use pingora_proxy::Session;
-use prost::Message; 
-use tonic::Code; -use tracing::debug; - -use crate::COMPONENT; -use crate::commands::PROXY_HOST; -use crate::proxy::metrics::QUEUE_DROP_COUNT; - -// CONSTANTS -// ================================================================================================ -const GRPC_CONTENT_TYPE: HeaderValue = HeaderValue::from_static("application/grpc"); -const GRPC_STATUS_HEADER: HeaderName = HeaderName::from_static("grpc-status"); -const GRPC_MESSAGE_HEADER: HeaderName = HeaderName::from_static("grpc-message"); - -/// Build gRPC trailers with status and optional message -fn build_grpc_trailers( - grpc_status: Code, - error_message: Option<&str>, -) -> pingora_core::Result { - let mut trailers = HeaderMap::new(); - - // Set gRPC status - let status_code = (grpc_status as i32).to_string(); - trailers.insert( - GRPC_STATUS_HEADER, - status_code.parse().map_err(|e| { - Error::because(ErrorType::InternalError, format!("Failed to parse grpc-status: {e}"), e) - })?, - ); - - // Set gRPC message if provided - if let Some(message) = error_message { - trailers.insert( - GRPC_MESSAGE_HEADER, - message.parse().map_err(|e| { - Error::because( - ErrorType::InternalError, - format!("Failed to parse grpc-message: {e}"), - e, - ) - })?, - ); - } - - Ok(trailers) -} - -/// Write a protobuf message as a gRPC response to a Pingora session -/// -/// This helper function takes a protobuf message and writes it to a Pingora session -/// in the proper gRPC format, handling message encoding, headers, and trailers. 
-pub async fn write_grpc_response_to_session( - session: &mut Session, - message: T, -) -> pingora_core::Result<()> -where - T: Message, -{ - // Serialize the protobuf message - let mut response_body = Vec::new(); - message.encode(&mut response_body).map_err(|e| { - Error::because(ErrorType::InternalError, format!("Failed to encode proto response: {e}"), e) - })?; - - let mut grpc_message = Vec::new(); - - // Add compression flag (1 byte, 0 = no compression) - grpc_message.push(0u8); - - // Add message length (4 bytes, big-endian) - let msg_len = response_body.len() as u32; - grpc_message.extend_from_slice(&msg_len.to_be_bytes()); - - // Add the actual message - grpc_message.extend_from_slice(&response_body); - - // Create gRPC response headers WITHOUT grpc-status (that goes in trailers) - let mut header = ResponseHeader::build(200, None)?; - header.insert_header(http::header::CONTENT_TYPE, GRPC_CONTENT_TYPE)?; - - session.set_keepalive(None); - session.write_response_header(Box::new(header), false).await?; - session.write_response_body(Some(grpc_message.into()), false).await?; - - // Send trailers with gRPC status - let trailers = build_grpc_trailers(Code::Ok, None)?; - session.write_response_trailers(trailers).await?; - - Ok(()) -} - -/// Write a gRPC error response to a Pingora session -/// -/// This helper function creates a proper gRPC error response with the specified -/// status code and error message. 
-pub async fn write_grpc_error_to_session( - session: &mut Session, - grpc_status: Code, - error_message: &str, -) -> pingora_core::Result<()> { - // Create gRPC response headers (always HTTP 200 for gRPC) - let mut header = ResponseHeader::build(200, None)?; - header.insert_header(http::header::CONTENT_TYPE, GRPC_CONTENT_TYPE)?; - - session.set_keepalive(None); - session.write_response_header(Box::new(header), false).await?; - - // gRPC errors don't have a body, just headers and trailers - session.write_response_body(None, false).await?; - - // Send trailers with gRPC status and error message - let trailers = build_grpc_trailers(grpc_status, Some(error_message))?; - session.write_response_trailers(trailers).await?; - - Ok(()) -} - -/// Create a gRPC `RESOURCE_EXHAUSTED` response for a full queue -pub(crate) async fn create_queue_full_response(session: &mut Session) -> pingora_core::Result<()> { - // Increment the queue drop count metric - QUEUE_DROP_COUNT.inc(); - - // Use our helper function to create a proper gRPC error response - write_grpc_error_to_session(session, Code::ResourceExhausted, "Too many requests in the queue") - .await -} - -/// Create a gRPC `RESOURCE_EXHAUSTED` response for rate limiting -pub async fn create_too_many_requests_response( - session: &mut Session, - max_request_per_second: isize, -) -> pingora_core::Result<()> { - // Use our helper function to create a proper gRPC error response - let error_message = - format!("Rate limit exceeded: {max_request_per_second} requests per second"); - write_grpc_error_to_session(session, Code::ResourceExhausted, &error_message).await -} - -/// Create a 400 response with an error message -/// -/// It will set the X-Error-Message header to the error message. 
-pub async fn create_response_with_error_message( - session: &mut ServerSession, - error_msg: String, -) -> pingora_core::Result<()> { - let mut header = ResponseHeader::build(400, None)?; - header.insert_header("X-Error-Message", error_msg)?; - session.set_keepalive(None); - session.write_response_header(Box::new(header)).await?; - Ok(()) -} - -/// Checks if a port is available for use. -/// -/// # Arguments -/// * `port` - The port to check. -/// * `service` - A descriptive name for the service (for logging purposes). -/// -/// # Returns -/// * `Ok(TcpListener)` if the port is available. -/// * `Err(RemoteProverError::PortAlreadyInUse)` if the port is already in use. -pub fn check_port_availability( - port: u16, - service: &str, -) -> Result { - let addr = format!("{PROXY_HOST}:{port}"); - TcpListener::bind(&addr) - .inspect(|_| debug!(target: COMPONENT, %service, %port, %addr, "Port is available")) - .map_err(|err| RemoteProverError::PortAlreadyInUse(err, port)) -} diff --git a/packaging/prover-proxy/miden-prover-proxy.service b/packaging/prover-proxy/miden-prover-proxy.service deleted file mode 100644 index 90a34c9d0..000000000 --- a/packaging/prover-proxy/miden-prover-proxy.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Miden delegated prover proxy -Wants=network-online.target - -[Install] -WantedBy=multi-user.target - -[Service] -Type=exec -Environment="OTEL_SERVICE_NAME=miden-prover-proxy" -EnvironmentFile=/lib/systemd/system/miden-prover-proxy.env -ExecStart=/usr/bin/miden-remote-prover start-proxy -WorkingDirectory=/opt/miden-prover-proxy -User=miden-prover-proxy -RestartSec=5 -Restart=always -LimitCORE=infinity diff --git a/packaging/prover-proxy/postinst b/packaging/prover-proxy/postinst deleted file mode 100644 index 275c8f2c7..000000000 --- a/packaging/prover-proxy/postinst +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# -# This is a postinstallation script so the service can be configured and started when requested. 
- -# User is expected by the systemd service file and `/opt/` is its working directory, -sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent miden-prover-proxy - -# Working folder. -if [ -d "/opt/miden-prover-proxy" ] -then - echo "Directory /opt/miden-prover-proxy exists." -else - mkdir -p /opt/miden-prover-proxy -fi -sudo chown -R miden-prover-proxy /opt/miden-prover-proxy - -# Configuration folder -if [ -d "/etc/opt/miden-prover-proxy" ] -then - echo "Directory /etc/opt/miden-prover-proxy exists." -else - mkdir -p /etc/opt/miden-prover-proxy -fi -sudo chown -R miden-prover-proxy /etc/opt/miden-prover-proxy - -sudo systemctl daemon-reload -sudo systemctl enable miden-prover-proxy -sudo systemctl start miden-prover-proxy diff --git a/packaging/prover-proxy/postrm b/packaging/prover-proxy/postrm deleted file mode 100644 index 001360b5c..000000000 --- a/packaging/prover-proxy/postrm +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -# -############### -# Remove miden-prover-proxy installs -############## -sudo rm -f /lib/systemd/system/miden-prover-proxy.* -sudo rm -rf /opt/miden-prover-proxy/ -sudo deluser miden-prover-proxy -sudo systemctl daemon-reload diff --git a/packaging/prover/miden-prover.service b/packaging/prover/miden-prover.service index a34eb26af..4aafc09ca 100644 --- a/packaging/prover/miden-prover.service +++ b/packaging/prover/miden-prover.service @@ -9,8 +9,7 @@ WantedBy=multi-user.target Type=exec Environment="OTEL_SERVICE_NAME=miden-prover" EnvironmentFile=/lib/systemd/system/miden-prover.env -ExecStart=/usr/bin/miden-remote-prover start-worker -WorkingDirectory=/opt/miden-prover +ExecStart=/usr/bin/miden-remote-prover User=miden-prover RestartSec=5 Restart=always diff --git a/packaging/prover/postinst b/packaging/prover/postinst index 9976ba33b..2069a4cb6 100644 --- a/packaging/prover/postinst +++ b/packaging/prover/postinst @@ -2,27 +2,9 @@ # # This is a postinstallation 
script so the service can be configured and started when requested. -# User is expected by the systemd service file and `/opt/` is its working directory, +# User is expected by the systemd service file sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent miden-prover -# Working folder. -if [ -d "/opt/miden-prover" ] -then - echo "Directory /opt/miden-prover exists." -else - mkdir -p /opt/miden-prover -fi -sudo chown -R miden-prover /opt/miden-prover - -# Configuration folder -if [ -d "/etc/opt/miden-prover" ] -then - echo "Directory /etc/opt/miden-prover exists." -else - mkdir -p /etc/opt/miden-prover -fi -sudo chown -R miden-prover /etc/opt/miden-prover - sudo systemctl daemon-reload sudo systemctl enable miden-prover sudo systemctl start miden-prover diff --git a/packaging/prover/postrm b/packaging/prover/postrm index d57bf2efc..a63357438 100644 --- a/packaging/prover/postrm +++ b/packaging/prover/postrm @@ -3,7 +3,5 @@ ############### # Remove miden-prover installs ############## -sudo rm -f /lib/systemd/system/miden-prover.* -sudo rm -rf /opt/miden-prover/ sudo deluser miden-prover sudo systemctl daemon-reload From a3f96e038da0277f3660bb935abbeacc9cd8f378 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 20 Feb 2026 14:07:23 +0200 Subject: [PATCH 47/77] chore(deps): bump keccak from 0.1.5 to 0.1.6 (#1695) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 925fc1725..cc02c8333 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1850,9 +1850,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" dependencies = [ 
"cpufeatures", ] From e2d908b358f1f927f131276d2be896110a96a696 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Mon, 23 Feb 2026 08:45:57 -0300 Subject: [PATCH 48/77] chore(ntx-builder): DataStore & DB writers refactor (#1689) --- crates/ntx-builder/src/actor/execute.rs | 68 ++++++++++++--- crates/ntx-builder/src/actor/mod.rs | 60 ++++++++++---- crates/ntx-builder/src/builder.rs | 27 +++++- .../db/migrations/2026020900000_setup/up.sql | 9 ++ crates/ntx-builder/src/db/mod.rs | 35 ++++---- crates/ntx-builder/src/db/models/conv.rs | 15 +++- .../ntx-builder/src/db/models/queries/mod.rs | 3 + .../src/db/models/queries/note_scripts.rs | 56 +++++++++++++ .../src/db/models/queries/notes.rs | 27 ------ .../src/db/models/queries/tests.rs | 82 +++++++++++-------- crates/ntx-builder/src/db/schema.rs | 9 +- crates/ntx-builder/src/lib.rs | 6 +- 12 files changed, 290 insertions(+), 107 deletions(-) create mode 100644 crates/ntx-builder/src/db/models/queries/note_scripts.rs diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index 59e9cdb4f..09658cd23 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -55,6 +55,7 @@ use tracing::{Instrument, instrument}; use crate::COMPONENT; use crate::actor::account_state::TransactionCandidate; use crate::block_producer::BlockProducerClient; +use crate::db::Db; use crate::store::StoreClient; #[derive(Debug, thiserror::Error)] @@ -77,6 +78,12 @@ pub enum NtxError { type NtxResult = Result; +/// The result of a successful transaction execution. +/// +/// Contains the transaction ID, any notes that failed during filtering, and note scripts fetched +/// from the remote store that should be persisted to the local DB cache. 
+pub type NtxExecutionResult = (TransactionId, Vec, Vec<(Word, NoteScript)>); + // NETWORK TRANSACTION CONTEXT // ================================================================================================ @@ -100,6 +107,9 @@ pub struct NtxContext { /// LRU cache for storing retrieved note scripts to avoid repeated store calls. script_cache: LruCache, + + /// Local database for persistent note script caching. + db: Db, } impl NtxContext { @@ -110,6 +120,7 @@ impl NtxContext { prover: Option, store: StoreClient, script_cache: LruCache, + db: Db, ) -> Self { Self { block_producer, @@ -117,6 +128,7 @@ impl NtxContext { prover, store, script_cache, + db, } } @@ -132,8 +144,9 @@ impl NtxContext { /// /// # Returns /// - /// On success, returns the [`TransactionId`] of the executed transaction and a list of - /// [`FailedNote`]s representing notes that were filtered out before execution. + /// On success, returns an [`NtxExecutionResult`] containing the transaction ID, any notes + /// that failed during filtering, and note scripts fetched from the remote store that should + /// be persisted to the local DB cache. /// /// # Errors /// @@ -146,7 +159,7 @@ impl NtxContext { pub fn execute_transaction( self, tx: TransactionCandidate, - ) -> impl FutureMaybeSend)>> { + ) -> impl FutureMaybeSend> { let TransactionCandidate { account, notes, @@ -168,6 +181,7 @@ impl NtxContext { chain_mmr, self.store.clone(), self.script_cache.clone(), + self.db.clone(), ); // Filter notes. @@ -178,6 +192,9 @@ impl NtxContext { // Execute transaction. let executed_tx = Box::pin(self.execute(&data_store, successful_notes)).await?; + // Collect scripts fetched from the remote store during execution. + let scripts_to_cache = data_store.take_fetched_scripts().await; + // Prove transaction. let tx_inputs: TransactionInputs = executed_tx.into(); let proven_tx = Box::pin(self.prove(&tx_inputs)).await?; @@ -188,7 +205,7 @@ impl NtxContext { // Submit transaction to block producer. 
self.submit(&proven_tx).await?; - Ok((proven_tx.id(), failed_notes)) + Ok((proven_tx.id(), failed_notes, scripts_to_cache)) }) .in_current_span() .await @@ -334,6 +351,11 @@ struct NtxDataStore { store: StoreClient, /// LRU cache for storing retrieved note scripts to avoid repeated store calls. script_cache: LruCache, + /// Local database for persistent note script. + db: Db, + /// Scripts fetched from the remote store during execution, to be persisted by the + /// coordinator. + fetched_scripts: Arc>>, /// Mapping of storage map roots to storage slot names observed during various calls. /// /// The registered slot names are subsequently used to retrieve storage map witnesses from the @@ -366,6 +388,7 @@ impl NtxDataStore { chain_mmr: Arc, store: StoreClient, script_cache: LruCache, + db: Db, ) -> Self { let mast_store = TransactionMastStore::new(); mast_store.load_account_code(account.code()); @@ -377,10 +400,17 @@ impl NtxDataStore { mast_store, store, script_cache, + db, + fetched_scripts: Arc::new(Mutex::new(Vec::new())), storage_slots: Arc::new(Mutex::new(BTreeMap::default())), } } + /// Returns the list of note scripts fetched from the remote store during execution. + async fn take_fetched_scripts(&self) -> Vec<(Word, NoteScript)> { + self.fetched_scripts.lock().await.drain(..).collect() + } + /// Registers storage map slot names for the given account ID and storage header. /// /// These slot names are subsequently used to query for storage map witnesses against the store. @@ -507,28 +537,40 @@ impl DataStore for NtxDataStore { /// Retrieves a note script by its root hash. /// - /// This implementation uses the configured RPC client to call the `GetNoteScriptByRoot` - /// endpoint on the RPC server. + /// Uses a 3-tier lookup strategy: + /// 1. In-memory LRU cache. + /// 2. Local SQLite database. + /// 3. Remote store via gRPC. 
fn get_note_script( &self, script_root: Word, ) -> impl FutureMaybeSend, DataStoreError>> { async move { - // Attempt to retrieve the script from the cache. + // 1. In-memory LRU cache. if let Some(cached_script) = self.script_cache.get(&script_root).await { return Ok(Some(cached_script)); } - // Retrieve the script from the store. + // 2. Local DB. + if let Some(script) = self.db.lookup_note_script(script_root).await.map_err(|err| { + DataStoreError::other_with_source("failed to look up note script in local DB", err) + })? { + self.script_cache.put(script_root, script.clone()).await; + return Ok(Some(script)); + } + + // 3. Remote store. let maybe_script = self.store.get_note_script_by_root(script_root).await.map_err(|err| { - DataStoreError::Other { - error_msg: "failed to retrieve note script from store".to_string().into(), - source: Some(err.into()), - } + DataStoreError::other_with_source( + "failed to retrieve note script from store", + err, + ) })?; - // Handle response. + if let Some(script) = maybe_script { + // Collect for later persistence by the coordinator. 
+ self.fetched_scripts.lock().await.push((script_root, script.clone())); self.script_cache.put(script_root, script.clone()).await; Ok(Some(script)) } else { diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index 3b94bd8c3..ecb72552b 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -17,7 +17,7 @@ use miden_node_utils::lru_cache::LruCache; use miden_protocol::Word; use miden_protocol::account::{Account, AccountDelta}; use miden_protocol::block::BlockNumber; -use miden_protocol::note::{Note, NoteScript}; +use miden_protocol::note::{Note, NoteScript, Nullifier}; use miden_protocol::transaction::TransactionId; use miden_remote_prover_client::RemoteTransactionProver; use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; @@ -29,6 +29,21 @@ use crate::builder::ChainState; use crate::db::Db; use crate::store::StoreClient; +// ACTOR NOTIFICATION +// ================================================================================================ + +/// A notification sent from an account actor to the coordinator. +pub enum ActorNotification { + /// One or more notes failed during transaction execution and should have their attempt + /// counters incremented. + NotesFailed { + nullifiers: Vec, + block_num: BlockNumber, + }, + /// A note script was fetched from the remote store and should be persisted to the local DB. + CacheNoteScript { script_root: Word, script: NoteScript }, +} + // ACTOR SHUTDOWN REASON // ================================================================================================ @@ -72,6 +87,8 @@ pub struct AccountActorContext { pub max_note_attempts: usize, /// Database for persistent state. pub db: Db, + /// Channel for sending notifications to the coordinator (via the builder event loop). 
+ pub notification_tx: mpsc::Sender, } // ACCOUNT ORIGIN @@ -173,6 +190,8 @@ pub struct AccountActor { max_notes_per_tx: NonZeroUsize, /// Maximum number of note execution attempts before dropping a note. max_note_attempts: usize, + /// Channel for sending notifications to the coordinator. + notification_tx: mpsc::Sender, } impl AccountActor { @@ -207,6 +226,7 @@ impl AccountActor { script_cache: actor_context.script_cache.clone(), max_notes_per_tx: actor_context.max_notes_per_tx, max_note_attempts: actor_context.max_note_attempts, + notification_tx: actor_context.notification_tx.clone(), } } @@ -272,11 +292,6 @@ impl AccountActor { // Read the chain state. let chain_state = self.chain_state.read().await.clone(); - // Drop notes that have failed too many times. - if let Err(err) = self.db.drop_failing_notes(account_id, self.max_note_attempts).await { - tracing::error!(err = %err, "failed to drop failing notes"); - } - // Query DB for latest account and available notes. let tx_candidate = self.select_candidate_from_db( account_id, @@ -348,17 +363,20 @@ impl AccountActor { self.prover.clone(), self.store.clone(), self.script_cache.clone(), + self.db.clone(), ); let notes = tx_candidate.notes.clone(); let execution_result = context.execute_transaction(tx_candidate).await; match execution_result { // Execution completed without failed notes. - Ok((tx_id, failed)) if failed.is_empty() => { + Ok((tx_id, failed, scripts_to_cache)) if failed.is_empty() => { + self.cache_note_scripts(scripts_to_cache).await; self.mode = ActorMode::TransactionInflight(tx_id); }, // Execution completed with some failed notes. - Ok((tx_id, failed)) => { + Ok((tx_id, failed, scripts_to_cache)) => { + self.cache_note_scripts(scripts_to_cache).await; let nullifiers: Vec<_> = failed.into_iter().map(|note| note.note.nullifier()).collect(); self.mark_notes_failed(&nullifiers, block_num).await; @@ -377,16 +395,26 @@ impl AccountActor { } } - /// Marks notes as failed in the DB. 
- async fn mark_notes_failed( - &self, - nullifiers: &[miden_protocol::note::Nullifier], - block_num: BlockNumber, - ) { - if let Err(err) = self.db.notes_failed(nullifiers.to_vec(), block_num).await { - tracing::error!(err = %err, "failed to mark notes as failed"); + /// Sends notifications to the coordinator to cache note scripts fetched from the remote store. + async fn cache_note_scripts(&self, scripts: Vec<(Word, NoteScript)>) { + for (script_root, script) in scripts { + let _ = self + .notification_tx + .send(ActorNotification::CacheNoteScript { script_root, script }) + .await; } } + + /// Sends a notification to the coordinator to mark notes as failed. + async fn mark_notes_failed(&self, nullifiers: &[Nullifier], block_num: BlockNumber) { + let _ = self + .notification_tx + .send(ActorNotification::NotesFailed { + nullifiers: nullifiers.to_vec(), + block_num, + }) + .await; + } } // HELPERS diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index b642d0379..20090c5b9 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -14,7 +14,7 @@ use tokio_stream::StreamExt; use tonic::Status; use crate::NtxBuilderConfig; -use crate::actor::{AccountActorContext, AccountOrigin}; +use crate::actor::{AccountActorContext, AccountOrigin, ActorNotification}; use crate::coordinator::Coordinator; use crate::db::Db; use crate::store::StoreClient; @@ -98,9 +98,12 @@ pub struct NetworkTransactionBuilder { actor_context: AccountActorContext, /// Stream of mempool events from the block producer. mempool_events: MempoolEventStream, + /// Receiver for notifications from account actors (e.g., note failures). 
+ notification_rx: mpsc::Receiver, } impl NetworkTransactionBuilder { + #[expect(clippy::too_many_arguments)] pub(crate) fn new( config: NtxBuilderConfig, coordinator: Coordinator, @@ -109,6 +112,7 @@ impl NetworkTransactionBuilder { chain_state: Arc>, actor_context: AccountActorContext, mempool_events: MempoolEventStream, + notification_rx: mpsc::Receiver, ) -> Self { Self { config, @@ -118,6 +122,7 @@ impl NetworkTransactionBuilder { chain_state, actor_context, mempool_events, + notification_rx, } } @@ -167,6 +172,10 @@ impl NetworkTransactionBuilder { Some(account_id) = account_rx.recv() => { self.handle_loaded_account(account_id).await?; }, + // Handle actor notifications (DB writes delegated from actors). + Some(notification) = self.notification_rx.recv() => { + self.handle_actor_notification(notification).await; + }, // Handle account loader task completion/failure. // If the task fails, we abort since the builder would be in a degraded state // where existing notes against network accounts won't be processed. @@ -285,6 +294,22 @@ impl NetworkTransactionBuilder { } } + /// Processes a notification from an account actor by performing the corresponding DB write. + async fn handle_actor_notification(&mut self, notification: ActorNotification) { + match notification { + ActorNotification::NotesFailed { nullifiers, block_num } => { + if let Err(err) = self.db.notes_failed(nullifiers, block_num).await { + tracing::error!(err = %err, "failed to mark notes as failed"); + } + }, + ActorNotification::CacheNoteScript { script_root, script } => { + if let Err(err) = self.db.insert_note_script(script_root, &script).await { + tracing::error!(err = %err, "failed to cache note script"); + } + }, + } + } + /// Updates the chain tip and prunes old blocks from the MMR. 
async fn update_chain_tip(&mut self, tip: BlockHeader) { let mut chain_state = self.chain_state.write().await; diff --git a/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql index d8da128a9..68f3793d8 100644 --- a/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql +++ b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql @@ -60,3 +60,12 @@ CREATE TABLE notes ( CREATE INDEX idx_notes_account ON notes(account_id); CREATE INDEX idx_notes_created_by ON notes(created_by) WHERE created_by IS NOT NULL; CREATE INDEX idx_notes_consumed_by ON notes(consumed_by) WHERE consumed_by IS NOT NULL; + +-- Persistent cache of note scripts, keyed by script root hash. +-- Survives restarts so scripts don't need to be re-fetched from the store. +CREATE TABLE note_scripts ( + -- Script root hash (Word serialized to 32 bytes). + script_root BLOB PRIMARY KEY, + -- Serialized NoteScript bytes. + script_data BLOB NOT NULL +) WITHOUT ROWID; diff --git a/crates/ntx-builder/src/db/mod.rs b/crates/ntx-builder/src/db/mod.rs index 40709de7c..47352e29e 100644 --- a/crates/ntx-builder/src/db/mod.rs +++ b/crates/ntx-builder/src/db/mod.rs @@ -4,10 +4,11 @@ use anyhow::Context; use miden_node_db::DatabaseError; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_protocol::Word; use miden_protocol::account::Account; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::block::{BlockHeader, BlockNumber}; -use miden_protocol::note::Nullifier; +use miden_protocol::note::{NoteScript, Nullifier}; use miden_protocol::transaction::TransactionId; use tracing::{info, instrument}; @@ -76,19 +77,6 @@ impl Db { .await } - /// Drops notes for the given account that have exceeded the maximum attempt count. 
- pub async fn drop_failing_notes( - &self, - account_id: NetworkAccountId, - max_attempts: usize, - ) -> Result<()> { - self.inner - .transact("drop_failing_notes", move |conn| { - queries::drop_failing_notes(conn, account_id, max_attempts) - }) - .await - } - /// Returns the latest account state and available notes for the given account. pub async fn select_candidate( &self, @@ -196,6 +184,25 @@ impl Db { .await } + /// Looks up a cached note script by root hash. + pub async fn lookup_note_script(&self, script_root: Word) -> Result> { + self.inner + .query("lookup_note_script", move |conn| { + queries::lookup_note_script(conn, &script_root) + }) + .await + } + + /// Persists a note script to the local cache. + pub async fn insert_note_script(&self, script_root: Word, script: &NoteScript) -> Result<()> { + let script = script.clone(); + self.inner + .transact("insert_note_script", move |conn| { + queries::insert_note_script(conn, &script_root, &script) + }) + .await + } + /// Creates a file-backed SQLite test connection with migrations applied. 
#[cfg(test)] pub fn test_conn() -> (diesel::SqliteConnection, tempfile::TempDir) { diff --git a/crates/ntx-builder/src/db/models/conv.rs b/crates/ntx-builder/src/db/models/conv.rs index 2a3299428..26bb99868 100644 --- a/crates/ntx-builder/src/db/models/conv.rs +++ b/crates/ntx-builder/src/db/models/conv.rs @@ -4,9 +4,10 @@ use miden_node_db::DatabaseError; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::note::SingleTargetNetworkNote; use miden_node_proto::generated as proto; +use miden_protocol::Word; use miden_protocol::account::{Account, AccountId}; use miden_protocol::block::{BlockHeader, BlockNumber}; -use miden_protocol::note::{Note, Nullifier}; +use miden_protocol::note::{Note, NoteScript, Nullifier}; use miden_protocol::transaction::TransactionId; use miden_tx::utils::{Deserializable, Serializable}; use prost::Message; @@ -75,3 +76,15 @@ pub fn single_target_note_from_bytes( SingleTargetNetworkNote::try_from(proto_note) .map_err(|e| DatabaseError::deserialization("network note conversion", e)) } + +pub fn word_to_bytes(word: &Word) -> Vec { + word.to_bytes() +} + +pub fn note_script_to_bytes(script: &NoteScript) -> Vec { + script.to_bytes() +} + +pub fn note_script_from_bytes(bytes: &[u8]) -> Result { + NoteScript::read_from_bytes(bytes).map_err(|e| DatabaseError::deserialization("note script", e)) +} diff --git a/crates/ntx-builder/src/db/models/queries/mod.rs b/crates/ntx-builder/src/db/models/queries/mod.rs index fedcaabe0..2ee11ee28 100644 --- a/crates/ntx-builder/src/db/models/queries/mod.rs +++ b/crates/ntx-builder/src/db/models/queries/mod.rs @@ -19,6 +19,9 @@ pub use accounts::*; mod chain_state; pub use chain_state::*; +mod note_scripts; +pub use note_scripts::*; + mod notes; pub use notes::*; diff --git a/crates/ntx-builder/src/db/models/queries/note_scripts.rs b/crates/ntx-builder/src/db/models/queries/note_scripts.rs new file mode 100644 index 000000000..09c03e4c1 --- /dev/null +++ 
b/crates/ntx-builder/src/db/models/queries/note_scripts.rs @@ -0,0 +1,56 @@ +//! Database queries for persisting and retrieving note scripts. + +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_protocol::Word; +use miden_protocol::note::NoteScript; + +use crate::db::models::conv as conversions; +use crate::db::schema; + +#[derive(Insertable)] +#[diesel(table_name = schema::note_scripts)] +struct NoteScriptInsert { + script_root: Vec, + script_data: Vec, +} + +#[derive(Queryable, Selectable)] +#[diesel(table_name = schema::note_scripts)] +struct NoteScriptRow { + script_data: Vec, +} + +/// Looks up a note script by its root hash. +pub fn lookup_note_script( + conn: &mut SqliteConnection, + script_root: &Word, +) -> Result, DatabaseError> { + let root_bytes = conversions::word_to_bytes(script_root); + + let row: Option = schema::note_scripts::table + .find(root_bytes) + .select(NoteScriptRow::as_select()) + .first(conn) + .optional()?; + + row.map(|r| conversions::note_script_from_bytes(&r.script_data)).transpose() +} + +/// Inserts a note script (idempotent via INSERT OR IGNORE). +pub fn insert_note_script( + conn: &mut SqliteConnection, + script_root: &Word, + script: &NoteScript, +) -> Result<(), DatabaseError> { + let insert = NoteScriptInsert { + script_root: conversions::word_to_bytes(script_root), + script_data: conversions::note_script_to_bytes(script), + }; + + diesel::insert_or_ignore_into(schema::note_scripts::table) + .values(&insert) + .execute(conn)?; + + Ok(()) +} diff --git a/crates/ntx-builder/src/db/models/queries/notes.rs b/crates/ntx-builder/src/db/models/queries/notes.rs index c33b84702..1c0145a9b 100644 --- a/crates/ntx-builder/src/db/models/queries/notes.rs +++ b/crates/ntx-builder/src/db/models/queries/notes.rs @@ -152,33 +152,6 @@ pub fn notes_failed( Ok(()) } -/// Drops notes for the given account that have exceeded the maximum attempt count. 
-/// -/// # Raw SQL -/// -/// ```sql -/// DELETE FROM notes -/// WHERE account_id = ?1 AND attempt_count >= ?2 -/// ``` -#[expect(clippy::cast_possible_wrap)] -pub fn drop_failing_notes( - conn: &mut SqliteConnection, - account_id: NetworkAccountId, - max_attempts: usize, -) -> Result<(), DatabaseError> { - let account_id_bytes = conversions::network_account_id_to_bytes(account_id); - let max_attempts = max_attempts as i32; - - diesel::delete( - schema::notes::table - .filter(schema::notes::account_id.eq(&account_id_bytes)) - .filter(schema::notes::attempt_count.ge(max_attempts)), - ) - .execute(conn)?; - - Ok(()) -} - // HELPERS // ================================================================================================ diff --git a/crates/ntx-builder/src/db/models/queries/tests.rs b/crates/ntx-builder/src/db/models/queries/tests.rs index 6ef55f9a2..0db95c018 100644 --- a/crates/ntx-builder/src/db/models/queries/tests.rs +++ b/crates/ntx-builder/src/db/models/queries/tests.rs @@ -426,39 +426,6 @@ fn available_notes_only_returns_notes_for_specified_account() { assert_eq!(result[0].to_inner().nullifier(), note_acct1.nullifier()); } -// DROP FAILING NOTES TESTS -// ================================================================================================ - -#[test] -fn drop_failing_notes_scoped_to_account() { - let (conn, _dir) = &mut test_conn(); - - let account_id_1 = mock_network_account_id(); - let account_id_2 = mock_network_account_id_seeded(42); - - let note_acct1 = mock_single_target_note(account_id_1, 10); - let note_acct2 = mock_single_target_note(account_id_2, 20); - - // Insert both as committed. - insert_committed_notes(conn, &[note_acct1.clone(), note_acct2.clone()]).unwrap(); - - // Fail both notes enough times to exceed max_attempts=2. 
- let block_num = BlockNumber::from(100u32); - notes_failed(conn, &[note_acct1.nullifier()], block_num).unwrap(); - notes_failed(conn, &[note_acct1.nullifier()], block_num).unwrap(); - notes_failed(conn, &[note_acct2.nullifier()], block_num).unwrap(); - notes_failed(conn, &[note_acct2.nullifier()], block_num).unwrap(); - - // Drop failing notes for account_id_1 only. - drop_failing_notes(conn, account_id_1, 2).unwrap(); - - // note_acct1 should be deleted, note_acct2 should remain. - assert_eq!(count_notes(conn), 1); - let remaining: Vec> = - schema::notes::table.select(schema::notes::nullifier).load(conn).unwrap(); - assert_eq!(remaining[0], conversions::nullifier_to_bytes(¬e_acct2.nullifier())); -} - // NOTES FAILED TESTS // ================================================================================================ @@ -513,6 +480,55 @@ fn upsert_chain_state_updates_singleton() { assert_eq!(stored_block_num, conversions::block_num_to_i64(block_num_2)); } +// NOTE SCRIPT TESTS +// ================================================================================================ + +#[test] +fn note_script_insert_and_lookup() { + let (conn, _dir) = &mut test_conn(); + + // Extract a NoteScript from a mock note. + let account_id = mock_network_account_id(); + let note: miden_protocol::note::Note = mock_single_target_note(account_id, 10).into(); + let script = note.script().clone(); + let root = script.root(); + + // Insert the script. + insert_note_script(conn, &root, &script).unwrap(); + + // Look it up — should match the original. 
+ let found = lookup_note_script(conn, &root).unwrap(); + assert!(found.is_some()); + assert_eq!(found.unwrap().root(), script.root()); +} + +#[test] +fn note_script_lookup_returns_none_for_missing() { + let (conn, _dir) = &mut test_conn(); + + let missing_root = Word::default(); + let found = lookup_note_script(conn, &missing_root).unwrap(); + assert!(found.is_none()); +} + +#[test] +fn note_script_insert_is_idempotent() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note: miden_protocol::note::Note = mock_single_target_note(account_id, 10).into(); + let script = note.script().clone(); + let root = script.root(); + + // Insert the same script twice — should not error. + insert_note_script(conn, &root, &script).unwrap(); + insert_note_script(conn, &root, &script).unwrap(); + + // Should still be retrievable. + let found = lookup_note_script(conn, &root).unwrap(); + assert!(found.is_some()); +} + // HELPERS (domain type construction) // ================================================================================================ diff --git a/crates/ntx-builder/src/db/schema.rs b/crates/ntx-builder/src/db/schema.rs index 6a70ee121..93dca8ce5 100644 --- a/crates/ntx-builder/src/db/schema.rs +++ b/crates/ntx-builder/src/db/schema.rs @@ -17,6 +17,13 @@ diesel::table! { } } +diesel::table! { + note_scripts (script_root) { + script_root -> Binary, + script_data -> Binary, + } +} + diesel::table! { notes (nullifier) { nullifier -> Binary, @@ -29,4 +36,4 @@ diesel::table! 
{ } } -diesel::allow_tables_to_appear_in_same_query!(accounts, chain_state, notes,); +diesel::allow_tables_to_appear_in_same_query!(accounts, chain_state, note_scripts, notes,); diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index 5732cb43f..02c9f547c 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -11,7 +11,7 @@ use db::Db; use futures::TryStreamExt; use miden_node_utils::lru_cache::LruCache; use store::StoreClient; -use tokio::sync::RwLock; +use tokio::sync::{RwLock, mpsc}; use url::Url; mod actor; @@ -249,6 +249,8 @@ impl NtxBuilderConfig { let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); + let (notification_tx, notification_rx) = mpsc::channel(1); + let actor_context = AccountActorContext { block_producer_url: self.block_producer_url.clone(), validator_url: self.validator_url.clone(), @@ -259,6 +261,7 @@ impl NtxBuilderConfig { max_notes_per_tx: self.max_notes_per_tx, max_note_attempts: self.max_note_attempts, db: db.clone(), + notification_tx, }; Ok(NetworkTransactionBuilder::new( @@ -269,6 +272,7 @@ impl NtxBuilderConfig { chain_state, actor_context, mempool_events, + notification_rx, )) } } From da8997aaf736d1a580369bd40f0a1ae5732ea236 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 23 Feb 2026 14:30:11 +0200 Subject: [PATCH 49/77] docs: use org contributing.md (#1698) --- CONTRIBUTING.md | 126 ------------------------------------------------ README.md | 8 +-- 2 files changed, 5 insertions(+), 129 deletions(-) delete mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 94e683075..000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,126 +0,0 @@ -# Contributing to Miden Node - -#### First off, thanks for taking the time to contribute! 
- -We want to make contributing to this project as easy and transparent as possible, whether it's: - -- Reporting a [bug](https://github.com/0xMiden/miden-node/issues/new?assignees=&labels=bug&projects=&template=1-bugreport.yml) -- Taking part in [discussions](https://github.com/0xMiden/miden-node/discussions) -- Submitting a [fix](https://github.com/0xMiden/miden-node/pulls) -- Proposing new [features](https://github.com/0xMiden/miden-node/issues/new?assignees=&labels=enhancement&projects=&template=2-feature-request.yml) - -  - -## Flow - -We are using [Github Flow](https://docs.github.com/en/get-started/quickstart/github-flow), so all code changes happen through pull requests from a [forked repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). - -### Branching - -- The current active branch is `next`. Every branch with a fix/feature must be forked from `next`. - -- The branch name should contain a short issue/feature description separated with hyphens [(kebab-case)](https://en.wikipedia.org/wiki/Letter_case#Kebab_case). - - For example, if the issue title is `Fix functionality X in component Y` then the branch name will be something like: `fix-x-in-y`. - -- New branch should be rebased from `next` before submitting a PR in case there have been changes to avoid merge commits. i.e. this branches state: - - ``` - A---B---C fix-x-in-y - / - D---E---F---G next - | | - (F, G) changes happened after `fix-x-in-y` forked - ``` - - should become this after rebase: - - ``` - A'--B'--C' fix-x-in-y - / - D---E---F---G next - ``` - - More about rebase [here](https://git-scm.com/docs/git-rebase) and [here](https://www.atlassian.com/git/tutorials/rewriting-history/git-rebase#:~:text=What%20is%20git%20rebase%3F,of%20a%20feature%20branching%20workflow.) 
- -### Commit messages - -- Commit messages should be written in a short, descriptive manner and be prefixed with tags for the change type and scope (if possible) according to the [semantic commit](https://gist.github.com/joshbuchea/6f47e86d2510bce28f8e7f42ae84c716) scheme. For example, a new change to the `miden-node-store` crate might have the following message: `feat(miden-node-store): fix block-headers database schema` - -- Also squash commits to logically separated, distinguishable stages to keep git log clean: - - ``` - 7hgf8978g9... Added A to X \ - \ (squash) - gh354354gh... oops, typo --- * ---------> 9fh1f51gh7... feat(X): add A && B - / - 85493g2458... Added B to X / - - - 789fdfffdf... Fixed D in Y \ - \ (squash) - 787g8fgf78... blah blah --- * ---------> 4070df6f00... fix(Y): fixed D && C - / - 9080gf6567... Fixed C in Y / - ``` - -### Code Style and Documentation - -- For documentation in the codebase, we follow the [rustdoc](https://doc.rust-lang.org/rust-by-example/meta/doc.html) convention with no more than 100 characters per line. -- For code sections, we use code separators like the following to a width of 100 characters:: - - ``` - // CODE SECTION HEADER - // ================================================================================ - ``` - -- [Rustfmt](https://github.com/rust-lang/rustfmt), [Clippy](https://github.com/rust-lang/rust-clippy) and [Rustdoc](https://doc.rust-lang.org/rustdoc/index.html) linting is included in CI pipeline. Anyways it's preferable to run linting locally before push. To simplify running these commands in a reproducible manner we use `make` commands, you can run: - - ``` - make lint - ``` - -You can find more information about the `make` commands in the [Makefile](Makefile) - -### Testing - -After writing code different types of tests (unit, integration, end-to-end) are required to make sure that the correct behavior has been achieved and that no bugs have been introduced. 
You can run tests using the following command: - -``` -make test -``` - -### Versioning - -We use [semver](https://semver.org/) naming convention. - -  - -## Pre-PR checklist - -To make sure all commits adhere to our programming standards please follow the checklist: - -1. Repo forked and branch created from `next` according to the naming convention. -2. Commit messages and code style follow conventions. -3. Tests added for new functionality. -4. Documentation/comments updated for all changes according to our documentation convention. -5. Spellchecking ([typos](https://github.com/crate-ci/typos/tree/master?tab=readme-ov-file#install)), Rustfmt, Clippy and Rustdoc linting passed (run with `make lint`). -6. New branch rebased from `next`. - -  - -## Write bug reports with detail, background, and sample code - -**Great Bug Reports** tend to have: - -- A quick summary and/or background -- Steps to reproduce -- What you expected would happen -- What actually happens -- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work) - -  - -## Any contributions you make will be under the MIT Software License - -In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern. diff --git a/README.md b/README.md index 696cb1990..1ee6afeb6 100644 --- a/README.md +++ b/README.md @@ -33,15 +33,17 @@ The documentation in the `docs/external` folder is built using Docusaurus and is Developer documentation and onboarding guide is available [here](https://0xMiden.github.io/miden-node/developer/index.html). 
-At minimum, please see our [contributing](CONTRIBUTING.md) guidelines and our [makefile](Makefile) for example workflows +At minimum, please see our [contributing](https://github.com/0xMiden/.github?tab=contributing-ov-file) guidelines and our [makefile](Makefile) for example workflows e.g. run the testsuite using ```sh make test ``` -Note that we do _not_ accept low-effort contributions or AI generated code. For typos and documentation errors please -rather open an issue. +In particular, please note that we do _not_ accept [low-effort contributions](https://github.com/0xMiden/.github?tab=contributing-ov-file#contribution-quality) or AI generated code. For typos and documentation errors please open an issue instead. + +> [!IMPORTANT] +> PRs will be closed unless you have been assigned an issue by a maintainer. ## License From 41a28040b187b046a5dfafba0de8f3bd903ca768 Mon Sep 17 00:00:00 2001 From: Marti Date: Mon, 23 Feb 2026 16:12:17 +0100 Subject: [PATCH 50/77] feat: read general account files at genesis (#1624) * feat: load genesis accounts from acc files * chore: move toml reading logic to genesisconfig struct * feat: sample config with agglayer accounts * chore: make tempfile a workspace dep * clippy * regen account files * Revert "regen account files" This reverts commit a89b4c98f7b45214f4092e22d774e35df4759646. 
* changelog * lints * chore: MissingFaucetDefinition error msg includes "account file" option * chore: nits * chore: should not rebuild when .mac changes * regen account files * feat: ensure reproducible account file generation * Apply suggestions from code review Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> * chore: use Display trait in errors * fix: include error context * feat: unify GenesisConfig & NativeFaucet structs * chore: rename NativeFaucet Toml -> Config * chore: rename AccountToml -> GenericAccountConfig * chore: refactor building NativeFaucet account into helper * chore: note about NO validation of generic accts * feat: simplify creation of native faucet * docs: update operator docs * chore: expect path to have parent --------- Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> --- CHANGELOG.md | 1 + Cargo.lock | 2 + Cargo.toml | 1 + bin/node/src/commands/store.rs | 3 +- crates/block-producer/Cargo.toml | 2 +- crates/rpc/Cargo.toml | 2 +- crates/store/Cargo.toml | 4 + crates/store/build.rs | 94 ++++++ crates/store/src/genesis/config/errors.rs | 16 +- crates/store/src/genesis/config/mod.rs | 194 ++++++++---- .../src/genesis/config/samples/01-simple.toml | 5 - .../config/samples/02-with-account-files.toml | 30 ++ .../agglayer_faucet_eth.mac | Bin 0 -> 8521 bytes .../agglayer_faucet_usdc.mac | Bin 0 -> 8521 bytes .../samples/02-with-account-files/bridge.mac | Bin 0 -> 8346 bytes crates/store/src/genesis/config/tests.rs | 295 +++++++++++++++++- docs/external/src/operator/usage.md | 27 +- 17 files changed, 600 insertions(+), 76 deletions(-) create mode 100644 crates/store/src/genesis/config/samples/02-with-account-files.toml create mode 100644 crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac create mode 100644 crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac create mode 100644 
crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac diff --git a/CHANGELOG.md b/CHANGELOG.md index 171a649fe..829d43dc1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). - Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). - Add #[track_caller] to tracing/logging helpers ([#1651](https://github.com/0xMiden/miden-node/pull/1651)). +- Added support for generic account loading at genesis ([#1624](https://github.com/0xMiden/miden-node/pull/1624)). - Improved tracing span fields ([#1650](https://github.com/0xMiden/miden-node/pull/1650)) - Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/miden-node/pull/1662)). - [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/miden-node/pull/1688)). 
diff --git a/Cargo.lock b/Cargo.lock index cc02c8333..e3f772dbd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2541,6 +2541,7 @@ dependencies = [ "hex", "indexmap", "libsqlite3-sys", + "miden-agglayer", "miden-block-prover", "miden-crypto", "miden-node-db", @@ -2557,6 +2558,7 @@ dependencies = [ "rand_chacha", "regex", "serde", + "tempfile", "termtree", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 219fd51b6..ee8ef78dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,6 +102,7 @@ rand = { version = "0.9" } rand_chacha = { version = "0.9" } rstest = { version = "0.26" } serde = { features = ["derive"], version = "1" } +tempfile = { version = "3" } thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 54c741e4d..7bf56f4a8 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -202,8 +202,7 @@ impl StoreCommand { // Parse genesis config (or default if not given). 
let config = genesis_config .map(|file_path| { - let toml_str = fs_err::read_to_string(file_path)?; - GenesisConfig::read_toml(toml_str.as_str()).with_context(|| { + GenesisConfig::read_toml_file(file_path).with_context(|| { format!("failed to parse genesis config from file {}", file_path.display()) }) }) diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index 023a7a448..474190ca6 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -52,6 +52,6 @@ pretty_assertions = "1.4" rand_chacha = { default-features = false, version = "0.9" } rstest = { workspace = true } serial_test = "3.2" -tempfile = { version = "3.20" } +tempfile = { workspace = true } tokio = { features = ["test-util"], workspace = true } winterfell = { version = "0.13" } diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 926fe0ee8..276a4cf25 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -44,4 +44,4 @@ miden-protocol = { default-features = true, features = ["testing"], workspace miden-standards = { workspace = true } reqwest = { version = "0.12" } rstest = { workspace = true } -tempfile = { version = "3.20" } +tempfile = { workspace = true } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 82466fcba..fd97f9195 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -50,7 +50,10 @@ tracing = { workspace = true } url = { workspace = true } [build-dependencies] +fs-err = { workspace = true } +miden-agglayer = { branch = "next", features = ["testing"], git = "https://github.com/0xMiden/miden-base" } miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miden-protocol = { features = ["std"], workspace = true } [dev-dependencies] assert_matches = { workspace = true } @@ -62,6 +65,7 @@ miden-protocol = { default-features = true, features = ["testing"], works miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } regex = { version = "1.11" 
} +tempfile = { workspace = true } termtree = { version = "0.5" } [features] diff --git a/crates/store/build.rs b/crates/store/build.rs index a911bea19..cd6fca23f 100644 --- a/crates/store/build.rs +++ b/crates/store/build.rs @@ -1,6 +1,13 @@ // This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in // `store/src/db/migrations.rs` to include the latest version of the migrations into the binary, see . +use std::path::PathBuf; +use std::sync::Arc; + +use miden_agglayer::{create_existing_agglayer_faucet, create_existing_bridge_account}; +use miden_protocol::account::{Account, AccountCode, AccountFile}; +use miden_protocol::{Felt, Word}; + fn main() { println!("cargo:rerun-if-changed=./src/db/migrations"); // If we do one re-write, the default rules are disabled, @@ -8,5 +15,92 @@ fn main() { // println!("cargo:rerun-if-changed=Cargo.toml"); + // Generate sample agglayer account files for genesis config samples. + generate_agglayer_sample_accounts(); miden_node_rocksdb_cxx_linkage_fix::configure(); } + +/// Generates sample agglayer account files for the `02-with-account-files` genesis config sample. 
+/// +/// Creates: +/// - `02-with-account-files/bridge.mac` - agglayer bridge account +/// - `02-with-account-files/agglayer_faucet_eth.mac` - agglayer faucet for wrapped ETH +/// - `02-with-account-files/agglayer_faucet_usdc.mac` - agglayer faucet for wrapped USDC +fn generate_agglayer_sample_accounts() { + // Use CARGO_MANIFEST_DIR to get the absolute path to the crate root + let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set"); + let samples_dir: PathBuf = + [&manifest_dir, "src", "genesis", "config", "samples", "02-with-account-files"] + .iter() + .collect(); + + // Create the directory if it doesn't exist + fs_err::create_dir_all(&samples_dir).expect("Failed to create samples directory"); + + // Use deterministic seeds for reproducible builds + // WARNING: DO NOT USE THIS IN PRODUCTION + let bridge_seed: Word = Word::new([Felt::new(1u64); 4]); + let eth_faucet_seed: Word = Word::new([Felt::new(2u64); 4]); + let usdc_faucet_seed: Word = Word::new([Felt::new(3u64); 4]); + + // Create the bridge account first (faucets need to reference it) + // Use "existing" variant so accounts have nonce > 0 (required for genesis) + let bridge_account = create_existing_bridge_account(bridge_seed); + let bridge_account_id = bridge_account.id(); + + // Create AggLayer faucets using "existing" variant + // ETH: 18 decimals, max supply of 1 billion tokens + let eth_faucet = create_existing_agglayer_faucet( + eth_faucet_seed, + "ETH", + 18, + Felt::new(1_000_000_000), + bridge_account_id, + ); + + // USDC: 6 decimals, max supply of 10 billion tokens + let usdc_faucet = create_existing_agglayer_faucet( + usdc_faucet_seed, + "USDC", + 6, + Felt::new(10_000_000_000), + bridge_account_id, + ); + + // Strip source location decorators from account code to ensure deterministic output. 
+ let bridge_account = strip_code_decorators(bridge_account); + let eth_faucet = strip_code_decorators(eth_faucet); + let usdc_faucet = strip_code_decorators(usdc_faucet); + + // Save account files (without secret keys since these use NoAuth) + let bridge_file = AccountFile::new(bridge_account, vec![]); + let eth_faucet_file = AccountFile::new(eth_faucet, vec![]); + let usdc_faucet_file = AccountFile::new(usdc_faucet, vec![]); + + // Write files + bridge_file + .write(samples_dir.join("bridge.mac")) + .expect("Failed to write bridge.mac"); + eth_faucet_file + .write(samples_dir.join("agglayer_faucet_eth.mac")) + .expect("Failed to write agglayer_faucet_eth.mac"); + usdc_faucet_file + .write(samples_dir.join("agglayer_faucet_usdc.mac")) + .expect("Failed to write agglayer_faucet_usdc.mac"); +} + +/// Strips source location decorators from an account's code MAST forest. +/// +/// This is necessary because the MAST forest embeds absolute file paths from the Cargo build +/// directory, which include a hash that differs between `cargo check` and `cargo build`. Stripping +/// decorators ensures the serialized `.mac` files are identical regardless of which cargo command +/// is used (CI or local builds or tests). 
+fn strip_code_decorators(account: Account) -> Account { + let (id, vault, storage, code, nonce, seed) = account.into_parts(); + + let mut mast = code.mast(); + Arc::make_mut(&mut mast).strip_decorators(); + let code = AccountCode::from_parts(mast, code.procedures().to_vec()); + + Account::new_unchecked(id, vault, storage, code, nonce, seed) +} diff --git a/crates/store/src/genesis/config/errors.rs b/crates/store/src/genesis/config/errors.rs index 4d360e925..3ea497d54 100644 --- a/crates/store/src/genesis/config/errors.rs +++ b/crates/store/src/genesis/config/errors.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use miden_protocol::account::AccountId; use miden_protocol::errors::{ AccountDeltaError, @@ -16,13 +18,21 @@ use crate::genesis::config::TokenSymbolStr; pub enum GenesisConfigError { #[error(transparent)] Toml(#[from] toml::de::Error), + #[error("failed to read config file at {1}")] + ConfigFileRead(#[source] std::io::Error, PathBuf), + #[error("failed to read account file at {1}")] + AccountFileRead(#[source] std::io::Error, PathBuf), + #[error("native faucet from file {path} is not a fungible faucet")] + NativeFaucetNotFungible { path: PathBuf }, #[error("account translation from config to state failed")] Account(#[from] AccountError), #[error("asset translation from config to state failed")] Asset(#[from] AssetError), #[error("adding assets to account failed")] AccountDelta(#[from] AccountDeltaError), - #[error("the defined asset {symbol:?} has no corresponding faucet")] + #[error( + "the defined asset '{symbol}' has no corresponding faucet, or the faucet was provided as an account file" + )] MissingFaucetDefinition { symbol: TokenSymbolStr }, #[error("account with id {account_id} was referenced but is not part of given genesis state")] MissingGenesisAccount { account_id: AccountId }, @@ -40,10 +50,10 @@ pub enum GenesisConfigError { BasicWallet(#[from] BasicWalletError), #[error(r#"incompatible combination of `max_supply` ({max_supply})" and `decimals` 
({decimals}) exceeding the allowed value range of an `u64`"#)] OutOfRange { max_supply: u64, decimals: u8 }, - #[error("Found duplicate faucet definition for token symbol {symbol:?}")] + #[error("Found duplicate faucet definition for token symbol '{symbol}'")] DuplicateFaucetDefinition { symbol: TokenSymbolStr }, #[error( - "Total issuance {total_issuance} of {symbol:?} exceeds faucet's maximum issuance of {max_supply}" + "Total issuance {total_issuance} of '{symbol}' exceeds faucet's maximum issuance of {max_supply}" )] MaxIssuanceExceeded { symbol: TokenSymbolStr, diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index 283208182..271c5a8bc 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -1,6 +1,7 @@ //! Describe a subset of the genesis manifest in easily human readable format use std::cmp::Ordering; +use std::path::{Path, PathBuf}; use std::str::FromStr; use indexmap::IndexMap; @@ -42,27 +43,55 @@ use self::errors::GenesisConfigError; #[cfg(test)] mod tests; +const DEFAULT_NATIVE_FAUCET_SYMBOL: &str = "MIDEN"; +const DEFAULT_NATIVE_FAUCET_DECIMALS: u8 = 6; +const DEFAULT_NATIVE_FAUCET_MAX_SUPPLY: u64 = 100_000_000_000_000_000; + // GENESIS CONFIG // ================================================================================================ +/// An account loaded from a `.mac` file (path relative to genesis config directory). +/// +/// Notice: Generic accounts are not validated (e.g. that their vault assets reference known +/// faucets), leaving the responsibility of ensuring valid genesis state to the operator. +#[derive(Debug, Clone, serde::Deserialize)] +#[serde(deny_unknown_fields)] +struct GenericAccountConfig { + path: PathBuf, +} + /// Specify a set of faucets and wallets with assets for easier test deployments. /// /// Notice: Any faucet must be declared _before_ it's use in a wallet/regular account. 
-#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Clone, serde::Deserialize)] +#[serde(deny_unknown_fields)] pub struct GenesisConfig { version: u32, timestamp: u32, - native_faucet: NativeFaucet, + /// Override the native faucet with a custom faucet account. + /// + /// If unspecified, a default native faucet will be used with: + /// + /// ```toml + /// symbol = "MIDEN" + /// decimals = 6 + /// max_supply = 100_000_000_000_000_000 + /// ``` + #[serde(default)] + native_faucet: Option, fee_parameters: FeeParameterConfig, #[serde(default)] wallet: Vec, #[serde(default)] fungible_faucet: Vec, + #[serde(default)] + account: Vec, + #[serde(skip)] + config_dir: PathBuf, } impl Default for GenesisConfig { fn default() -> Self { - let miden = TokenSymbolStr::from_str("MIDEN").unwrap(); Self { version: 1_u32, timestamp: u32::try_from( @@ -73,24 +102,38 @@ impl Default for GenesisConfig { ) .expect("Timestamp should fit into u32"), wallet: vec![], - native_faucet: NativeFaucet { - max_supply: 100_000_000_000_000_000u64, - decimals: 6u8, - symbol: miden.clone(), - }, + native_faucet: None, fee_parameters: FeeParameterConfig { verification_base_fee: 0 }, fungible_faucet: vec![], + account: vec![], + config_dir: PathBuf::from("."), } } } impl GenesisConfig { - /// Read the genesis accounts from a toml formatted string + /// Read the genesis config from a TOML file. + /// + /// The parent directory of `path` is used to resolve relative paths for account files + /// referenced in the configuration (e.g., `[[account]]` entries with `path` fields). /// /// Notice: It will generate the specified case during [`fn into_state`]. 
- pub fn read_toml(toml_str: &str) -> Result { - let me = toml::from_str::(toml_str)?; - Ok(me) + pub fn read_toml_file(path: &Path) -> Result { + let toml_str = fs_err::read_to_string(path) + .map_err(|e| GenesisConfigError::ConfigFileRead(e, path.to_path_buf()))?; + let config_dir = path.parent().expect("config file path must have a parent directory"); + Self::read_toml(&toml_str, config_dir) + } + + /// Parse a genesis config from a TOML formatted string. + /// + /// The `config_dir` parameter is stored so that relative paths for account files + /// (e.g., `[[account]]` entries with `path` fields, or native faucet file references) + /// can be resolved later during [`Self::into_state`]. + fn read_toml(toml_str: &str, config_dir: &Path) -> Result { + let mut config: Self = toml::from_str(toml_str)?; + config.config_dir = config_dir.to_path_buf(); + Ok(config) } /// Convert the in memory representation into the new genesis state @@ -108,10 +151,20 @@ impl GenesisConfig { fee_parameters, fungible_faucet: fungible_faucet_configs, wallet: wallet_configs, - .. 
+ account: account_entries, + config_dir, } = self; - let symbol = native_faucet.symbol.clone(); + // Load account files from disk + let file_loaded_accounts = account_entries + .into_iter() + .map(|acc| { + let full_path = config_dir.join(&acc.path); + let account_file = AccountFile::read(&full_path) + .map_err(|e| GenesisConfigError::AccountFileRead(e, full_path.clone()))?; + Ok(account_file.account) + }) + .collect::, GenesisConfigError>>()?; let mut wallet_accounts = Vec::::new(); // Every asset sitting in a wallet, has to reference a faucet for that asset @@ -121,10 +174,21 @@ impl GenesisConfig { // accounts/sign transactions let mut secrets = Vec::new(); - // First setup all the faucets - for fungible_faucet_config in std::iter::once(native_faucet.to_faucet_config()) - .chain(fungible_faucet_configs.into_iter()) - { + // Handle native faucet: build from defaults or load from file + let (native_faucet_account, symbol, native_secret) = + NativeFaucetConfig(native_faucet).build_account(&config_dir)?; + if let Some(secret_key) = native_secret { + secrets.push(( + format!("faucet_{symbol}.mac", symbol = symbol.to_string().to_lowercase()), + native_faucet_account.id(), + secret_key, + )); + } + let native_faucet_account_id = native_faucet_account.id(); + faucet_accounts.insert(symbol.clone(), native_faucet_account); + + // Setup additional fungible faucets from parameters + for fungible_faucet_config in fungible_faucet_configs { let symbol = fungible_faucet_config.symbol.clone(); let (faucet_account, secret_key) = fungible_faucet_config.build_account()?; @@ -141,11 +205,6 @@ impl GenesisConfig { // we know the remaining supply in the faucets. 
} - let native_faucet_account_id = faucet_accounts - .get(&symbol) - .expect("Parsing guarantees the existence of a native faucet.") - .id(); - let fee_parameters = FeeParameters::new(native_faucet_account_id, fee_parameters.verification_base_fee)?; @@ -264,6 +323,9 @@ impl GenesisConfig { // Ensure the faucets always precede the wallets referencing them all_accounts.extend(wallet_accounts); + // Append file-loaded accounts as-is + all_accounts.extend(file_loaded_accounts); + Ok(( GenesisState { fee_parameters, @@ -277,36 +339,6 @@ impl GenesisConfig { } } -// NATIVE FAUCET -// ================================================================================================ - -/// Declare the native fungible asset -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -#[serde(deny_unknown_fields)] -pub struct NativeFaucet { - /// Token symbol to use for fees. - symbol: TokenSymbolStr, - - decimals: u8, - /// Max supply in full token units - /// - /// It will be converted internally to the smallest representable unit, - /// using based `10.powi(decimals)` as a multiplier. - max_supply: u64, -} - -impl NativeFaucet { - fn to_faucet_config(&self) -> FungibleFaucetConfig { - let NativeFaucet { symbol, decimals, max_supply, .. } = self; - FungibleFaucetConfig { - symbol: symbol.clone(), - decimals: *decimals, - max_supply: *max_supply, - storage_mode: StorageMode::Public, - } - } -} - // FEE PARAMETER CONFIG // ================================================================================================ @@ -320,6 +352,54 @@ pub struct FeeParameterConfig { verification_base_fee: u32, } +// NATIVE FAUCET CONFIG +// ================================================================================================ + +/// Wraps an optional path to a pre-built faucet account file. +/// +/// When no path is provided, a default native faucet is built using hardcoded MIDEN defaults. 
+struct NativeFaucetConfig(Option); + +impl NativeFaucetConfig { + /// Build or load the native faucet account. + /// + /// For `None`, builds a new faucet from defaults and returns the generated secret key. + /// For `Some(path)`, loads the account from disk and validates it is a fungible faucet. + fn build_account( + self, + config_dir: &Path, + ) -> Result<(Account, TokenSymbolStr, Option), GenesisConfigError> { + match self.0 { + None => { + let symbol = TokenSymbolStr::from_str(DEFAULT_NATIVE_FAUCET_SYMBOL).unwrap(); + let faucet_config = FungibleFaucetConfig { + symbol: symbol.clone(), + decimals: DEFAULT_NATIVE_FAUCET_DECIMALS, + max_supply: DEFAULT_NATIVE_FAUCET_MAX_SUPPLY, + storage_mode: StorageMode::Public, + }; + let (account, secret_key) = faucet_config.build_account()?; + Ok((account, symbol, Some(secret_key))) + }, + Some(path) => { + let full_path = config_dir.join(&path); + let account_file = AccountFile::read(&full_path) + .map_err(|e| GenesisConfigError::AccountFileRead(e, full_path.clone()))?; + let account = account_file.account; + + if account.id().account_type() != AccountType::FungibleFaucet { + return Err(GenesisConfigError::NativeFaucetNotFungible { path: full_path }); + } + + let faucet = BasicFungibleFaucet::try_from(&account) + .expect("validated as fungible faucet above"); + let symbol = TokenSymbolStr::from(faucet.symbol()); + Ok((account, symbol, None)) + }, + } + } +} + // FUNGIBLE FAUCET CONFIG // ================================================================================================ @@ -548,6 +628,14 @@ impl From for TokenSymbol { } } +impl From for TokenSymbolStr { + fn from(symbol: TokenSymbol) -> Self { + // SAFETY: TokenSymbol guarantees valid format, so to_string should not fail + let raw = symbol.to_string().expect("TokenSymbol should always produce valid string"); + Self { raw, encoded: symbol } + } +} + impl Ord for TokenSymbolStr { fn cmp(&self, other: &Self) -> Ordering { self.raw.cmp(&other.raw) diff --git 
a/crates/store/src/genesis/config/samples/01-simple.toml b/crates/store/src/genesis/config/samples/01-simple.toml index d32403e85..2d7af4884 100644 --- a/crates/store/src/genesis/config/samples/01-simple.toml +++ b/crates/store/src/genesis/config/samples/01-simple.toml @@ -1,11 +1,6 @@ timestamp = 1717344256 version = 1 -[native_faucet] -decimals = 3 -max_supply = 100_000_000 -symbol = "MIDEN" - [fee_parameters] verification_base_fee = 0 diff --git a/crates/store/src/genesis/config/samples/02-with-account-files.toml b/crates/store/src/genesis/config/samples/02-with-account-files.toml new file mode 100644 index 000000000..ede3032b6 --- /dev/null +++ b/crates/store/src/genesis/config/samples/02-with-account-files.toml @@ -0,0 +1,30 @@ +# Genesis configuration example with AggLayer account files +# +# This example demonstrates how to include pre-built accounts from .mac files +# in the genesis block. The account files are generated by the build script +# using deterministic seeds for reproducibility. +# +# They demonstrate interdependencies between accounts: +# - bridge.mac: AggLayer bridge account for cross-chain asset transfers +# - agglayer_faucet_eth.mac: AggLayer faucet for wrapped ETH, depends on the bridge account. +# - agglayer_faucet_usdc.mac: AggLayer faucet for wrapped USDC, depends on the bridge account. +# +# Paths are relative to the directory containing this configuration file. 
+ +timestamp = 1717344256 +version = 1 + +[fee_parameters] +verification_base_fee = 0 + +# AggLayer bridge account for bridging assets to/from AggLayer +[[account]] +path = "02-with-account-files/bridge.mac" + +# AggLayer ETH faucet for wrapped ETH tokens +[[account]] +path = "02-with-account-files/agglayer_faucet_eth.mac" + +# AggLayer USDC faucet for wrapped USDC tokens +[[account]] +path = "02-with-account-files/agglayer_faucet_usdc.mac" diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac new file mode 100644 index 0000000000000000000000000000000000000000..ed79a49b1b58f01a656883fcee580301361de395 GIT binary patch literal 8521 zcmb7J1zc6nmw%UTL_k8NQ|i&uB_$vsp**@21w^{UCy12N-Jyhllz?;yN+^-N*woa0gme;O1 zSbE#KNJ`pSx?9`2{l@+GPw2$&K8OkPMQ4{=Znvy&IUIjcQqtAi)yC4z^1pxg-+Fj4 z46bgLPBxYrqph3eZ?^6fDVX3Sh9yWD`Y(K13YYca4u332 z5RQ=oIE0l6w4KpCTzy9Jn{)-sJh-u=Z zn2uWr3RId{C@n!v{SInqn*RL-S<@C&K~j3i?SZ*+6gO7e6>|+5Wz#00Ar(AvGd30%Ao1h=c@uu(Po zDJf757=3&YdJ`2AHF!=r@|J4a!{+FwK+z4uvXb(0bcdlv_Wvp`Z^L$+vEo$xz zQA5gm_A@Bjajqe(yPlP_NF-3B0M&sB{*445K~C{U`(Ma;5#;~W{RhFvO^Bk!WWT|sfCl14{i%j65Mi;KAyt; z#?DANo;V~VL5OR;N<>ACGHJT9hKAO6oTX(Sc9ShT<{S7JnOZ(xkXlWni1*GkCN<`V zk6caL_SUAuhOFHhIT;3r{J664sX<`gIMR|Xs<5wI){N@s-YOwUqh;l~gjk1gzrLMP(rKOCsHAnpmtzHD;f?Q!-kH$rtA)aD2&DNbO zPkYajSZrRUHF@T@(rA+!de%;s55@pPE4UPcma?=56714+13Z`H;sc4JAB6u>1-hMw2~IKuY{N)iMsprygKl@!oGQd&6)R}Pw3dnGJ1Ei z*-qv4mSvUgQss4sbIXQ^$*FB?tk6?Xi zYSwh=qgql79_#s+!$vOFro@zT#FO0UW*b6Gd^b_8Kh9R)&JU9?>jVW$BV8a1~*!M#+_!|R9CV}_PvTujX&eU;FQ&hySOcPSIMyOEcrzY&Uf7I{5gVy zOi{O;hO$H{v3)G0f${5YY^hlJH;#@BweW@n7?S*f52_osM(|H)h)ZkUCm?6C#7V#T~K#Rq) zrXW5^T($ccw--G=2`cFw=>jod797`eq;lC9epM$i;{M*cJyUwGwBeXWpt;hv;e$>SFE z-hGS1j8ys@t+f-itVBg9D!;Q)VBF+k+&5+=EMcop>E$tg4b%jS+OiUpRMf{HS(er??^ENAiNfG zoSdX96WY(as_)y9sAW>KNZ7KHu0uX;{xP&-qr^z{?C*&ZSJqAG|xMQ$m 
z^uu-L`XdH)`^>Z)*3%nLan{E%n)&Y6!nzSiAfiZj&F|y%ogG;th%<&;B#-2LL=RXhvu$A0g(`&384S$LlF1|~kSo=?4*sBIbIn8uUJz7@npXO zgUHt>+;}qwLR=5w;DDhmcZ1~@p`56XvJ(=7D=rsXh3%9MKgXSpd7zKuCv~2k2Vme$ zmI-2*+@=0IQw}ci?XmpyfDd@uO0vO**pV(Wyn71y!*QL4T*QuT;dT{BI_Xi)7M(4{< z+EfCQg`39EweWioZ0KdC6``p<5goPZ7s-tr!P8l)qzXA)7n|C<&fJh@GaO6B=O#`#joTy)xJUq@fd04MItdl6m1;c-{45mL zARkWh4cGT&U)?IE_s$1#@D8jg<1`?)8)S<5|@gv zhyozIhCIh-E?sU_vG?IBDPu-;8Abgx=;0iloveE#$F%)s%uWpC3+oQD-uBC5xH}>H ze85JvZU3C>HxfN{rw$!G{%i&B4G~gtFt5SS8=Bjk#W0_*x@%3aT4d1zfqm)Qn&|_X zN6ljDBxMC87Uuy3#%+iZzY-XvAgYfZ(G}9B&#Cz=`J_6b-)??{BZ^Fx@_QzxF7w1a zD_=aV?}@sZ6kP6L5i_DDxr9L6UQ900Rw}=^fLT5eXzvHxH5A^3mj=7O&{IgQ4D!8A zl1fk)DCiBJqxx~aIbo86s3^m9hQJ{#SLr1j$nU>tuiGrzHlBVbGj?id3*YwL@9}1SR9y8K)T9T3LWUl z`WsPJ;oCo0B0w-n{Ktn*k3}3%q`?;o#{H$l7Z)I~Ds+0z*+i0_5tG+Mu~)sNL7I7l*qtjo%yoG{j*!o&I_Q!S z6T`&!;d4I0o$%(pbX8({L9ngUk4t54!v{oo`|4tcK+U}{qf=F z=96Iruj`{t>@aV%$f^Xh9tz@T*)4H5s17U7ZIOo;P`fX4C|P;quVG3ruX@=^-uzo^fp& ze^kQMMU0it7r)ef4BG7{rcFmNk&>DEx&73bG9ybzsrAy!=>R@hSNhu?`HE<}^;fPJ z%WN(HAW-Av6A!-MRve^ZHP${BwJ?l@2eV%RmztQ!b})j5mXi zp|1;$!5}Gf^Tsu~fgbs*s!xcBhKue64T6C~!-rCYH%(Zo$TyPW#iYAhbUCdvlZ(XfzkcjzTFUvFMPmN#7QGr7KNS0_nrie9=hWD!!eOveN=Ny?rX1+ zP|(;>6o!v~r9pcoCoySwR6D*=={3=O>euM;dvv_HMzM4KYR*ycf!HbFan|zX*ORdH z=8(gpi(W2#nyQv;jT(8*Z{5hw&CA_?7lYw@?rSXcl6Za^ZM1k$=@gd0t(Spo*+EX= zt8yyE!h-~UbYVkF$-95_9Crx_UCBcEXfriTyd`8b5-VaT{x-m=5MPmv0vK4ATp(kY zX|9&;TkA7frdrY2+)@(B0p$_9gF|Ts?0PJj<}4xsia$ zBkt+EWTJOY5Uzo8dwM<{{yh=Zh%vwR%=P8lynCu^i1`wWKoqThhJxhXzH>I&p zi*;^}r*k36H8tVj^Jr%G&+9c7AU+uP(Zl?H{K`sQ;`61I@GSKE#y28-`Sn@rLdumE zqp{JC(lEL{oky-dW6|9~iL_@Q7uPF()wHbV(f{D|poeNZV3M{%%y_a9D%U^?ooBt(W&>JLj>U_Pdd<6*p(XYs~`_rI`e(a4X2_uL3@KI>48Q z#&<9x+gk`;6w1tgk?t>&#omS%w~c7~R3P30dVSM62jdTyB*wt6=1`_posMTVl=0uFE2Gh=6N= ziyrtSREr?fiBw$R9OyjbFtv)7u9PUYi_MBCEw|6?A23O2O$6WL77ri22E z4=%wsE+q7RyD#4qGE&fLzBuklRkB!i6JTIn%hDY~w}l&_8R~e%+*>#&=A7iJhLE zK15l&?G~pGrrgAY&)ys|IoOV9Cg;4DUuDTG59^)=d_mRK{Ry@Evq?EIURg`y^0XQ& zA9`L&j4T&R`j9GdOi*J35Loy7ciA+#ZcuIG1g3) 
zns;(nW9{MKfT79j9OpmaM`_X7N7?L!d42NA0bZ zo+VsSAx16ERs_A=U{G{klS=4D3g|o)G`Z;xfMIEW*o9zoh5x<>&X+di^fJdQ@!f(t`tt^B{!ykbpe`_9 zNPZYo_hf#ksFGX|&PNVNc1qp;fbPKAw%ug)%&GyE42CZf-%dJjv}Jg^>+|m^!Ju#<^f#yAkN@rD~D+%oPz?>5)NVUOpXyJLytP-K3=B4{wAu-Q{2K z0d)rNn1~p8PLJ1a@BE9eo22V~p;>iP;!-|QNZa1VbFd~F*_P^y(Jhg+5}bTchmdX! zK!?p;CP5V1!;=A0VZCuCLVydH z{0khB`|hJwLIyIKG-QKp0{+t@%}=0H!M%#+p)QeZZUF$p1b|kv;dM+(4Km!b%wII_ z?4|JPepMz%ty+lfDtzL?9$yUv7#KHt-wLPO#i4^{C{_o#P^r{u;2wqZs3dy3CtB*>XT8HUAn4rO1U0`^;}wSDJEEUld~E#~Zv>M^{g4OaU1#j! z@2G}RJhx7r3fpDAlY!W)E(6h1gAog+;5`KmYJ=zzCv(@J8fl?MSD)4`88R4Z5+}Sg zHY&WKwea#@bfe!IhELj7o+o?s^Du}1j=6`5c~b^aujl;*XM*Mu=_dqyv)mz|&M+Td z!3jOd+`RelmG$u)#906G!(D!<;6((hq>t4R!(&t^s0)n4@ov6e9u#`2XHEQ-M>igG z(k-6+4IXV6|N+OvN!L&mfBUwa_9m<>a@o_R{R~EzE@tMVT(aia?v%!r?zg%P9M191{v4b1 z!HuGy7lQ&P0Uw$b@C9@hA#U%CN2``0+Ps!V><1ntZoSdVA9pHO+V>YPua5-z!Z_2m z$DGNwDx{uAMMNzA>ZodM+ZlnYFA}ZIw0PxW#kx5$y6>NVsd&|wyL6>2p>7P4c>t?Euh~X9Il3WpBa@>=8@fDRis(mr(c_b2saE-(`GtwGXyRw~|s6 z!tm`#ISR{0grNFJUzRT^(3O~wvnH{-PkbWKF0YJ#dr}xs!G`t}mwV55`{IpQq-v7P zr!`;QfY%9~d%G<`>c&df*uAaeK>c7{pPrqVoJZXfghOo81ztr}NbHYnqh2)uUeScK zxK{f(@qi1)jjm&}BTrI3qrQwyg-lj6#?$G&{6?Gqs~4ZpN(GC#egeS2IO@$up5-Id z2c39V8pCepPIVP5-80qvpw=bs-=LX`Um%0Y!(@iD^eEIQ>}>1V)i9;_i+!g$JSS|i zuq7iW3*O&o>dTl4?6cZIGwaydD=TDVlw+?VBzvV3$;#f@lo7H*_J}e{Hla`?DHJC9$u|NaS4{HlYPF%P=B-SD_!bHnNPgHlrNzV5bG9#;ST*?+6y!!Wpe zSh?6*x!Jk{3Qw1-4%SX~7QdVTHO_V(R=?Qhi4Cv-10IG&R4?|w@M$YvGJq=ru^=JX zM@pb4fKmaa21*0;Bv2-xl0eS`RR^jE)EFr2?^U2~K;40Q0rdeI0W=+G2GC5PSwP=Y zVdLUL9N0YA2o9cq6C@5L5+jno3;zb;LE;ebkYg(8VGxQP4h;z3&tKfXe-WdYFhzV6 z^Y0RZ5|tqyPDfDJxQiN@V;DOxXV!))O3e(rIW%91;=$^;Y@tc3g1X?B{XN~7H!Ybd9CbE0K;;Mk$~}E6<>&o ziW1d?u`hs1R-6^+=*`(-;z6SB!K|>*^Pt-P0fC~!RHA+lGYq)%pTNKK@-JyYAh3C; z{!stw&EKx@A+V9S%)$qa_urOhw($|d(Q_a#kfltkTaCo;@d%;d@d^up5&`I@-wj~2 z2tl|A8Uhs2uVw@r0SYc117U@mz(6Fx2T3A^lTAm%1ApP5q!Y5de;5^HqPCA@qb)up z=!2I6ZuzGMT+~?%_y3GxfgOegiSy4al)svJc&L6CcM|2yhvn z^-qzX2ek)kf42Oo#Ag){1p<#>@}HG(^S`w6^B`zYVtv?%{P~&*l!@x~PYVq>bg22$ 
zL`@III8LMJrnpD2ZhKeLA(6luCCCm;@NXpe2y)6lw*Q5k4?+G<)qfEDJcKAZOhf`m zO58u;fg)zQM1UeU27<)>gXUuax%@AGEEk6Ozwn|1i?D@-fAum19%Ew_Qm0^HUhN~E zo-Lt0PxPNfga!q~9I0<<_~yR;Df-w*!3C2TEBe^tfl4`Z>K~Uv8Qu(Yq*$o#gnu>0 zK_J>-tioCA*237~@>}C7S>cWqS&rrIH%w%tO>(s+0*kGm1>=HPVO*c)1=|tcQgyBN z-OG>r&yZMdU7<6560q8An;w3~UXCBe06{Cd6{nMgjFXKUavTu4w!0E~h8#)SkFd>c zON)OAapT0yBFNXyUY8>Am4Y*iGCa*rYGJKlM^m~_SkFxlKJE;4>EHtIM;n|Qn%E|s zDT*={SMx&KXHLsGIz!3!8G1q9%koLV#Bui`e_920yN{AxWC_?uk$g-RyvQcK-q?M? zcxrzc>o9xd%@9u|5C<&Cb(Lz*u9cvL9WHb0<^T z$11vJ#F_O`9=qA+m4R5nd_bd?Z&g1{2Lm)S%B-ytz;-RO#neyVp;Fy6IqErNnkk7dH8D$Vd32eH6aTTnrduUB@bBlC(YQmf zu{`_HZ26sfYCIm>x#y$CZZ>AbRPw|#Jm^+iLX3acQSIN))ZQ$NkTmDEoV@JfZ^?!% zKQZ-%?Dzt$j;4pgX$&p^xvReuhkzgmj z*`1)C+E;WNydBybjNHZ}sV%6**bR{*x`ExTSS8uEN@|>KHPm~(#85AhTdF_dtiQ%V z=*Qo787Pan@UU$32|1{S}P|A-t#Zu6Dp>J3B?v?QEXI0gC_UU}1BD(0WRBPCw; zunoO`*YYSUogq(qouZzNs02kFa3&V?n*xkGARE)BlD#svI0xYl5uK-wR(FTjV5Q&r7Yu=JdfuIF_eN{_JljaGf0WSGdS5myOYbM^dnL_$Xr)`yEC2|v zg`6j5=qrQ|3a%LVx20&C)-4gXt!CVQ`(_#G+Z`_Zr&Z`rLHhD)#G`QTh%EzP-lN1T&Q=e5!GacVp(W zrE31WCMb~> zP-GbMlEQ;8dnnBP2zCw#+G;OUVF}8MeJ3|9Nx15EpcmVN2s9N!U+dQH+1Q*tvRB-) zj>D!pr_reOG|0MMR`cBe_WKBofAQk$1Z0#76;_?|h2uAN@QigBhF!N@PM@d{VA#FF zxF`VP1#>T=@kcKSp4Iso_ClScFHaU21**diNEbXebT0Ku5$wAg2m%a@qZr(+QIs;f z>(?!0Ep}o{D_KY`(f6&NX|wC4 zM;)rcX(BC?=z91Z1PgkFc~y9JKvY+K?pazhXXspx8mVF)_l1_up3{yp>_(I6m^d1k zdbcay=3Vr;^kiF|Tbb&@AEOMeM9qDuo@Ne*Y>On)ru}zBCs!hT{pWY z8`UbVK~hmfVtEcgVBDrS@e9FWN}|TNaXn!jhP=8@Qjcnr2kjTeIb+G>sJ>-m^0GkO zxBA)J=8l+$Y0;%FR&iq*l8Xq$&84(59p%akiM;N7 zROw_5!J_`i8LIoSG$Yc$E?5f@i+nWP0=t50w&AaFK7=8nFIve_a<%4udr)|j$D*(< zeuE}1W$e;St>rS%IonKUZmKC&I~GIXQrVarFax-bZo)v@cvk6|^>vma{X{1JaE& z_eAokM)GqEsMj#@BBR&_`N9*-18=Zwv(-!Mz3yNOzYsMWLRgGUM1&n9fL#Q<^9_|g zmvm#$Gbi9=tC`@f`2Z7k^O6&i?U7OJk(cT0%S#!4ZHWH5F9GYS0?el#{j^dPXYjP4 zvBqHn8D%+Sb&EBqQfbqMuq!msX}dxh^e-I8iM-ybOuoLsN3qHh=a+`26_M@=)xwAR za)HKFHF)@er!jUrr}^l0ig$!Q&S|~fn#7yA+D<_9Mt;_LnADu4^VNU%6b zHd6m!$k@*N!>7{eg6G%7>M3tGOKi=AeSh`3YyAUwk=o$(gB=IYU`WnEyVa%CpL%)1 
zJ-0a6XhHn7sEUC!w@>qKj2h@)n9nwb^t8Bw7K)?hH7(NIE6o0E#ZkW7J#vIXR?T6L zthhMFzqg+X32sHU?r(U%86f-mbZu2JWV^om^3BaV{E?p@m;Su)0Cecx6VI=R5H;kd zJdi96R%JSwGvsCnDOnN`<}Tg$KNhqA`wa#7a4ETW+@vo@!2d*YdW#~Oq1E+Ux6Zr! zpIT2u5WH-RGquN@XpuF^=6#gJPjcH59jTA1&u)`P7SVXFa4K7S1w_r`?oOOc}>7HmZZ~I~`1-vY6uz&O;m5oHK#sisD8@buTmE5$&LS4hhTkU5#o1SoQ znY>fRoHZxQM5*^P$m;?=*sjdieezYYPMa^> z&&Y3Ppp(Ud)QN76uF7Pno_b2xn;NN(;agm(cxi;!|M>A4gF1N=>ZjA8SGw9h`d9N8 z3YT)#);@H{;1F6F#4nj|jM(xxQTqRtzQWde-8T?};X!OWs|L@Kz92yf^lk#8s zjfF!dk7F@>0;^3rt9dD@qZ2ww&B`x{7Sg}OP2HjA%QsG#A5?dZg)hWT0!_40sJxbn zr9Y1x6{S(|>J$B4={y#UXJ~*eYwj(}SLB>?>UNO_ohTyfCiXhL%IpZBXL9Q)Zy5+4-mH z*0^UQaql@32&?kj&3DhkMuDJL*Geb9IL9zVSFJYlS7_=T?ArK~56rBcGYtL6xTv+6 zjEN)h@q$!}Z(azlp-N|FAwB*bQMKsFfX?iVm79F~YU@)LfQuFoo?DxFb&VrOl9%s6 z^ME$n{5)^>Vyb(3^5Lh6?A{;O>MVhOFz%g~#oeUU)rOR(%d3$&=&|NkqWp!8IUB+% z)s_vFG;dU2flx*4E?1wD{ApY6<; zeKAhwqk;Dg%W!Sv+Nvf+gSfoO*)1&DY$+Rnf%kR|v39Dg+_x_s;|x)@F~y( zz5+D9lQG%;V(5}^cJ8yxK+z2LJ^gMOi#o;o+WXc4x%L19D0}W11x{&a!l@Xy48;@*2R{9qyQxQ<+7yi|r=n*Z8vu#mxDRiX^=z)|% zx(c`$fX+a*IAkx$8E1&v5#sR2cLEAi%%hqa1 zF0y><7V3CDx&P~3g_f}KqIQd=DR1hsrHbnS1KYJK-!<}BbPUhZz+)Lgll|>L>Z|Qc*{K6l~TI4d^F;n!7+oQmaN)u$Co$1Vf;(! 
z_U`s0%Gv9*JasthAue*}`iSY_PIN0d*PX%|D;5RV?kT_*Qd>KiTz~K}H80*LXL(A2 zPIL8b-*d_Fl~O4`Qf1C*8f*Xp>;B*#yB7C#N}IcSesfQ0*V%)wF!&5ITbZT%kgB|l zc;)E|=0BJ(NcMgGljqF_D=lg+!LCYi>E(2X(u{L2+cj?P%5aEp-vn{MxJdn`LPFxx z&E{%MhHlT&1ShY$bl%N6mcdmtq&tv_CTGOhy(PG0Z1B*hOyftGpTJPOyO%Oc^oO;0 z8)hx7Tls4V4zP1T(Buuy3vcmbwdw67p`bO}Gaf!V@4Lk+GRQ{c_VgWf^M1h)XiNMF z2OH%l$(L1$QA;1ILY{9jD!HypC-))+bsr0vUiSpRGeBcc>C>t?kQ!5QD4XC(J84Ju z$RF->Br{kCiV*dQ3i5Dc{QG`A5!tE1nzDrI} zk|GT|u2WlIvan^%Tr}Jh-7v>)1Uh%QR&*hIRa8!9e3+PzUzgxkrZjUeDe2UG$B35O z0*ihi&)|%Sh*#k9dg<{du=JW~rv7KzH4kNO6^dfI&JNzgb+MR^bbpN9GC6CZnR^Wg znf4%b#Qash-0rZd_r*_p6Vo;}mPn{4y#N>dj|@j6Ui;Y<3wA|~XTJE0Wjp2T-#35c zjGndNUPmEb*M9{Lw!wBgpJmm^)5yAhp!!rS*)Y3c;M{oYBj{vkzmi3`TMWBL5Wp}4p!LVd24>|ZS)Pw9 zpEYmoKj72*qC$>ZvlQP`e9w(NwH6F8FmB?mHBPUaQy1+>f-Z8g`f%5r^!%XZOx<&r zI0u`LZ}{(mF$%{~P4s$StlV?JW|wiDI3|w95ih3O zuGpjBP)!qf9^JZCb}RfRg0a`!hT>+2qZiG>bHF-=PsUDxH+SOGC}-fVg_o*DOBPYT_uWNTg4QybM+E#IdBQ-R zVLrSf3Vo^kf`!P{jj24uduok@VpTSscg19k zZY7T}HvdF{Sae79I%mDs60PKOrWgh}?As@$CkgmP$bxU?UpUAkl)Vr9V+CO9A%=TS z6Y)4Mn&%=(;@G2P>L^RUeqG6U6;Tnh;_cRX8e^9}{=+-@G_A?KSo5emYg#^z4J~SKP3+gUG+}xdtQ>#F9_$-e*3`M1EztS(9a;a242$ZO7i~;e& zIJ1t2TxoWyq~6CRM67`ts9GJn4}w>orP!Ej^T{WO_i|zEjy?Tc^63^hRdymDEQzRZ-IEtSJ_`aOY8Wp&c)6C!{L7PPm7{22eu3yuj$ zwN%;n>;8H{FO$3X_u4`N7(0zd`fDN*zedUeCmRH z;t1(*tq*XL02hp#*udsMo}hX{a|xRonWk=nr`vz&m5#s{AAaH0Dpm`FWPpKjG+R;L zmE$yr-FTOqBd+Jq_7pAOG1Gdh-Xjs%q?M0fB#Vi|^aEG#(mMYXt%Y-UBqWd$?GVH~`6_g51 zJ|v}Sdj*4|oOEWRGa_dFq>&{wxIN&SrhA~>+J!4wzasY{j_saf1O|6))bA?ioQ*;{ z|GqkQmmKxpo2dFG$q*Af7WR+YlB|pnEH?=uN{ALZ1+hUK5EsM)@k0ENAS4FKLW+K4=gch2BAP&?2-BZ9#j` zF@%Q&FQa~>N)c~pw)C8zC z&}%^5fqDS-2I>nm5@8AP9+gh3ugl#00gmEg*AvY2#A#lAQBSr zi-Vnmnd%V^)dlVBPXNPmhLM2rKNVkyii#4|gt0GxN>-W`=;+PaW8y)g?!&CG(DR_$ z{sn=e!#qUc{l@(Pcm5mrPhS2bEeHfQ57j@^zen?rD|`rSBrdb?QR9R6<(X}KgmCm6 z$ctnt(;C(z@drFYD0sfYLZD0lhUrfSm@PsOE`o*tMfAHH!A5|BTR(xY!d+k@65xX* z5yQ!*qv3%+a8NP{Sw26E3o=o=Cvwr2^9csvrGR_>tpgWz9>e{A$FRT-!-B;5cNWUu z-8?*0e;PdW|L~sq=NW<*MSwAeIsqZTZG6^$n*2Pd1Mu`$&p!|GSp`Ia!1I^<*F(7b 
zKYICj5Hu*UK5Rt(V$B4~RPDy6rG^|j)Z$s9rblBOXHj%h+#^_be5&b?NMMZ;WCter zFA{tNIpsgL|AU+lLH=)#|3UEc5TfWX5eXnEasLSqG~uNP7SQDW-9jL7|3UMyfL#6` zKP(rA_`mR?gNm?)g@2DSy!NrN3TaTVFt7I!PtTUnULgA4O@t-|#2jT{W%TZW!PEcY z-xB{1@fiOR2=t?m63vRjR%Vw^&D_5wGDC3FT^6&hBLXMS%D7OR5RxDybY7*PVkeljJ=h{5 z8oSQXag2J(SN!1{`Vf;|IaQQiORJ3c&OAOn{=2_IUB}+;j`Oy>!zMWy28aBxzWlLC zaLF{rnm)F8pi(L{PwtNR)kYUmQ%UM zO;cGJ(_HO|pkkXBA-Et`7}uwD(QbsdR71Oc|H{+;b0k(fSLw{21+F*SrAM4|kmrXn zK+sC=#pxuW<7DGT97lw1?QVo#p(j!fBW!cKGU8uC-8nI<2=c9q_vI*j<&eyx3@`K3 z+E{DY(Ucw&HgnU1Pdme0JGj96(FW&+CbkJ@ilU6g&7#omh4X5T?ohHrhJLWmszOo- zaoqi=Usi$L?vrF!IRf@kBp;I{FS1F$H}+64o;pC*CfoseE7VIF!~qL(Q?=TwYb|(b zkITZQIf1mJ?MZ&^q)|4%b>$VelJwZK#DEL-v5Kx4ac*;z$NUwpw8gyT`BM29y@cAn z;a!&Lo+ZNlMf_iM2;|n36#LHnn9mb>_KK|j{ap6b1^pG-Wy2| z(9mmFZaB8sSLwPfFm$y|Y?PCf$(X*rL|pZ{6!SsPJ1}sHuwAQcF^#i#snoVjPkN4- zW=i5qP0dqVo?Kz##D8kI?OsU^{QEU$H185@tD2`>Ef3Gytb0k=^YoOBiY$;{#NYy^93;;Z9A}Gp?l)DW$cP_dA&pF5?_tX%D86xV zVXTKYBtVel_oY|HEb+57q@Tzw-JftiK=UYU;g4Mg%3>}&Y}*d3fQs`$#cvcAkfX?5 zzOr2Xfk=XugtLlg!1%(IH~qCz{)M~J;$@H9(1-V|PO{P&@^m&S>e+}&P}G6vV!^m6 z!nh-{F&!$ot7FS^5bhAs1?o6H&o#>8V2T6A^{$zLbn!-vfBo*vn?E>aZv059ULqQj z*A>VKz$&BZbqG+nB%=G!VuBIy!E;*2Gj&e>vzpd{@u~K7CU%?mnvPB_C#dpQai)fD zovvB0FnsLf%S6-41%=dVfHuhL?(!P0^}GCrA<#`PyR+@zX-{8Z z8|e*rD0Hpfj}6Pp=h@~#Ni!c>`L+xT0K&D9^VAG|h44|qRl|U`6dkj=6~ealOg-{B z%MTG%+hr!|=YB7g$VTQjD+%h`o$&}-p?#x$lkXn$*B>%!I%a3&v7Om|lCO)wmAI0P zTPZP?nNz4uLZvtJq9+o!L}UHeKOHuj%qT(~aJ7Qpkuts?4cqZn-D=^u^txV;B>QS< zB1yb{*WN;^a1^C=3Gu`S}uTr?XG+;ng15p#D}TFg1PI3*kgeQPxX}C zM3%UmF#d`6eWan!L`17D^2rhSCYTFKqy-ik#k{8QMKu2CCBd`4FvDJ`oAl+`5~DzM z_z~%n*Ou<(UTK0u4?{tKfpHXr`!!0^=J))&g>1x5?Pw

1HlTJdRJAc<4>Cc31#5 zIf`%`L)x4Q!Wv@@>J%mSFJ1C`?{C)ZcKJz%T1c8m%OtuUJ_o^qUSnPto*fX?)0lga z*320;m!nRql*fIsrL*U(lPtUOWI85}2BzNKiuZY!d@n!S)!N-wEXiSJN1sE^B!MG3^ZN4^b7=YmHf6vNhQF# z2ET7;9!pl^LjKyGO`%%R6)ObxrLUXj_vIe8iffWo6p>g-0tk%T7AJlsI7~^@7&opj ztjmyB_euImZStVQ(l}=1D_N5K-yPsVT#&YC#%_+>4E9SS_dNeR3rI0hSVFFc##j;2KgcqEP`&b?6TF% z=)dh?i@5l3Hk7a!nTQDgi2!yH@Xj|(Ml$KW+lzsw>=q@bq}rn%vPWH^ zb0{xm_`M%IikRYjOjBl>xzD9+$nBNNTz1TxBU$oe*GaHaCLEn!z!kn?VZ3K(BF zj#GKPRhfK!gHK{rBrdEBO)DWi6sv`g4djDNsA}-=gU({?c24ur>lW_`EzD`Z-a@C+22ig~c;ps`7%z#x6;(Bq;r4Cbk5LEX3-j5Tv4A(bRA(7GGGjU#}n#-r@$E`vY&g(sTTIrE!x@h-mBL%!HZLojzBbALr ztR@4QR9m^(!xum9=ObB1*arqs`;!>)F29F)H< zbJO%&LM9XO62{W6LtIKpRk%6(AcqjP4En)!w28aBA+qj2xL5D2UB+b?!6!bX9h*EdQhzqrIO zL|3gh^H*r;AMM))kPpmkNE(GLFfMCvCu8DBe7Yo^;+GeSYoywlSxAq6S5!TEGO#my zYwZ@_q59^OCE%h3gpwOGZ?1FXNb&MLY97#GTU_MrUQYE$Pd@%Mk=^^_dYu*U55|4) zw!D|LzTS}Xe04o42R+vOMwGv>F=tCywc2VTA?{%Y#;$+&PY?gexZcndx^s_98E?e3b zVBo!7L#(|TEBBqtKkV&SQirkYcYiw0py2yn-eUvZ zcjI5GZp=s3TZW{_G7Hw=R*^Mc1$>J1fUf|J?`%SLxE!`3oSpk3Ge|Up{lK7G*0N6N zfzF{#V6Fqez__U?3u#UK%x2Gejtaumj+}_a;+STMOS3E;YdUvwjJ*dic85Er)sM4M?8~SAKjQ#Yu%K<9$Y*P}Bo|q|cMo&Aklg?Eo?=V*cu~9M%9Iav*-FI? 
zfPw8=m+u>UE;~hJY2vYrFggZ)V|b+*DbhpTrek{q6>!ZugS@~v8o~{}M52t;3A|Mv zol0pvJ3bom&XAbFQ7cxR_v5SE-!T5Abo+Gs6XhIqTAewb^%NI5cVootcrUt@oa=63 zjWvrRZ1)V{3$3jkOs+p#NX?7)%~_pNq|;h|-}h2#e63X4pHzi&ng$zyK;0iaVAtlp zL1}wW-+%5o?IwH3RR-TdW^40wUsBcAk#D@*!1@RC153xEz|rF?k_MD@8PY&5BdG}$hl(kljI&NukHYa@M>2y=kO)zqs2~qF#=q}35|N#nteH#l zn+l7I0d(PK);M2D927M$NE&Sh#hSZ=yuf_ng^|p?GldaiDhi=EA2=cTSxv`d`eQfy zPP2`(8%ES}7`_;M2iby&w$Z(w%h!MOmlSE*bDi1wl7%g2?yBjL=#DvdBhb04wW3Se z>!R|qwL(HY=?%Ex ze`Gir^E%9~S+Xl>zVO3eF54^L{J#AoXY{-!_a+MQw*D({uno4``68=Efkw`4p6Y8& zx86%LSNqzgn5z*sxJxeSrKJpB2^hO8lLJPMeh>UU<$SOs`W$?Ka}q&kom>4gk4bLH zFrM3m1@OUk#SI|CC<6RddGU0+R<LDhB;8I(g(!7LXM$xS`xDKC0T&R(7dWE#ye4dfjpVXv$%felgXYFtpFpR> z`jsss++)~1g8_ya0Bshc8kkj@4ib7nYx#*agMeN@A&V5ISR*7P4xCqtlVqBcAtAl$gQ;n zYJIE0CjupP#XY(B*!D5rI3|w95pSkDZrG#WP)!qfp51y?_G|p7La;a7hvH_3qnFLW zIRy=JgXohW^U|UoZ=*rip3yHGF&b%+AiOj=A+oKr{PJ#GbKoY1Pu5Iy1a&Yazfq+cd3W81A z-{vRdV^jpl3yj0@YrS3>8gaUBQ{t6(FCI(kP2R$^mx11yLgM9B9wf!&;KCCK^2wDt z?Qm@8E4G6QTYep>2kMQ4VpX=C_rzq4Zzqp1Hvd9_SoB8pJ7>K&60H?-rWgh}9NH&k zCJFdO$U<)AUp&eqlzRaDV+CO9A%^?T6Y)5%T9Q#DaqJIe>nKaVeqGCW6Il_n=HuRZ z7Gsw_e*S|(n)c+utk#v@xB5+-Zb@!IoI4C*&0?QcLPKT%ADRvD1$UPqZtYLSsaGI6 zd{@UEhaRTvzA-4Aa;;Q33X-U7i~;e&IP;FjTxs@dq&`1Oh**O(QMJ1E^MY4jq}W>M z@F^sS_i|zEjy?Ze^=cr0^-4u@`;OFnK$lFql&)ebAAjH-e^2{m-WTNG0Y~@Mn+i}!?!QvA|f9hjv63+S-Gl2UuH(mmdfEZ z{gFVovO4MQDG@*g3))9QVT}LQMW+O$daB&V%>e!2*U8<72W_F6rYhGs{A?3J{$RWQ zef#lw4|}BuN7(0zd`oJO*dN#@eCvXJ;|S?+ZH{n~02hp#*uv&Oo}zk2a~Yc&nWkZi zr`Lb^jjq5KUw+~BDppIwWPpKjG&>J{D#vM#yYa3xN8ZSv?I~KlYp(rXqemjBNjo3E zNDdQ+**sVI&j^#qbM5CgB2|(u4xH}tnYPEmmX1cbG+j#cfiHC6b>qgCs#gwKGCrj$ zIk~Y$Zm;F&Z1Ip@gQQnVGK~R(`&}f+A1A;ey?fd;Pad6;Y&sceUlA?$wyfKKUTUCB zL0jZvtvvDM5r`K)1P_OW9McCSd>)dHXJ*&`c=WRo|IL<3)-2;sZFQ;Y=m8Cke|QU4 zmfyRPR;tuYtAwh1qK7jtG91Lf6_hGXJ|wMWcNK%9oOEHMGbU#Jq?si&xI5sMrgx;% z+J!4szb5}Gj_tlvBnEeV)c+dioULLy|DgtUmpt{syNC5nQlX}JEbI$9Qml*+EVl?D zN{ALZ1F=CI5EsM)@k0ENAS4FKK}wK1qzUOjMvysl4f28lpu12g6a^(fX;3co1S*8e zph~C)YK6L>K4=gcg+4%Y&@!|M?LY_6PY4eSV!(n}u^<5~NE!=L!-6hhK{i;BI~L@P 
z1^HtE>m-Q3+-d*qEc3TB<=^(Af4j5(+uiry?$G}>_J14mzuo2kZH)g`^Z&MYg~0s% HCD8u@5ZI=b literal 0 HcmV?d00001 diff --git a/crates/store/src/genesis/config/tests.rs b/crates/store/src/genesis/config/tests.rs index 23e2daa43..acdeb304c 100644 --- a/crates/store/src/genesis/config/tests.rs +++ b/crates/store/src/genesis/config/tests.rs @@ -1,3 +1,6 @@ +use std::io::Write; +use std::path::Path; + use assert_matches::assert_matches; use miden_protocol::ONE; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; @@ -6,11 +9,23 @@ use super::*; type TestResult = Result<(), Box>; +/// Helper to write TOML content to a file and return the path +fn write_toml_file(dir: &Path, content: &str) -> std::path::PathBuf { + let path = dir.join("genesis.toml"); + let mut file = std::fs::File::create(&path).unwrap(); + file.write_all(content.as_bytes()).unwrap(); + path +} + #[test] #[miden_node_test_macro::enable_logging] fn parsing_yields_expected_default_values() -> TestResult { - let s = include_str!("./samples/01-simple.toml"); - let gcfg = GenesisConfig::read_toml(s)?; + // Copy sample file to temp dir since read_toml_file needs a real file path + let temp_dir = tempfile::tempdir()?; + let sample_content = include_str!("./samples/01-simple.toml"); + let config_path = write_toml_file(temp_dir.path(), sample_content); + + let gcfg = GenesisConfig::read_toml_file(&config_path)?; let (state, _secrets) = gcfg.into_state(SecretKey::new())?; let _ = state; // faucets always precede wallet accounts @@ -30,8 +45,8 @@ fn parsing_yields_expected_default_values() -> TestResult { { let faucet = BasicFungibleFaucet::try_from(native_faucet.clone()).unwrap(); - assert_eq!(faucet.max_supply(), Felt::new(100_000_000)); - assert_eq!(faucet.decimals(), 3); + assert_eq!(faucet.max_supply(), Felt::new(100_000_000_000_000_000)); + assert_eq!(faucet.decimals(), 6); assert_eq!(faucet.symbol(), TokenSymbol::new("MIDEN").unwrap()); } @@ -67,3 +82,275 @@ fn genesis_accounts_have_nonce_one() -> TestResult { 
let _block = state.into_block()?; Ok(()) } + +#[test] +fn parsing_account_from_file() -> TestResult { + use miden_protocol::account::{AccountFile, AccountStorageMode, AccountType}; + use miden_standards::AuthScheme; + use miden_standards::account::wallets::create_basic_wallet; + use tempfile::tempdir; + + // Create a temporary directory for our test files + let temp_dir = tempdir()?; + let config_dir = temp_dir.path(); + + // Create a test wallet account and save it to a .mac file + let init_seed: [u8; 32] = rand::random(); + let mut rng = rand_chacha::ChaCha20Rng::from_seed(rand::random()); + let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( + &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), + ); + let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; + + let test_account = create_basic_wallet( + init_seed, + auth, + AccountType::RegularAccountUpdatableCode, + AccountStorageMode::Public, + )?; + + let account_id = test_account.id(); + + // Save to file + let account_file_path = config_dir.join("test_account.mac"); + let account_file = AccountFile::new(test_account, vec![]); + account_file.write(&account_file_path)?; + + // Create a genesis config TOML that references the account file + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +[fee_parameters] +verification_base_fee = 0 + +[[account]] +path = "test_account.mac" +"#; + let config_path = write_toml_file(config_dir, toml_content); + + // Parse the config + let gcfg = GenesisConfig::read_toml_file(&config_path)?; + + // Convert to state and verify the account is included + let (state, _secrets) = gcfg.into_state(SecretKey::new())?; + assert!(state.accounts.iter().any(|a| a.id() == account_id)); + + Ok(()) +} + +#[test] +fn parsing_native_faucet_from_file() -> TestResult { + use miden_protocol::account::{AccountBuilder, AccountFile, AccountStorageMode, AccountType}; + use miden_standards::account::auth::AuthFalcon512Rpo; + use 
tempfile::tempdir; + + // Create a temporary directory for our test files + let temp_dir = tempdir()?; + let config_dir = temp_dir.path(); + + // Create a faucet account and save it to a .mac file + let init_seed: [u8; 32] = rand::random(); + let mut rng = rand_chacha::ChaCha20Rng::from_seed(rand::random()); + let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( + &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), + ); + let auth = AuthFalcon512Rpo::new(secret_key.public_key().into()); + + let faucet_component = + BasicFungibleFaucet::new(TokenSymbol::new("MIDEN").unwrap(), 6, Felt::new(1_000_000_000))?; + + let faucet_account = AccountBuilder::new(init_seed) + .account_type(AccountType::FungibleFaucet) + .storage_mode(AccountStorageMode::Public) + .with_auth_component(auth) + .with_component(faucet_component) + .build()?; + + let faucet_id = faucet_account.id(); + + // Save to file + let faucet_file_path = config_dir.join("native_faucet.mac"); + let account_file = AccountFile::new(faucet_account, vec![]); + account_file.write(&faucet_file_path)?; + + // Create a genesis config TOML that references the faucet file + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +native_faucet = "native_faucet.mac" + +[fee_parameters] +verification_base_fee = 0 +"#; + let config_path = write_toml_file(config_dir, toml_content); + + // Parse the config + let gcfg = GenesisConfig::read_toml_file(&config_path)?; + + // Convert to state and verify the native faucet is included + let (state, secrets) = gcfg.into_state(SecretKey::new())?; + assert!(state.accounts.iter().any(|a| a.id() == faucet_id)); + + // No secrets should be generated for file-loaded native faucet + assert!(secrets.secrets.is_empty()); + + Ok(()) +} + +#[test] +fn native_faucet_from_file_must_be_faucet_type() -> TestResult { + use miden_protocol::account::{AccountFile, AccountStorageMode, AccountType}; + use miden_standards::AuthScheme; + use 
miden_standards::account::wallets::create_basic_wallet; + use tempfile::tempdir; + + // Create a temporary directory for our test files + let temp_dir = tempdir()?; + let config_dir = temp_dir.path(); + + // Create a regular wallet account (not a faucet) and try to use it as native faucet + let init_seed: [u8; 32] = rand::random(); + let mut rng = rand_chacha::ChaCha20Rng::from_seed(rand::random()); + let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( + &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), + ); + let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; + + let regular_account = create_basic_wallet( + init_seed, + auth, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + )?; + + // Save to file + let account_file_path = config_dir.join("not_a_faucet.mac"); + let account_file = AccountFile::new(regular_account, vec![]); + account_file.write(&account_file_path)?; + + // Create a genesis config TOML that tries to use a non-faucet as native faucet + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +native_faucet = "not_a_faucet.mac" + +[fee_parameters] +verification_base_fee = 0 +"#; + let config_path = write_toml_file(config_dir, toml_content); + + // Parsing should succeed + let gcfg = GenesisConfig::read_toml_file(&config_path)?; + + // into_state should fail with NativeFaucetNotFungible error when loading the file + let result = gcfg.into_state(SecretKey::new()); + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + matches!(err, GenesisConfigError::NativeFaucetNotFungible { .. 
}), + "Expected NativeFaucetNotFungible error, got: {err:?}" + ); + + Ok(()) +} + +#[test] +fn missing_account_file_returns_error() { + // Create a genesis config TOML that references a non-existent file + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +[fee_parameters] +verification_base_fee = 0 + +[[account]] +path = "does_not_exist.mac" +"#; + + // Use temp dir as config dir + let temp_dir = tempfile::tempdir().unwrap(); + let config_path = write_toml_file(temp_dir.path(), toml_content); + + // Parsing should succeed + let gcfg = GenesisConfig::read_toml_file(&config_path).unwrap(); + + // into_state should fail with AccountFileRead error when loading the file + let result = gcfg.into_state(SecretKey::new()); + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + matches!(err, GenesisConfigError::AccountFileRead(..)), + "Expected AccountFileRead error, got: {err:?}" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn parsing_agglayer_sample_with_account_files() -> TestResult { + use miden_protocol::account::AccountType; + + // Use the actual sample file path since it references relative .mac files + let sample_path = Path::new(env!("CARGO_MANIFEST_DIR")) + .join("src/genesis/config/samples/02-with-account-files.toml"); + + let gcfg = GenesisConfig::read_toml_file(&sample_path)?; + let (state, secrets) = gcfg.into_state(SecretKey::new())?; + + // Should have 4 accounts: + // 1. Native faucet (MIDEN) - built from parameters + // 2. Bridge account (bridge.mac) - loaded from file + // 3. ETH faucet (agglayer_faucet_eth.mac) - loaded from file + // 4. 
USDC faucet (agglayer_faucet_usdc.mac) - loaded from file + assert_eq!(state.accounts.len(), 4, "Expected 4 accounts in genesis state"); + + // Verify account types + let native_faucet = &state.accounts[0]; + let bridge_account = &state.accounts[1]; + let eth_faucet = &state.accounts[2]; + let usdc_faucet = &state.accounts[3]; + + // Native faucet should be a fungible faucet (built from parameters) + assert_eq!( + native_faucet.id().account_type(), + AccountType::FungibleFaucet, + "Native faucet should be a FungibleFaucet" + ); + + // Verify native faucet symbol + { + let faucet = BasicFungibleFaucet::try_from(native_faucet.clone()).unwrap(); + assert_eq!(faucet.symbol(), TokenSymbol::new("MIDEN").unwrap()); + } + + // Bridge account is a regular account (not a faucet) + assert!( + bridge_account.is_regular_account(), + "Bridge account should be a regular account" + ); + + // ETH faucet should be a fungible faucet (AggLayer faucet loaded from file) + assert_eq!( + eth_faucet.id().account_type(), + AccountType::FungibleFaucet, + "ETH faucet should be a FungibleFaucet" + ); + + // USDC faucet should be a fungible faucet (AggLayer faucet loaded from file) + assert_eq!( + usdc_faucet.id().account_type(), + AccountType::FungibleFaucet, + "USDC faucet should be a FungibleFaucet" + ); + + // Only the native faucet generates a secret (built from parameters) + assert_eq!(secrets.secrets.len(), 1, "Only native faucet should generate a secret"); + + // Verify the genesis state can be converted to a block + let _block = state.into_block()?; + + Ok(()) +} diff --git a/docs/external/src/operator/usage.md b/docs/external/src/operator/usage.md index fa4861723..e8bd377bb 100644 --- a/docs/external/src/operator/usage.md +++ b/docs/external/src/operator/usage.md @@ -50,8 +50,8 @@ miden-node bundled bootstrap \ --genesis-config-file genesis.toml ``` -The genesis configuration file should contain fee parameters, the native faucet, optionally other -fungible faucets, and also 
optionally, wallet definitions with assets, for example: +The genesis configuration file should contain fee parameters, optionally a custom native faucet, +optionally other fungible faucets, and also optionally, wallet definitions with assets, for example: ```toml # The UNIX timestamp of the genesis block. It will influence the hash of the genesis block. @@ -59,11 +59,13 @@ timestamp = 1717344256 # Defines the format of the block protocol to use for the genesis block. version = 1 -# The native faucet to use for fees. -[native_faucet] -symbol = "MIDEN" -decimals = 6 -max_supply = 100_000_000_000_000_000 +# The native faucet defaults to a MIDEN token (symbol="MIDEN", decimals=6, +# max_supply=100_000_000_000_000_000). To override it with a pre-built account +# file, specify the path: +# +# native_faucet = "path/to/faucet.mac" +# +# The path is relative to this configuration file. # The fee parameters to use for the genesis block. [fee_parameters] @@ -95,6 +97,17 @@ storage_mode = "private" # has_updatable_code = false # default value ``` +To include pre-built accounts (e.g. bridge or wrapped-asset faucets) in the genesis block, use +`[[account]]` entries with paths to `.mac` files: + +```toml +[[account]] +path = "bridge.mac" + +[[account]] +path = "eth_faucet.mac" +``` + ## Operation Start the node with the desired public gRPC server address. 
From d970008d4fbd790ceb4d2ecc5e3b709b02f6c930 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Tue, 24 Feb 2026 15:53:30 +1300 Subject: [PATCH 51/77] ci: RM dry run workflow (#1690) --- .github/workflows/publish-dry-run.yml | 44 --------------------------- 1 file changed, 44 deletions(-) delete mode 100644 .github/workflows/publish-dry-run.yml diff --git a/.github/workflows/publish-dry-run.yml b/.github/workflows/publish-dry-run.yml deleted file mode 100644 index c84a08d34..000000000 --- a/.github/workflows/publish-dry-run.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Publish (dry-run) - -permissions: - contents: read - -on: - push: - branches: [main, next] - -concurrency: - group: "${{ github.workflow }} @ ${{ github.ref }}" - cancel-in-progress: true - -jobs: - publish-dry-run: - name: Cargo publish dry-run - runs-on: Linux-ARM64-Runner - if: ${{ github.repository_owner == '0xMiden' }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Cleanup large tools for build space - uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb - - name: Install dependencies - run: sudo apt-get update && sudo apt-get install -y jq - - name: Update Rust toolchain - run: rustup update --no-self-update - - uses: taiki-e/install-action@v2 - with: - tool: cargo-binstall@1.16.6 - - name: Install cargo-msrv - run: cargo binstall --no-confirm --force cargo-msrv - - name: Check MSRV for each workspace member - run: | - export PATH="$HOME/.cargo/bin:$PATH" - ./scripts/check-msrv.sh - - name: Run cargo publish dry-run - run: cargo publish --workspace --dry-run - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} From 0504583fb9e40d11515ce46d65747087bb9320d8 Mon Sep 17 00:00:00 2001 From: juan518munoz <62400508+juan518munoz@users.noreply.github.com> Date: Tue, 24 Feb 2026 12:05:46 -0300 Subject: [PATCH 52/77] fix: TransactionHeader 
serialization (#1701) * fix: TransactionHeader db serialization fix * docs: update changelog --- CHANGELOG.md | 1 + .../src/db/models/queries/transactions.rs | 34 ++++---- crates/store/src/db/tests.rs | 78 +++++++++++++++++++ 3 files changed, 100 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 829d43dc1..7133c5973 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - Validator now persists validated transactions ([#1614](https://github.com/0xMiden/miden-node/pull/1614)). - [BREAKING] Remove `SynState` and introduce `SyncChainMmr` ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). - Introduce `SyncChainMmr` RPC endpoint to sync chain MMR deltas within specified block ranges ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). +- Fixed `TransactionHeader` serialization for row insertion on database & fixed transaction cursor on retrievals ([#1701](https://github.com/0xMiden/miden-node/issues/1701)). ### Changes diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index 3e7e30df2..72bdcaea1 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -125,11 +125,25 @@ impl TransactionSummaryRowInsert { ) -> Self { const HEADER_BASE_SIZE: usize = 4 + 32 + 16 + 64; // block_num + tx_id + account_id + commitments - // Serialize input notes using binary format (store nullifiers) - let nullifiers_binary = transaction_header.input_notes().to_bytes(); + // Extract nullifiers from input notes and serialize them. + // We only store the nullifiers (not the full `InputNoteCommitment`) since + // that's all that's needed when reading back `TransactionRecords`. 
+ let nullifiers: Vec = transaction_header + .input_notes() + .iter() + .map(miden_protocol::transaction::InputNoteCommitment::nullifier) + .collect(); + let nullifiers_binary = nullifiers.to_bytes(); - // Serialize output notes using binary format (store note IDs) - let output_notes_binary = transaction_header.output_notes().to_bytes(); + // Extract note IDs from output note headers and serialize them. + // We only store the `NoteId`s (not the full `NoteHeader` with metadata) since + // that's all that's needed when reading back `TransactionRecords`. + let output_note_ids: Vec = transaction_header + .output_notes() + .iter() + .map(miden_protocol::note::NoteHeader::id) + .collect(); + let output_notes_binary = output_note_ids.to_bytes(); // Manually calculate the estimated size of the transaction header to avoid // the cost of serialization. The size estimation includes: @@ -269,12 +283,13 @@ pub fn select_transactions_records( // Add transactions from this chunk one by one until we hit the limit let mut added_from_chunk = 0; - let mut last_added_tx: Option = None; for tx in chunk { if total_size + tx.size_in_bytes <= max_payload_bytes { total_size += tx.size_in_bytes; - last_added_tx = Some(tx); + last_block_num = Some(tx.block_num); + last_transaction_id = Some(tx.transaction_id.clone()); + all_transactions.push(tx); added_from_chunk += 1; } else { // Can't fit this transaction, stop here @@ -282,13 +297,6 @@ pub fn select_transactions_records( } } - // Update cursor position only for the last transaction that was actually added - if let Some(tx) = last_added_tx { - last_block_num = Some(tx.block_num); - last_transaction_id = Some(tx.transaction_id.clone()); - all_transactions.push(tx); - } - // Break if chunk incomplete (size limit hit or data exhausted) if added_from_chunk < NUM_TXS_PER_CHUNK { break; diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 4c8a9f915..2154af4c3 100644 --- a/crates/store/src/db/tests.rs +++ 
b/crates/store/src/db/tests.rs @@ -2398,3 +2398,81 @@ fn test_prune_history() { "is_latest=true entry should be retained even if old" ); } + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_transactions() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num) + .unwrap(); + + // Build two transaction headers with distinct data + let tx1 = mock_block_transaction(account_id, 1); + let tx2 = mock_block_transaction(account_id, 2); + let ordered = OrderedTransactionHeaders::new_unchecked(vec![tx1.clone(), tx2.clone()]); + + // Insert + let count = queries::insert_transactions(&mut conn, block_num, &ordered).unwrap(); + assert_eq!(count, 2, "Should insert 2 transactions"); + + // Retrieve + let (last_block, records) = queries::select_transactions_records( + &mut conn, + &[account_id], + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + assert_eq!(last_block, block_num, "Last block should match"); + assert_eq!(records.len(), 2, "Should retrieve 2 transactions"); + + // Verify each transaction roundtrips correctly. + // Records are ordered by (block_num, transaction_id), so match by ID. 
+ let originals = [&tx1, &tx2]; + for record in &records { + let original = originals + .iter() + .find(|tx| tx.id() == record.transaction_id) + .expect("Retrieved transaction should match one of the originals"); + assert_eq!( + record.transaction_id, + original.id(), + "TransactionId DB roundtrip must be symmetric" + ); + assert_eq!( + record.account_id, + original.account_id(), + "AccountId DB roundtrip must be symmetric" + ); + assert_eq!(record.block_num, block_num, "Block number must match"); + assert_eq!( + record.initial_state_commitment, + original.initial_state_commitment(), + "Initial state commitment DB roundtrip must be symmetric" + ); + assert_eq!( + record.final_state_commitment, + original.final_state_commitment(), + "Final state commitment DB roundtrip must be symmetric" + ); + + // Input notes are stored as nullifiers only + let expected_nullifiers: Vec = + original.input_notes().iter().map(InputNoteCommitment::nullifier).collect(); + assert_eq!( + record.nullifiers, expected_nullifiers, + "Nullifiers (from input notes) DB roundtrip must be symmetric" + ); + + // Output notes are stored as note IDs only + let expected_note_ids: Vec = + original.output_notes().iter().map(NoteHeader::id).collect(); + assert_eq!( + record.output_notes, expected_note_ids, + "Output note IDs DB roundtrip must be symmetric" + ); + } +} From 8432310c4eee4f6f102bc1810e304d9e71df3ec4 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Tue, 24 Feb 2026 15:32:44 -0800 Subject: [PATCH 53/77] chore: fix changelog date and increment MSRV to 1.91 --- CHANGELOG.md | 2 +- Cargo.toml | 2 +- rust-toolchain.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7133c5973..d0e369a9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,7 +25,7 @@ - Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is 
purged on startup ([#1662](https://github.com/0xMiden/miden-node/pull/1662)). - [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/miden-node/pull/1688)). -## v0.13.5 (TBD) +## v0.13.5 (2026-02-19) - OpenTelemetry traces are now flushed before program termination on panic ([#1643](https://github.com/0xMiden/miden-node/pull/1643)). - Added support for the note transport layer in the network monitor ([#1660](https://github.com/0xMiden/miden-node/pull/1660)). diff --git a/Cargo.toml b/Cargo.toml index ee8ef78dd..2f6828f98 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,7 +29,7 @@ homepage = "https://miden.xyz" license = "MIT" readme = "README.md" repository = "https://github.com/0xMiden/miden-node" -rust-version = "1.90" +rust-version = "1.91" version = "0.14.0" # Optimize the cryptography for faster tests involving account creation. diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 6744e56e1..d9a424cef 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.90" +channel = "1.91" components = ["clippy", "rust-src", "rustfmt"] profile = "minimal" targets = ["wasm32-unknown-unknown"] From 893bcf86cfbc5a7f31049d5e641f93703d2be0ec Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 25 Feb 2026 13:33:52 +0100 Subject: [PATCH 54/77] feat: improve node startup time by adding block header commitment table column (#1700) --- bin/remote-prover/src/server/prover.rs | 4 +- .../db/migrations/2025062000000_setup/up.sql | 7 +++ .../2026020600000_cleanup_indices/down.sql | 4 -- .../2026020600000_cleanup_indices/up.sql | 9 --- .../down.sql | 2 - .../20260206163855_add_account_indices/up.sql | 2 - crates/store/src/db/mod.rs | 14 ++++- crates/store/src/db/models/conv.rs | 32 +++++++++- .../src/db/models/queries/accounts/tests.rs | 1 + .../src/db/models/queries/block_headers.rs | 58 +++++++++++++++++-- 
.../src/db/models/queries/transactions.rs | 3 +- crates/store/src/db/models/utils.rs | 12 +--- crates/store/src/db/schema.rs | 1 + crates/store/src/errors.rs | 2 +- crates/store/src/state/loader.rs | 15 +++-- 15 files changed, 116 insertions(+), 50 deletions(-) delete mode 100644 crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql delete mode 100644 crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql delete mode 100644 crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql delete mode 100644 crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql diff --git a/bin/remote-prover/src/server/prover.rs b/bin/remote-prover/src/server/prover.rs index 3a163a190..6ca76794e 100644 --- a/bin/remote-prover/src/server/prover.rs +++ b/bin/remote-prover/src/server/prover.rs @@ -31,8 +31,8 @@ impl Prover { } } - /// Proves a [`ProofRequest`] using the appropriate prover implementation as specified during - /// construction. + /// Proves a [`proto::ProofRequest`] using the appropriate prover implementation as specified + /// during construction. 
pub fn prove(&self, request: proto::ProofRequest) -> Result { match self { Prover::Transaction(prover) => prover.prove_request(request), diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 40491d4d5..1f0e151ab 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -2,6 +2,7 @@ CREATE TABLE block_headers ( block_num INTEGER NOT NULL, block_header BLOB NOT NULL, signature BLOB NOT NULL, + commitment BLOB NOT NULL, PRIMARY KEY (block_num), CONSTRAINT block_header_block_num_is_u32 CHECK (block_num BETWEEN 0 AND 0xFFFFFFFF) @@ -156,3 +157,9 @@ CREATE TABLE transactions ( CREATE INDEX idx_transactions_account_id ON transactions(account_id); -- Index for joining with block_headers CREATE INDEX idx_transactions_block_num ON transactions(block_num); + +CREATE INDEX idx_vault_cleanup ON account_vault_assets(block_num) WHERE is_latest = 0; +CREATE INDEX idx_storage_cleanup ON account_storage_map_values(block_num) WHERE is_latest = 0; + +CREATE INDEX idx_account_storage_map_latest_by_account_slot_key ON account_storage_map_values(account_id, slot_name, key, is_latest) WHERE is_latest = 1; +CREATE INDEX idx_account_vault_assets_latest_by_account_key ON account_vault_assets(account_id, vault_key, is_latest) WHERE is_latest = 1; diff --git a/crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql b/crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql deleted file mode 100644 index 1195d70bd..000000000 --- a/crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Reverse the cleanup indices migration - -DROP INDEX IF EXISTS idx_vault_cleanup; -DROP INDEX IF EXISTS idx_storage_cleanup; diff --git a/crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql b/crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql 
deleted file mode 100644 index b98f55c6d..000000000 --- a/crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql +++ /dev/null @@ -1,9 +0,0 @@ --- Add indices to optimize cleanup queries that delete old non-latest entries. --- --- These partial indices only include rows where is_latest = 0, making them: --- - Smaller (only index rows that will eventually be deleted) --- - Faster for cleanup operations (direct lookup of old entries) --- - No overhead for is_latest = 1 rows (which are never deleted) - -CREATE INDEX idx_vault_cleanup ON account_vault_assets(block_num) WHERE is_latest = 0; -CREATE INDEX idx_storage_cleanup ON account_storage_map_values(block_num) WHERE is_latest = 0; diff --git a/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql b/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql deleted file mode 100644 index 1a15b55c4..000000000 --- a/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql +++ /dev/null @@ -1,2 +0,0 @@ -DROP INDEX IF EXISTS idx_account_storage_map_latest_by_account_slot_key; -DROP INDEX IF EXISTS idx_account_vault_assets_latest_by_account_key; diff --git a/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql b/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql deleted file mode 100644 index 83233e157..000000000 --- a/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql +++ /dev/null @@ -1,2 +0,0 @@ -CREATE INDEX idx_account_storage_map_latest_by_account_slot_key ON account_storage_map_values(account_id, slot_name, key, is_latest) WHERE is_latest = 1; -CREATE INDEX idx_account_vault_assets_latest_by_account_key ON account_vault_assets(account_id, vault_key, is_latest) WHERE is_latest = 1; diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 803d532f0..74aa8ce3b 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -28,12 +28,12 @@ use tracing::{info, 
instrument}; use crate::COMPONENT; use crate::db::migrations::apply_migrations; use crate::db::models::conv::SqlTypeConvert; -use crate::db::models::queries::StorageMapValuesPage; pub use crate::db::models::queries::{ AccountCommitmentsPage, NullifiersPage, PublicAccountIdsPage, }; +use crate::db::models::queries::{BlockHeaderCommitment, StorageMapValuesPage}; use crate::db::models::{Page, queries}; use crate::errors::{DatabaseError, NoteSyncError}; use crate::genesis::GenesisBlock; @@ -266,7 +266,7 @@ impl Db { /// Open a connection to the DB and apply any pending migrations. #[instrument(target = COMPONENT, skip_all)] - pub async fn load(database_filepath: PathBuf) -> Result { + pub async fn load(database_filepath: PathBuf) -> Result { let db = miden_node_db::Db::new(&database_filepath)?; info!( target: COMPONENT, @@ -359,6 +359,16 @@ impl Db { .await } + /// Loads all the block headers from the DB. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_block_header_commitments(&self) -> Result> { + self.transact("all block headers", |conn| { + let raw = queries::select_all_block_header_commitments(conn)?; + Ok(raw) + }) + .await + } + /// Returns a page of account commitments for tree rebuilding. 
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_account_commitments_paged( diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 3720729b1..2176ea0d4 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -32,12 +32,14 @@ on relevant platforms" )] +use miden_crypto::Word; +use miden_crypto::utils::Deserializable; use miden_protocol::Felt; use miden_protocol::account::{StorageSlotName, StorageSlotType}; -use miden_protocol::block::BlockNumber; +use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::note::NoteTag; -use crate::db::models::queries::NetworkAccountType; +use crate::db::models::queries::{BlockHeaderCommitment, NetworkAccountType}; #[derive(Debug, thiserror::Error)] #[error("failed to convert from database type {from_type} into {into_type}")] @@ -67,6 +69,32 @@ pub trait SqlTypeConvert: Sized { } } +impl SqlTypeConvert for BlockHeaderCommitment { + type Raw = Vec; + fn from_raw_sql( + raw: Self::Raw, + ) -> Result { + let inner = + ::read_from_bytes(raw.as_slice()).map_err(Self::map_err)?; + Ok(BlockHeaderCommitment(inner)) + } + fn to_raw_sql(self) -> Self::Raw { + self.0.as_bytes().to_vec() + } +} + +impl SqlTypeConvert for BlockHeader { + type Raw = Vec; + + fn from_raw_sql(raw: Self::Raw) -> Result { + ::read_from_bytes(raw.as_slice()).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + miden_crypto::utils::Serializable::to_bytes(&self) + } +} + impl SqlTypeConvert for NetworkAccountType { type Raw = i32; diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index fa1e77e85..dd1ab9748 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -183,6 +183,7 @@ fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { 
block_headers::block_num.eq(i64::from(block_num.as_u32())), block_headers::block_header.eq(block_header.to_bytes()), block_headers::signature.eq(signature.to_bytes()), + block_headers::commitment.eq(block_header.commitment().to_bytes()), )) .execute(conn) .expect("Failed to insert block header"); diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs index 553430ddb..bfcd34ee7 100644 --- a/crates/store/src/db/models/queries/block_headers.rs +++ b/crates/store/src/db/models/queries/block_headers.rs @@ -11,6 +11,7 @@ use diesel::{ SelectableHelper, SqliteConnection, }; +use miden_crypto::Word; use miden_crypto::dsa::ecdsa_k256_keccak::Signature; use miden_node_utils::limiter::{QueryParamBlockLimit, QueryParamLimiter}; use miden_protocol::block::{BlockHeader, BlockNumber}; @@ -125,6 +126,44 @@ pub fn select_all_block_headers( vec_raw_try_into(raw_block_headers) } +/// Select all block headers from the DB using the given [`SqliteConnection`]. +/// +/// # Returns +/// +/// A vector of [`BlockHeader`] or an error. 
+/// +/// # Raw SQL +/// +/// ```sql +/// SELECT commitment +/// FROM block_headers +/// ORDER BY block_num ASC +/// ``` +pub fn select_all_block_header_commitments( + conn: &mut SqliteConnection, +) -> Result, DatabaseError> { + let raw_commitments = + QueryDsl::select(schema::block_headers::table, schema::block_headers::commitment) + .order(schema::block_headers::block_num.asc()) + .load::>(conn)?; + let commitments = + Result::from_iter(raw_commitments.into_iter().map(BlockHeaderCommitment::from_raw_sql))?; + Ok(commitments) +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(transparent)] +pub struct BlockHeaderCommitment(pub(crate) Word); + +impl BlockHeaderCommitment { + pub fn new(header: &BlockHeader) -> Self { + Self(header.commitment()) + } + pub fn word(self) -> Word { + self.0 + } +} + #[derive(Debug, Clone, Queryable, QueryableByName, Selectable)] #[diesel(table_name = schema::block_headers)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] @@ -133,11 +172,18 @@ pub struct BlockHeaderRawRow { pub block_num: i64, pub block_header: Vec, pub signature: Vec, + pub commitment: Vec, } impl TryInto for BlockHeaderRawRow { type Error = DatabaseError; fn try_into(self) -> Result { - let block_header = BlockHeader::read_from_bytes(&self.block_header[..])?; + let block_header = BlockHeader::from_raw_sql(self.block_header)?; + // we're bust if this invariant doesn't hold + debug_assert_eq!( + BlockHeaderCommitment::new(&block_header), + BlockHeaderCommitment::from_raw_sql(self.commitment) + .expect("Database always contains valid format commitments") + ); Ok(block_header) } } @@ -158,13 +204,15 @@ pub struct BlockHeaderInsert { pub block_num: i64, pub block_header: Vec, pub signature: Vec, + pub commitment: Vec, } impl From<(&BlockHeader, &Signature)> for BlockHeaderInsert { - fn from(from: (&BlockHeader, &Signature)) -> Self { + fn from((header, signature): (&BlockHeader, &Signature)) -> Self { Self { - block_num: from.0.block_num().to_raw_sql(), - 
block_header: from.0.to_bytes(), - signature: from.1.to_bytes(), + block_num: header.block_num().to_raw_sql(), + block_header: header.to_bytes(), + signature: signature.to_bytes(), + commitment: BlockHeaderCommitment::new(header).to_raw_sql(), } } } diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index 72bdcaea1..1095fc189 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -89,10 +89,9 @@ pub(crate) fn insert_transactions( block_num: BlockNumber, transactions: &OrderedTransactionHeaders, ) -> Result { - #[expect(clippy::into_iter_on_ref)] // false positive let rows: Vec<_> = transactions .as_slice() - .into_iter() + .iter() .map(|tx| TransactionSummaryRowInsert::new(tx, block_num)) .collect(); diff --git a/crates/store/src/db/models/utils.rs b/crates/store/src/db/models/utils.rs index 1ace2abaa..ef74e86fa 100644 --- a/crates/store/src/db/models/utils.rs +++ b/crates/store/src/db/models/utils.rs @@ -1,6 +1,6 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use miden_protocol::note::Nullifier; -use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::utils::Serializable; use crate::errors::DatabaseError; @@ -14,16 +14,6 @@ pub(crate) fn vec_raw_try_into>( ) } -#[expect(dead_code)] -/// Deserialize an iterable container full of byte blobs `B` to types `T` -pub(crate) fn deserialize_raw_vec, T: Deserializable>( - raw: impl IntoIterator, -) -> Result, DeserializationError> { - Result::, DeserializationError>::from_iter( - raw.into_iter().map(|raw| T::read_from_bytes(raw.as_ref())), - ) -} - /// Utility to convert an iterable container to a vector of byte blobs pub(crate) fn serialize_vec<'a, D: Serializable + 'a>( raw: impl IntoIterator, diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index ebb8c280f..f93afc16e 100644 --- 
a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -48,6 +48,7 @@ diesel::table! { block_num -> BigInt, block_header -> Binary, signature -> Binary, + commitment -> Binary, } } diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 61bbf3e99..a277f1c68 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -119,7 +119,7 @@ pub enum StateInitializationError { #[error("failed to load block store")] BlockStoreLoadError(#[source] std::io::Error), #[error("failed to load database")] - DatabaseLoadError(#[from] miden_node_db::DatabaseError), + DatabaseLoadError(#[source] DatabaseError), #[error("inner forest error")] InnerForestError(#[from] InnerForestError), #[error( diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index d237716f3..c8c886148 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -12,9 +12,10 @@ use std::future::Future; use std::num::NonZeroUsize; use std::path::Path; +use miden_crypto::merkle::mmr::Mmr; use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::NullifierTree; -use miden_protocol::block::{BlockHeader, BlockNumber, Blockchain}; +use miden_protocol::block::{BlockNumber, Blockchain}; #[cfg(not(feature = "rocksdb"))] use miden_protocol::crypto::merkle::smt::MemoryStorage; use miden_protocol::crypto::merkle::smt::{LargeSmt, LargeSmtError, SmtStorage}; @@ -30,6 +31,7 @@ use { use crate::COMPONENT; use crate::db::Db; +use crate::db::models::queries::BlockHeaderCommitment; use crate::errors::{DatabaseError, StateInitializationError}; use crate::inner_forest::InnerForest; @@ -331,16 +333,13 @@ pub fn load_smt(storage: S) -> Result, StateInitializ /// Loads the blockchain MMR from all block headers in the database. 
#[instrument(target = COMPONENT, skip_all)] pub async fn load_mmr(db: &mut Db) -> Result { - let block_commitments: Vec = db - .select_all_block_headers() - .await? - .iter() - .map(BlockHeader::commitment) - .collect(); + let block_commitments = db.select_all_block_header_commitments().await?; // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX // entries. - let chain_mmr = Blockchain::from_mmr_unchecked(block_commitments.into()); + let chain_mmr = Blockchain::from_mmr_unchecked(Mmr::from( + block_commitments.iter().copied().map(BlockHeaderCommitment::word), + )); Ok(chain_mmr) } From a5987f688569518624b9f9cf672c88d0c7761b3f Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 26 Feb 2026 08:50:26 +0200 Subject: [PATCH 55/77] chore: update dependencies (#1710) --- Cargo.lock | 1398 ++++++++++++++---------- Cargo.toml | 5 +- bin/network-monitor/Cargo.toml | 6 +- bin/node/Cargo.toml | 1 - bin/node/Dockerfile | 2 +- crates/block-producer/Cargo.toml | 4 +- crates/remote-prover-client/Cargo.toml | 4 +- crates/rpc/Cargo.toml | 2 +- crates/store/Cargo.toml | 6 +- crates/utils/Cargo.toml | 4 +- crates/utils/src/config.rs | 23 - crates/utils/src/lib.rs | 1 - 12 files changed, 844 insertions(+), 612 deletions(-) delete mode 100644 crates/utils/src/config.rs diff --git a/Cargo.lock b/Cargo.lock index e3f772dbd..763c7cf5b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -36,6 +36,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloca" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4" +dependencies = [ + "cc", +] + [[package]] name = "allocator-api2" version = "0.2.21" @@ -109,12 +118,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.100" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" -dependencies = [ - "backtrace", -] +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" [[package]] name = "arrayref" @@ -151,16 +157,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", -] - -[[package]] -name = "atomic" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" -dependencies = [ - "bytemuck", + "syn 2.0.117", ] [[package]] @@ -175,6 +172,28 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-lc-rs" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.37.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + [[package]] name = "axum" version = "0.8.8" @@ -303,13 +322,13 @@ dependencies = [ "bitflags", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.13.0", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -329,9 +348,9 @@ checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitflags" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" [[package]] 
name = "blake3" @@ -358,15 +377,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" - -[[package]] -name = "bytemuck" -version = "1.24.0" +version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" [[package]] name = "byteorder" @@ -398,9 +411,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.54" +version = "1.2.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" dependencies = [ "find-msvc-tools", "jobserver", @@ -408,6 +421,12 @@ dependencies = [ "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -423,6 +442,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chacha20" version = "0.9.1" @@ -449,9 +474,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +checksum = 
"c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" dependencies = [ "iana-time-zone", "js-sys", @@ -511,9 +536,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.55" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e34525d5bbbd55da2bb745d34b36121baac88d07619a9a09cfcf4a6c0832785" +checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" dependencies = [ "clap_builder", "clap_derive", @@ -521,9 +546,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.55" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a20016a20a3da95bef50ec7238dbd09baeef4311dcdd38ec15aba69812fb61" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" dependencies = [ "anstream", "anstyle", @@ -540,14 +565,23 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "clap_lex" -version = "0.7.7" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" + +[[package]] +name = "cmake" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] [[package]] name = "colorchoice" @@ -555,6 +589,16 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -604,25 
+648,24 @@ dependencies = [ [[package]] name = "criterion" -version = "0.5.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +checksum = "950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3" dependencies = [ + "alloca", "anes", "cast", "ciborium", "clap", "criterion-plot", - "is-terminal", - "itertools 0.10.5", + "itertools 0.13.0", "num-traits", - "once_cell", "oorandom", + "page_size", "plotters", "rayon", "regex", "serde", - "serde_derive", "serde_json", "tinytemplate", "walkdir", @@ -630,12 +673,12 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.5.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea" dependencies = [ "cast", - "itertools 0.10.5", + "itertools 0.13.0", ] [[package]] @@ -716,7 +759,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -740,7 +783,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -751,7 +794,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -807,9 +850,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.5" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" dependencies = [ "powerfmt", ] @@ -832,7 +875,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.114", + "syn 
2.0.117", ] [[package]] @@ -862,7 +905,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -882,7 +925,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" dependencies = [ - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -911,7 +954,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -937,9 +980,15 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "ecdsa" version = "0.16.9" @@ -1006,9 +1055,9 @@ dependencies = [ [[package]] name = "ena" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" +checksum = "eabffdaee24bd1bf95c5ef7cec31260444317e72ea56c4c91750e8b7ee58d5f1" dependencies = [ "log", ] @@ -1024,9 +1073,9 @@ dependencies = [ [[package]] name = "env_filter" -version = "0.1.4" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bf3c259d255ca70051b30e2e95b5446cdb8949ac4cd22c0d7fd634d89f568e2" +checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f" dependencies = [ "log", "regex", @@ -1034,9 +1083,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" +checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" dependencies = [ 
"anstream", "anstyle", @@ -1083,27 +1132,11 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" -[[package]] -name = "figment" -version = "0.10.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" -dependencies = [ - "atomic", - "parking_lot", - "pear", - "serde", - "tempfile", - "toml 0.8.23", - "uncased", - "version_check", -] - [[package]] name = "find-msvc-tools" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" [[package]] name = "fixedbitset" @@ -1131,24 +1164,15 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" - -[[package]] -name = "foreign-types" -version = "0.3.2" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] -name = "foreign-types-shared" -version = "0.1.1" +name = "foldhash" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" [[package]] name = "form_urlencoded" @@ -1161,18 +1185,24 @@ dependencies = [ [[package]] name = "fs-err" -version = "3.2.2" +version = "3.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7" +checksum = "73fde052dbfc920003cfd2c8e2c6e6d4cc7c1091538c3a24226cec0665ab08c0" dependencies = [ "autocfg", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futures" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -1185,9 +1215,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -1195,15 +1225,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", @@ -1212,32 +1242,32 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.32" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-timer" @@ -1247,9 +1277,9 @@ checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", "futures-core", @@ -1259,7 +1289,6 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] @@ -1316,6 +1345,21 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasip2", + "wasip3", + "wasm-bindgen", +] + [[package]] name = "gimli" version = "0.32.3" @@ -1369,6 +1413,15 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash 0.1.5", +] + [[package]] name = "hashbrown" version = "0.16.1" @@ -1377,7 +1430,7 @@ checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.2.0", "rayon", "serde", "serde_core", @@ -1522,32 +1575,15 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ "base64", "bytes", "futures-channel", - "futures-core", "futures-util", "http", "http-body", @@ -1566,9 +1602,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.64" +version = "0.1.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" dependencies = [ "android_system_properties", 
"core-foundation-sys", @@ -1669,6 +1705,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "ident_case" version = "1.0.1" @@ -1709,15 +1751,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.16.1", + "serde", + "serde_core", ] -[[package]] -name = "inlinable_string" -version = "0.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" - [[package]] name = "inout" version = "0.1.4" @@ -1743,17 +1781,6 @@ dependencies = [ "serde", ] -[[package]] -name = "is-terminal" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys 0.61.2", -] - [[package]] name = "is_ci" version = "1.2.0" @@ -1768,9 +1795,9 @@ checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" -version = "0.10.5" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] @@ -1792,9 +1819,9 @@ checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jiff" -version = "0.2.18" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67e8da4c49d6d9909fe03361f9b620f58898859f5c7aded68351e85e71ecf50" +checksum = 
"b3e3d65f018c6ae946ab16e80944b97096ed73c35b221d1c478a6c81d8f57940" dependencies = [ "jiff-static", "log", @@ -1805,15 +1832,37 @@ dependencies = [ [[package]] name = "jiff-static" -version = "0.2.18" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c84ee7f197eca9a86c6fd6cb771e55eb991632f15f2bc3ca6ec838929e6e78" +checksum = "a17c2b211d863c7fde02cbea8a3c1a439b98e109286554f2860bdded7ff83818" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", +] + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", ] +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.34" @@ -1826,9 +1875,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.85" +version = "0.3.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +checksum = "14dc6f6450b3f6d4ed5b16327f38fed626d375a886159ca555bd7822c0c3a5a6" dependencies = [ "once_cell", "wasm-bindgen", @@ -1868,7 +1917,7 @@ dependencies = [ "ena", "itertools 0.14.0", "lalrpop-util", - "petgraph", + "petgraph 0.7.1", "regex", "regex-syntax", "sha3", @@ -1893,11 +1942,17 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + [[package]] name = "libc" -version = "0.2.180" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" [[package]] name = "libloading" @@ -1942,9 +1997,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.23" +version = "1.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +checksum = "4735e9cbde5aac84a5ce588f6b23a90b9b0b528f6c5a8db8a4aff300463a0839" dependencies = [ "cc", "pkg-config", @@ -1959,9 +2014,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" [[package]] name = "litemap" @@ -2006,7 +2061,7 @@ dependencies = [ "quote", "regex-syntax", "rustc_version 0.4.1", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2037,6 +2092,12 @@ version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "lz4-sys" version = "1.11.1+lz4-1.10.0" @@ -2070,9 +2131,9 @@ checksum = "120fa187be19d9962f0926633453784691731018a2bf936ddb4e29101b79c4a7" [[package]] name = "memchr" -version = "2.7.6" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "miden-agglayer" @@ -2092,22 +2153,22 @@ dependencies = [ [[package]] name = "miden-air" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2f1db9cdbd5da3eaf07fa0a8122d27b575f96b0699388c98f6c0e468cb9c1f" +checksum = "5cca9632323bd4e32ae5b21b101ed417a646f5d72196b1bf3f1ca889a148322a" dependencies = [ "miden-core", "miden-utils-indexing", - "thiserror", + "thiserror 2.0.18", "winter-air", "winter-prover", ] [[package]] name = "miden-assembly" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4aba6bc5cfda2393ecc032b55caabde289fb980650560f8333803db4e48f09" +checksum = "2395b2917aea613a285d3425d1ca07e6c45442e2b34febdea2081db555df62fc" dependencies = [ "env_logger", "log", @@ -2115,14 +2176,14 @@ dependencies = [ "miden-core", "miden-mast-package", "smallvec", - "thiserror", + "thiserror 2.0.18", ] [[package]] name = "miden-assembly-syntax" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23eae66f2a55c2a0666f4ed896b61797845b528435ad2bae41fd9a221f94bad7" +checksum = "1f9bed037d137f209b9e7b28811ec78c0536b3f9259d6f4ceb5823c87513b346" dependencies = [ "aho-corasick", "env_logger", @@ -2139,7 +2200,7 @@ dependencies = [ "rustc_version 0.4.1", "semver 1.0.27", "smallvec", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -2148,14 +2209,14 @@ version = "0.14.0" source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-protocol", - "thiserror", + "thiserror 2.0.18", ] [[package]] name = "miden-core" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2716bb01f07f0b19398e3d9785e23a724b89aef64d614a9073c1d44c6898a9a9" +checksum = "8714aa5f86c59e647b7417126b32adc4ef618f835964464f5425549df76b6d03" dependencies = [ "derive_more", "itertools 0.14.0", @@ -2168,16 +2229,16 @@ dependencies = [ "num-traits", "proptest", "proptest-derive", - "thiserror", + "thiserror 2.0.18", "winter-math", "winter-utils", ] [[package]] name = "miden-core-lib" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac97f4fb334ee842663f99f33677beacc7bdf4b7d4eeff419c2cd98a5a68bfa" +checksum = "1bb16a4d39202c59a7964d3585cd5af21a46a759ff6452cb5f20723ed5af4362" dependencies = [ "env_logger", "fs-err", @@ -2187,14 +2248,14 @@ dependencies = [ "miden-processor", "miden-utils-sync", "sha2", - "thiserror", + "thiserror 2.0.18", ] [[package]] name = "miden-crypto" -version = "0.19.4" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e28b6e110f339c2edc2760a8cb94863f0a055ee658a49bc90c8560eff2feef4" +checksum = "999926d48cf0929a39e06ce22299084f11d307ca9e765801eb56bf192b07054b" dependencies = [ "blake3", "cc", @@ -2203,7 +2264,7 @@ dependencies = [ "ed25519-dalek", "flume", "glob", - "hashbrown", + "hashbrown 0.16.1", "hkdf", "k256", "miden-crypto-derive", @@ -2218,7 +2279,7 @@ dependencies = [ "sha2", "sha3", "subtle", - "thiserror", + "thiserror 2.0.18", "winter-crypto", "winter-math", "winter-utils", @@ -2227,19 +2288,19 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.19.4" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40e95b9c7c99ed6bbf073d9e02721d812dedd2c195019c0a0e0a3dbb9cbf034" +checksum = "3550b5656b791fec59c0b6089b4d0368db746a34749ccd47e59afb01aa877e9e" dependencies = [ "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "miden-debug-types" -version = "0.20.3" +version = "0.20.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b421786850ce05627355ee616c4a5fdc4a9ad1591859ede5e5564ab74aa4abd2" +checksum = "cd1494f102ad5b9fa43e391d2601186dc601f41ab7dcd8a23ecca9bf3ef930f4" dependencies = [ "memchr", "miden-crypto", @@ -2249,8 +2310,8 @@ dependencies = [ "miden-utils-sync", "paste", "serde", - "serde_spanned 1.0.4", - "thiserror", + "serde_spanned", + "thiserror 2.0.18", ] [[package]] @@ -2264,14 +2325,14 @@ dependencies = [ [[package]] name = "miden-mast-package" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "169025a61c2ca2e8a0f53f20a7bdcbdd1f8e34f528676137208bff64944652bb" +checksum = "692185bfbe0ecdb28bf623f1f8c88282cd6727ba081a28e23b301bdde1b45be4" dependencies = [ "derive_more", "miden-assembly-syntax", "miden-core", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -2297,10 +2358,10 @@ dependencies = [ "supports-color", "supports-hyperlinks", "supports-unicode", - "syn 2.0.114", + "syn 2.0.117", "terminal_size 0.3.0", "textwrap", - "thiserror", + "thiserror 2.0.18", "trybuild", "unicode-width 0.1.14", ] @@ -2313,7 +2374,7 @@ checksum = "86a905f3ea65634dd4d1041a4f0fd0a3e77aa4118341d265af1a94339182222f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2351,7 +2412,6 @@ version = "0.14.0" dependencies = [ "anyhow", "clap", - "figment", "fs-err", "hex", "humantime", @@ -2392,7 +2452,7 @@ dependencies = [ "rstest", "serial_test", "tempfile", - "thiserror", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -2412,7 +2472,7 @@ dependencies = [ "deadpool-sync", "diesel", "miden-protocol", - "thiserror", + "thiserror 2.0.18", "tracing", ] @@ -2421,7 +2481,7 @@ name = "miden-node-grpc-error-macro" version = "0.14.0" dependencies = [ "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2445,7 +2505,7 @@ dependencies = [ "rand_chacha", "rstest", "tempfile", - "thiserror", + "thiserror 2.0.18", 
"tokio", "tokio-stream", "tokio-util", @@ -2472,7 +2532,7 @@ dependencies = [ "miette", "proptest", "prost", - "thiserror", + "thiserror 2.0.18", "tonic", "tonic-prost", "tonic-prost-build", @@ -2513,7 +2573,7 @@ dependencies = [ "rstest", "semver 1.0.27", "tempfile", - "thiserror", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -2560,10 +2620,10 @@ dependencies = [ "serde", "tempfile", "termtree", - "thiserror", + "thiserror 2.0.18", "tokio", "tokio-stream", - "toml 0.9.11+spec-1.1.0", + "toml 1.0.3+spec-1.1.0", "tonic", "tonic-reflection", "tower-http", @@ -2598,7 +2658,7 @@ name = "miden-node-test-macro" version = "0.1.0" dependencies = [ "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2607,7 +2667,6 @@ version = "0.14.0" dependencies = [ "anyhow", "bytes", - "figment", "http", "http-body-util", "itertools 0.14.0", @@ -2618,8 +2677,7 @@ dependencies = [ "opentelemetry-otlp", "opentelemetry_sdk", "rand", - "serde", - "thiserror", + "thiserror 2.0.18", "tokio", "tonic", "tower-http", @@ -2643,7 +2701,7 @@ dependencies = [ "miden-node-utils", "miden-protocol", "miden-tx", - "thiserror", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -2654,9 +2712,9 @@ dependencies = [ [[package]] name = "miden-processor" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18a6a5eebe64e81a29be6321ee8f4478c6bfaf619b7689825884e8cd308c044" +checksum = "0e09f7916b1e7505f74a50985a185fdea4c0ceb8f854a34c90db28e3f7da7ab6" dependencies = [ "itertools 0.14.0", "miden-air", @@ -2666,7 +2724,7 @@ dependencies = [ "miden-utils-indexing", "paste", "rayon", - "thiserror", + "thiserror 2.0.18", "tokio", "tracing", "winter-prover", @@ -2696,8 +2754,8 @@ dependencies = [ "regex", "semver 1.0.27", "serde", - "thiserror", - "toml 0.9.11+spec-1.1.0", + "thiserror 2.0.18", + "toml 0.9.12+spec-1.1.0", "walkdir", "winter-rand-utils", ] @@ -2709,14 +2767,14 @@ source = 
"git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdf dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "miden-prover" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83070f0ca1a08235362e990238b6487191f814054aaebcc40883a073fdcd18f9" +checksum = "d45e30526be72b8af0fd1d8b24c9cba8ac1187ca335dcee38b8e5e20234e7698" dependencies = [ "miden-air", "miden-debug-types", @@ -2765,13 +2823,13 @@ name = "miden-remote-prover-client" version = "0.14.0" dependencies = [ "fs-err", - "getrandom 0.3.4", + "getrandom 0.4.1", "miden-node-proto-build", "miden-protocol", "miden-tx", "miette", "prost", - "thiserror", + "thiserror 2.0.18", "tokio", "tonic", "tonic-prost", @@ -2793,7 +2851,7 @@ dependencies = [ "miden-protocol", "rand", "regex", - "thiserror", + "thiserror 2.0.18", "walkdir", ] @@ -2815,7 +2873,7 @@ dependencies = [ "miden-tx-batch-prover", "rand", "rand_chacha", - "thiserror", + "thiserror 2.0.18", "winterfell", ] @@ -2829,7 +2887,7 @@ dependencies = [ "miden-prover", "miden-standards", "miden-verifier", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -2843,9 +2901,9 @@ dependencies = [ [[package]] name = "miden-utils-core-derive" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9fc6d350fb9ad44797e8d0a1feaacaa6ee4079ef752d9ababc101ffc40ec354" +checksum = "a1b1d490e6d7b509622d3c2cc69ffd66ad48bf953dc614579b568fe956ce0a6c" dependencies = [ "proc-macro2", "quote", @@ -2854,9 +2912,9 @@ dependencies = [ [[package]] name = "miden-utils-diagnostics" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af2462fb2e750247a56264eddf40e2e1c8d96ff9379abe73acbcbe81e530e1d5" +checksum = "52658f6dc091c1c78e8b35ee3e7ff3dad53051971a3c514e461f581333758fe7" dependencies = [ "miden-crypto", "miden-debug-types", @@ -2867,18 
+2925,18 @@ dependencies = [ [[package]] name = "miden-utils-indexing" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57046b5c263b78e7fa5a6e328ca852e6319cf844faa26fbdcbb128ec555deb2a" +checksum = "eeff7bcb7875b222424bdfb657a7cf21a55e036aa7558ebe1f5d2e413b440d0d" dependencies = [ - "thiserror", + "thiserror 2.0.18", ] [[package]] name = "miden-utils-sync" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d3e129b62099672a1ffc012ab2e26ee7f2b35e4ca18ca1f726b88c53546ddd" +checksum = "41d53d1ab5b275d8052ad9c4121071cb184bc276ee74354b0d8a2075e5c1d1f0" dependencies = [ "lock_api", "loom", @@ -2887,13 +2945,13 @@ dependencies = [ [[package]] name = "miden-verifier" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe033af062937938ded511e5238db3bf8e0c1a30205850d62fb23271b3c96f85" +checksum = "b13816663794beb15c8a4721c15252eb21f3b3233525684f60c7888837a98ff4" dependencies = [ "miden-air", "miden-core", - "thiserror", + "thiserror 2.0.18", "tracing", "winter-verifier", ] @@ -2906,7 +2964,7 @@ checksum = "9d4cfab04baffdda3fb9eafa5f873604059b89a1699aa95e4f1057397a69f0b5" dependencies = [ "miden-formatting", "smallvec", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -2936,7 +2994,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2946,7 +3004,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36c791ecdf977c99f45f23280405d7723727470f6689a5e6dbf513ac547ae10d" dependencies = [ "serde", - "toml 0.9.11+spec-1.1.0", + "toml 0.9.12+spec-1.1.0", ] [[package]] @@ -3007,23 +3065,6 @@ dependencies = [ "getrandom 0.2.17", ] -[[package]] -name = "native-tls" -version = "0.2.14" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe 0.1.6", - "openssl-sys", - "schannel", - "security-framework 2.11.1", - "security-framework-sys", - "tempfile", -] - [[package]] name = "new_debug_unreachable" version = "1.0.6" @@ -3096,7 +3137,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3183,56 +3224,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "openssl" -version = "0.10.75" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" -dependencies = [ - "bitflags", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", -] - -[[package]] -name = "openssl-probe" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" - [[package]] name = "openssl-probe" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" -[[package]] -name = "openssl-sys" -version = "0.9.111" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" -dependencies = [ - "cc", - "libc", - 
"pkg-config", - "vcpkg", -] - [[package]] name = "opentelemetry" version = "0.31.0" @@ -3243,7 +3240,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror", + "thiserror 2.0.18", "tracing", ] @@ -3258,7 +3255,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost", - "thiserror", + "thiserror 2.0.18", "tokio", "tonic", ] @@ -3288,16 +3285,26 @@ dependencies = [ "opentelemetry", "percent-encoding", "rand", - "thiserror", + "thiserror 2.0.18", "tokio", "tokio-stream", ] [[package]] name = "owo-colors" -version = "4.2.3" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d211803b9b6b570f68772237e415a029d5a50c65d382910b879fb19d3271f94d" + +[[package]] +name = "page_size" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" +dependencies = [ + "libc", + "winapi", +] [[package]] name = "parking_lot" @@ -3328,29 +3335,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "pear" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" -dependencies = [ - "inlinable_string", - "pear_codegen", - "yansi", -] - -[[package]] -name = "pear_codegen" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" -dependencies = [ - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn 2.0.114", -] - [[package]] name = "percent-encoding" version = "2.3.2" @@ -3367,6 +3351,17 @@ dependencies = [ "indexmap", ] +[[package]] +name = "petgraph" +version = "0.8.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" +dependencies = [ + "fixedbitset", + "hashbrown 0.15.5", + "indexmap", +] + [[package]] name = "phf_shared" version = "0.11.3" @@ -3393,7 +3388,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3465,15 +3460,15 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "portable-atomic-util" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" dependencies = [ "portable-atomic", ] @@ -3525,7 +3520,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3534,7 +3529,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.10+spec-1.0.0", + "toml_edit", ] [[package]] @@ -3546,24 +3541,11 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "proc-macro2-diagnostics" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", - 
"version_check", - "yansi", -] - [[package]] name = "proptest" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" +checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532" dependencies = [ "bit-set", "bit-vec", @@ -3586,7 +3568,7 @@ checksum = "fb6dc647500e84a25a85b100e76c85b8ace114c209432dc174f20aac11d4ed6c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3601,23 +3583,22 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" +checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" dependencies = [ "heck", "itertools 0.14.0", "log", "multimap", - "once_cell", - "petgraph", + "petgraph 0.8.3", "prettyplease", "prost", "prost-types", "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.114", + "syn 2.0.117", "tempfile", ] @@ -3631,7 +3612,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3648,9 +3629,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" dependencies = [ "prost", ] @@ -3667,7 +3648,7 @@ dependencies = [ "prost-reflect", "prost-types", "protox-parse", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -3679,14 +3660,14 @@ dependencies = [ "logos", "miette", "prost-types", - "thiserror", + "thiserror 2.0.18", ] [[package]] name = "pulldown-cmark" -version = "0.13.0" +version = "0.13.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" +checksum = "83c41efbf8f90ac44de7f3a868f0867851d261b56291732d0cbf7cceaaeb55a6" dependencies = [ "bitflags", "memchr", @@ -3695,9 +3676,9 @@ dependencies = [ [[package]] name = "pulldown-cmark-to-cmark" -version = "21.1.0" +version = "22.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8246feae3db61428fd0bb94285c690b460e4517d83152377543ca802357785f1" +checksum = "50793def1b900256624a709439404384204a5dc3a6ec580281bfaac35e882e90" dependencies = [ "pulldown-cmark", ] @@ -3708,6 +3689,62 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "aws-lc-rs", + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.60.2", +] + [[package]] name = "quote" version = 
"1.0.44" @@ -3819,9 +3856,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.12.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -3831,9 +3868,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -3842,9 +3879,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" [[package]] name = "relative-path" @@ -3854,9 +3891,9 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" -version = "0.12.28" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" dependencies = [ "base64", "bytes", @@ -3868,21 +3905,22 @@ dependencies = [ "http-body-util", "hyper", "hyper-rustls", - "hyper-tls", "hyper-util", "js-sys", "log", "mime", - "native-tls", "percent-encoding", "pin-project-lite", + "quinn", + "rustls", "rustls-pki-types", + "rustls-platform-verifier", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-native-tls", + "tokio-rustls", "tower", "tower-http", "tower-service", @@ -3932,8 +3970,8 @@ 
version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" dependencies = [ - "hashbrown", - "thiserror", + "hashbrown 0.16.1", + "thiserror 2.0.18", ] [[package]] @@ -3961,7 +3999,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.114", + "syn 2.0.117", "unicode-ident", ] @@ -4005,28 +4043,29 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] name = "rustix" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ "bitflags", "errno", "libc", - "linux-raw-sys 0.11.0", + "linux-raw-sys 0.12.1", "windows-sys 0.61.2", ] [[package]] name = "rustls" -version = "0.23.36" +version = "0.23.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" dependencies = [ + "aws-lc-rs", "log", "once_cell", "ring", @@ -4042,10 +4081,10 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe 0.2.1", + "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.5.1", + "security-framework", ] [[package]] @@ -4054,15 +4093,44 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ + "web-time", "zeroize", ] +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -4088,9 +4156,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -4153,22 +4221,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ "bitflags", 
"core-foundation 0.10.1", @@ -4179,9 +4234,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.15.0" +version = "2.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" dependencies = [ "core-foundation-sys", "libc", @@ -4239,7 +4294,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4266,15 +4321,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - [[package]] name = "serde_spanned" version = "1.0.4" @@ -4298,9 +4344,9 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.3.1" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555" +checksum = "911bd979bf1070a3f3aa7b691a3b3e9968f339ceeec89e08c280a8a22207a32f" dependencies = [ "futures-executor", "futures-util", @@ -4313,13 +4359,13 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.3.1" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" +checksum = "0a7d91949b85b0d2fb687445e448b40d322b6b3e4af6b44a29b21d9a5f33e6d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4386,9 +4432,9 @@ checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" -version = "0.4.11" +version = "0.4.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" [[package]] name = "smallvec" @@ -4516,9 +4562,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.114" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -4542,14 +4588,14 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "system-configuration" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ "bitflags", "core-foundation 0.9.4", @@ -4574,14 +4620,14 @@ checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" [[package]] name = "tempfile" -version = "3.24.0" +version = "3.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" +checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" dependencies = [ "fastrand", - "getrandom 0.3.4", + "getrandom 0.4.1", "once_cell", - "rustix 1.1.3", + "rustix 1.1.4", "windows-sys 0.61.2", ] @@ -4619,15 +4665,15 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ - "rustix 1.1.3", + "rustix 1.1.4", "windows-sys 0.60.2", ] [[package]] name = "termtree" -version = "0.5.1" 
+version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" +checksum = "d4d1330fe7f7f872cd05165130b10602d667b205fd85be09be2814b115d4ced9" [[package]] name = "textwrap" @@ -4640,13 +4686,33 @@ dependencies = [ "unicode-width 0.2.2", ] +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + [[package]] name = "thiserror" version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl", + "thiserror-impl 2.0.18", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", ] [[package]] @@ -4657,7 +4723,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4720,6 +4786,21 @@ dependencies = [ "serde_json", ] +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" version = "1.49.0" @@ -4745,17 +4826,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.114", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", + "syn 2.0.117", ] [[package]] @@ -4795,25 +4866,13 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.23" +version = "0.9.12+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned 0.6.9", - "toml_datetime 0.6.11", - "toml_edit 0.22.27", -] - -[[package]] -name = "toml" -version = "0.9.11+spec-1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" dependencies = [ "indexmap", "serde_core", - "serde_spanned 1.0.4", + "serde_spanned", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", @@ -4821,12 +4880,18 @@ dependencies = [ ] [[package]] -name = "toml_datetime" -version = "0.6.11" +name = "toml" +version = "1.0.3+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +checksum = "c7614eaf19ad818347db24addfa201729cf2a9b6fdfd9eb0ab870fcacc606c0c" dependencies = [ - "serde", + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime 1.0.0+spec-1.1.0", + "toml_parser", + "toml_writer", + "winnow", ] [[package]] @@ -4839,17 +4904,12 @@ dependencies = [ ] [[package]] -name = "toml_edit" -version = "0.22.27" +name = "toml_datetime" +version = "1.0.0+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +checksum = 
"32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e" dependencies = [ - "indexmap", - "serde", - "serde_spanned 0.6.9", - "toml_datetime 0.6.11", - "toml_write", - "winnow", + "serde_core", ] [[package]] @@ -4866,19 +4926,13 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.6+spec-1.1.0" +version = "1.0.9+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" dependencies = [ "winnow", ] -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - [[package]] name = "toml_writer" version = "1.0.6+spec-1.1.0" @@ -4887,9 +4941,9 @@ checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tonic" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" +checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" dependencies = [ "async-trait", "axum", @@ -4918,21 +4972,21 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40aaccc9f9eccf2cd82ebc111adc13030d23e887244bc9cfa5d1d636049de3" +checksum = "1882ac3bf5ef12877d7ed57aad87e75154c11931c2ba7e6cde5e22d63522c734" dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "tonic-health" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a82868bf299e0a1d2e8dce0dc33a46c02d6f045b2c1f1d6cc8dc3d0bf1812ef" +checksum = 
"f4ff0636fef47afb3ec02818f5bceb4377b8abb9d6a386aeade18bd6212f8eb7" dependencies = [ "prost", "tokio", @@ -4943,9 +4997,9 @@ dependencies = [ [[package]] name = "tonic-prost" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" dependencies = [ "bytes", "prost", @@ -4954,25 +5008,25 @@ dependencies = [ [[package]] name = "tonic-prost-build" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a16cba4043dc3ff43fcb3f96b4c5c154c64cbd18ca8dce2ab2c6a451d058a2" +checksum = "f3144df636917574672e93d0f56d7edec49f90305749c668df5101751bb8f95a" dependencies = [ "prettyplease", "proc-macro2", "prost-build", "prost-types", "quote", - "syn 2.0.114", + "syn 2.0.117", "tempfile", "tonic-build", ] [[package]] name = "tonic-reflection" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34da53e8387581d66db16ff01f98a70b426b091fdf76856e289d5c1bd386ed7b" +checksum = "aaf0685a51e6d02b502ba0764002e766b7f3042aed13d9234925b6ffbfa3fca7" dependencies = [ "prost", "prost-types", @@ -4984,9 +5038,9 @@ dependencies = [ [[package]] name = "tonic-web" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75214f6b6bd28c19aa752ac09fdf0eea546095670906c21fe3940e180a4c43f2" +checksum = "29453d84de05f4f1b573db22e6f9f6c95c189a6089a440c9a098aa9dea009299" dependencies = [ "base64", "bytes", @@ -5002,9 +5056,9 @@ dependencies = [ [[package]] name = "tonic-web-wasm-client" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "898cd44be5e23e59d2956056538f1d6b3c5336629d384ffd2d92e76f87fb98ff" +checksum = 
"e8e21e20b94f808d6f2244a5d960d02c28dd82066abddd2f27019bac0535f310" dependencies = [ "base64", "byteorder", @@ -5016,7 +5070,7 @@ dependencies = [ "httparse", "js-sys", "pin-project", - "thiserror", + "thiserror 2.0.18", "tonic", "tower-service", "wasm-bindgen", @@ -5096,7 +5150,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5111,13 +5165,13 @@ dependencies = [ [[package]] name = "tracing-forest" -version = "0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3298fe855716711a00474eceb89cc7dc254bbe67f6bc4afafdeec5f0c538771c" +checksum = "f09cb459317a3811f76644334473239d696cd8efc606963ae7d1c308cead3b74" dependencies = [ "chrono", "smallvec", - "thiserror", + "thiserror 2.0.18", "tracing", "tracing-subscriber", ] @@ -5188,9 +5242,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.114" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17e807bff86d2a06b52bca4276746584a78375055b6e45843925ce2802b335" +checksum = "47c635f0191bd3a2941013e5062667100969f8c4e9cd787c14f977265d73616e" dependencies = [ "dissimilar", "glob", @@ -5199,7 +5253,7 @@ dependencies = [ "serde_json", "target-triple", "termcolor", - "toml 0.9.11+spec-1.1.0", + "toml 1.0.3+spec-1.1.0", ] [[package]] @@ -5214,15 +5268,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "uncased" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" -dependencies = [ - "version_check", -] - [[package]] name = "unicase" version = "2.9.0" @@ -5231,9 +5276,9 @@ checksum = 
"dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-linebreak" @@ -5370,11 +5415,20 @@ dependencies = [ "wit-bindgen", ] +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + [[package]] name = "wasm-bindgen" -version = "0.2.108" +version = "0.2.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +checksum = "60722a937f594b7fde9adb894d7c092fc1bb6612897c46368d18e7a20208eff2" dependencies = [ "cfg-if", "once_cell", @@ -5385,9 +5439,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.58" +version = "0.4.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" +checksum = "8a89f4650b770e4521aa6573724e2aed4704372151bd0de9d16a3bbabb87441a" dependencies = [ "cfg-if", "futures-util", @@ -5399,9 +5453,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.108" +version = "0.2.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +checksum = "0fac8c6395094b6b91c4af293f4c79371c163f9a6f56184d2c9a85f5a95f3950" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5409,31 +5463,53 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.108" +version = "0.2.113" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +checksum = "ab3fabce6159dc20728033842636887e4877688ae94382766e00b180abac9d60" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.108" +version = "0.2.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +checksum = "de0e091bdb824da87dc01d967388880d017a0a9bc4f3bdc0d86ee9f9336e3bb5" dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + [[package]] name = "wasm-streams" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +checksum = "9d1ec4f6517c9e11ae630e200b2b65d193279042e28edd4a2cda233e46670bbb" dependencies = [ "futures-util", "js-sys", @@ -5442,11 +5518,23 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver 1.0.27", +] + [[package]] name = "web-sys" -version = "0.3.85" +version = "0.3.90" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +checksum = "705eceb4ce901230f8625bd1d665128056ccbe4b7408faa625eec1ba80f59a97" dependencies = [ "js-sys", "wasm-bindgen", @@ -5462,6 +5550,31 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-root-certs" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + [[package]] name = "winapi-util" version = "0.1.11" @@ -5471,6 +5584,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + [[package]] name = "windows-core" version = "0.62.2" @@ -5492,7 +5611,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5503,7 +5622,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5543,27 +5662,27 @@ dependencies = [ [[package]] name = "windows-sys" -version = "0.48.0" +version = "0.45.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.42.2", ] [[package]] name = "windows-sys" -version = "0.52.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] name = "windows-sys" -version = "0.59.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets 0.52.6", ] @@ -5586,6 +5705,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -5634,6 +5768,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -5652,6 +5792,12 @@ version = "0.53.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -5670,6 +5816,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -5700,6 +5852,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -5718,6 +5876,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -5736,6 +5900,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version 
= "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -5754,6 +5924,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -5833,7 +6009,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d31a19dae58475d019850e25b0170e94b16d382fbf6afee9c0e80fdc935e73e" dependencies = [ "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5899,6 +6075,88 @@ name = "wit-bindgen" version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver 1.0.27", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] [[package]] name = "writeable" @@ -5941,28 +6199,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.34" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.34" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5982,7 +6240,7 @@ checksum = 
"d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] @@ -6022,11 +6280,11 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "zmij" -version = "1.0.17" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/Cargo.toml b/Cargo.toml index 2f6828f98..3bcb715ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -99,7 +99,8 @@ pretty_assertions = { version = "1.4" } prost = { default-features = false, version = "=0.14.3" } protox = { version = "=0.9.1" } rand = { version = "0.9" } -rand_chacha = { version = "0.9" } +rand_chacha = { default-features = false, version = "0.9" } +reqwest = { version = "0.13" } rstest = { version = "0.26" } serde = { features = ["derive"], version = "1" } tempfile = { version = "3" } @@ -107,7 +108,7 @@ thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } tokio-util = { version = "0.7" } -toml = { version = "0.9" } +toml = "1.0" tonic = { default-features = false, version = "0.14" } tonic-health = { version = "0.14" } tonic-prost = { version = "0.14" } diff --git a/bin/network-monitor/Cargo.toml b/bin/network-monitor/Cargo.toml index 6667a4ded..357169c02 100644 --- a/bin/network-monitor/Cargo.toml +++ b/bin/network-monitor/Cargo.toml @@ -26,9 +26,9 @@ miden-protocol = { features = ["std", "testing"], workspace = true } miden-standards = { workspace = true } miden-testing = { workspace = true } miden-tx = { features = ["std"], workspace = true } -rand = { version = "0.9" } -rand_chacha = { version = "0.9" } -reqwest = { 
features = ["json"], version = "0.12" } +rand = { workspace = true } +rand_chacha = { workspace = true } +reqwest = { features = ["json", "query"], workspace = true } serde = { features = ["derive"], version = "1.0" } serde_json = { version = "1.0" } sha2 = { version = "0.10" } diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index 2743f3e8d..700ce3706 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -37,5 +37,4 @@ url = { workspace = true } miden-node-rocksdb-cxx-linkage-fix = { workspace = true } [dev-dependencies] -figment = { features = ["env", "test", "toml"], version = "0.10" } miden-node-utils = { features = ["tracing-forest"], workspace = true } diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 9778daec8..79464a987 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.90-slim-bullseye AS chef +FROM rust:1.91-slim-bullseye AS chef # Install build dependencies. RocksDB is compiled from source by librocksdb-sys. RUN apt-get update && \ apt-get -y upgrade && \ diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index 474190ca6..6ca345217 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -29,7 +29,7 @@ miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["batch-prover", "block-prover"], workspace = true } miden-standards = { workspace = true } miden-tx-batch-prover = { workspace = true } -rand = { version = "0.9" } +rand = { workspace = true } thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } @@ -49,7 +49,7 @@ miden-protocol = { default-features = true, features = ["testing"], works miden-standards = { features = ["testing"], workspace = true } miden-tx = { features = ["testing"], workspace = true } pretty_assertions = "1.4" -rand_chacha = { default-features = false, 
version = "0.9" } +rand_chacha = { default-features = false, workspace = true } rstest = { workspace = true } serial_test = "3.2" tempfile = { workspace = true } diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index f73600f27..e21d19f18 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -21,9 +21,9 @@ std = ["miden-protocol/std", "miden-tx/std"] tx-prover = ["dep:miden-protocol", "dep:miden-tx", "dep:tokio"] [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] -getrandom = { features = ["wasm_js"], version = "0.3" } +getrandom = { features = ["wasm_js"], version = "0.4" } tonic = { features = ["codegen"], workspace = true } -tonic-web-wasm-client = { default-features = false, version = "0.8" } +tonic-web-wasm-client = { default-features = false, version = "0.9" } [target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies] tonic = { features = ["codegen", "tls-native-roots", "tls-ring", "transport"], workspace = true } diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 276a4cf25..537173e67 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -42,6 +42,6 @@ miden-node-store = { features = ["rocksdb"], workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } miden-standards = { workspace = true } -reqwest = { version = "0.12" } +reqwest = { workspace = true } rstest = { workspace = true } tempfile = { workspace = true } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index fd97f9195..59dae55e1 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -42,7 +42,7 @@ serde = { features = ["derive"], version = "1" } thiserror = { workspace = true } tokio = { features = ["fs", "rt-multi-thread"], workspace = true } tokio-stream = { features = 
["net"], workspace = true } -toml = { version = "0.9" } +toml = { workspace = true } tonic = { default-features = true, workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } @@ -57,7 +57,7 @@ miden-protocol = { features = ["std"], workspace = true } [dev-dependencies] assert_matches = { workspace = true } -criterion = { version = "0.5" } +criterion = "0.8" fs-err = { workspace = true } miden-node-test-macro = { workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } @@ -66,7 +66,7 @@ miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } regex = { version = "1.11" } tempfile = { workspace = true } -termtree = { version = "0.5" } +termtree = "1.0" [features] default = ["rocksdb"] diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index 2c5fea6e5..f2817c604 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -21,7 +21,6 @@ testing = ["miden-protocol/testing"] [dependencies] anyhow = { workspace = true } bytes = { version = "1.10" } -figment = { features = ["env", "toml"], version = "0.10" } http = { workspace = true } http-body-util = { version = "0.1" } itertools = { workspace = true } @@ -31,13 +30,12 @@ opentelemetry = { version = "0.31" } opentelemetry-otlp = { default-features = false, features = ["grpc-tonic", "tls-roots", "trace"], version = "0.31" } opentelemetry_sdk = { features = ["rt-tokio", "testing"], version = "0.31" } rand = { workspace = true } -serde = { features = ["derive"], version = "1.0" } thiserror = { workspace = true } tokio = { workspace = true } tonic = { default-features = true, workspace = true } tower-http = { features = ["catch-panic"], workspace = true } tracing = { workspace = true } -tracing-forest = { features = ["chrono"], optional = true, version = "0.2" } +tracing-forest = { features = ["chrono"], optional = true, version = "0.3" } tracing-opentelemetry = { 
version = "0.32" } tracing-subscriber = { workspace = true } url = { workspace = true } diff --git a/crates/utils/src/config.rs b/crates/utils/src/config.rs deleted file mode 100644 index b29c9060f..000000000 --- a/crates/utils/src/config.rs +++ /dev/null @@ -1,23 +0,0 @@ -use std::path::Path; - -use figment::Figment; -use figment::providers::{Format, Toml}; -use serde::Deserialize; - -pub const DEFAULT_NODE_RPC_PORT: u16 = 57291; -pub const DEFAULT_BLOCK_PRODUCER_PORT: u16 = 48046; -pub const DEFAULT_STORE_PORT: u16 = 28943; -pub const DEFAULT_FAUCET_SERVER_PORT: u16 = 8080; - -/// Loads the user configuration. -/// -/// This function will look for the configuration file at the provided path. If the path is -/// relative, searches in parent directories all the way to the root as well. -/// -/// The above configuration options are indented to support easy of packaging and deployment. -#[expect(clippy::result_large_err, reason = "This error crashes the node")] -pub fn load_config Deserialize<'a>>( - config_file: impl AsRef, -) -> figment::Result { - Figment::from(Toml::file(config_file.as_ref())).extract() -} diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index 530e971e4..abf785263 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -1,4 +1,3 @@ -pub mod config; pub mod cors; pub mod crypto; #[cfg(feature = "testing")] From 7ed6ecd2afd5a96b56ce9724c183ef0d1a56aa2e Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Fri, 27 Feb 2026 06:29:45 +1300 Subject: [PATCH 56/77] feat(validator): KMS signing (#1677) --- CHANGELOG.md | 1 + Cargo.lock | 671 +++++++++++++++++-- bin/node/.env | 1 + bin/node/src/commands/bundled.rs | 25 +- bin/node/src/commands/mod.rs | 56 +- bin/node/src/commands/store.rs | 85 ++- bin/node/src/commands/validator.rs | 45 +- bin/stress-test/src/seeding/mod.rs | 6 +- crates/block-producer/src/server/tests.rs | 8 +- crates/rpc/src/tests.rs | 6 +- 
crates/store/src/db/tests.rs | 24 +- crates/store/src/errors.rs | 19 - crates/store/src/genesis/config/tests.rs | 12 +- crates/store/src/genesis/mod.rs | 21 +- crates/store/src/server/mod.rs | 5 +- crates/utils/src/lib.rs | 1 + crates/utils/src/signer.rs | 36 + crates/validator/Cargo.toml | 3 + crates/validator/src/block_validation/mod.rs | 15 +- crates/validator/src/lib.rs | 2 + crates/validator/src/server/mod.rs | 20 +- crates/validator/src/signers/kms.rs | 125 ++++ crates/validator/src/signers/mod.rs | 44 ++ 23 files changed, 1052 insertions(+), 179 deletions(-) create mode 100644 crates/utils/src/signer.rs create mode 100644 crates/validator/src/signers/kms.rs create mode 100644 crates/validator/src/signers/mod.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index b74d3cc59..e8798870f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ - Improved tracing span fields ([#1650](https://github.com/0xMiden/miden-node/pull/1650)) - Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/miden-node/pull/1662)). - [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/miden-node/pull/1688)). +- Added KMS signing support in validator ([#1677](https://github.com/0xMiden/miden-node/pull/1677)). 
## v0.13.7 (2026-02-25) diff --git a/Cargo.lock b/Cargo.lock index 763c7cf5b..f2df1dd16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -172,6 +172,48 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-config" +version = "1.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a8fc176d53d6fe85017f230405e3255cedb4a02221cb55ed6d76dccbbb099b2" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sdk-sso", + "aws-sdk-ssooidc", + "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "hex", + "http 1.4.0", + "ring", + "time", + "tokio", + "tracing", + "url", + "zeroize", +] + +[[package]] +name = "aws-credential-types" +version = "1.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d203b0bf2626dcba8665f5cd0871d7c2c0930223d6b6be9097592fea21242d0" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "zeroize", +] + [[package]] name = "aws-lc-rs" version = "1.16.0" @@ -194,6 +236,331 @@ dependencies = [ "fs_extra", ] +[[package]] +name = "aws-runtime" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede2ddc593e6c8acc6ce3358c28d6677a6dc49b65ba4b37a2befe14a11297e75" +dependencies = [ + "aws-credential-types", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "bytes-utils", + "fastrand", + "http 1.4.0", + "http-body 1.0.1", + "percent-encoding", + "pin-project-lite", + "tracing", + "uuid", +] + +[[package]] +name = "aws-sdk-kms" +version = "1.102.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"22b682ef733ec24c300b11cec2df9bfea7ee4bf48ab2030c832e27db92b69c68" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sso" +version = "1.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00c5ff27c6ba2cbd95e6e26e2e736676fdf6bcf96495b187733f521cfe4ce448" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-ssooidc" +version = "1.97.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d186f1e5a3694a188e5a0640b3115ccc6e084d104e16fd6ba968dca072ffef8" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sts" +version = "1.99.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9acba7c62f3d4e2408fa998a3a8caacd8b9a5b5549cf36e2372fbdae329d5449" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-query", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "fastrand", + "http 0.2.12", + 
"http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37411f8e0f4bea0c3ca0958ce7f18f6439db24d555dbd809787262cd00926aa9" +dependencies = [ + "aws-credential-types", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "form_urlencoded", + "hex", + "hmac", + "http 0.2.12", + "http 1.4.0", + "percent-encoding", + "sha2", + "time", + "tracing", +] + +[[package]] +name = "aws-smithy-async" +version = "1.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc50d0f63e714784b84223abd7abbc8577de8c35d699e0edd19f0a88a08ae13" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "aws-smithy-http" +version = "0.63.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d619373d490ad70966994801bc126846afaa0d1ee920697a031f0cf63f2568e7" +dependencies = [ + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-http-client" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00ccbb08c10f6bcf912f398188e42ee2eab5f1767ce215a02a73bc5df1bbdd95" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "h2 0.3.27", + "h2 0.4.13", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper 1.8.1", + "hyper-rustls 0.24.2", + "hyper-rustls 0.27.7", + "hyper-util", + "pin-project-lite", + "rustls 0.21.12", + "rustls 0.23.37", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.4", + "tower", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.62.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "27b3a779093e18cad88bbae08dc4261e1d95018c4c5b9356a52bcae7c0b6e9bb" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-observability" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3f39d5bb871aaf461d59144557f16d5927a5248a983a40654d9cf3b9ba183b" +dependencies = [ + "aws-smithy-runtime-api", +] + +[[package]] +name = "aws-smithy-query" +version = "0.60.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f76a580e3d8f8961e5d48763214025a2af65c2fa4cd1fb7f270a0e107a71b0" +dependencies = [ + "aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-runtime" +version = "1.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ccf7f6eba8b2dcf8ce9b74806c6c185659c311665c4bf8d6e71ebd454db6bf" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-http-client", + "aws-smithy-observability", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "pin-utils", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-runtime-api" +version = "1.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4af6e5def28be846479bbeac55aa4603d6f7986fc5da4601ba324dd5d377516" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "bytes", + "http 0.2.12", + "http 1.4.0", + "pin-project-lite", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-types" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ca2734c16913a45343b37313605d84e7d8b34a4611598ce1d25b35860a2bed3" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + 
"http-body 1.0.1", + "http-body-util", + "itoa", + "num-integer", + "pin-project-lite", + "pin-utils", + "ryu", + "serde", + "time", + "tokio", + "tokio-util", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.60.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b53543b4b86ed43f051644f704a98c7291b3618b67adf057ee77a366fa52fcaa" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "1.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0470cc047657c6e286346bdf10a8719d26efd6a91626992e0e64481e44323e96" +dependencies = [ + "aws-credential-types", + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "rustc_version 0.4.1", + "tracing", +] + [[package]] name = "axum" version = "0.8.8" @@ -204,10 +571,10 @@ dependencies = [ "bytes", "form_urlencoded", "futures-util", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.8.1", "hyper-util", "itoa", "matchit", @@ -235,8 +602,8 @@ checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ "bytes", "futures-core", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -282,6 +649,16 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64-simd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" +dependencies = [ + "outref", + "vsimd", +] + [[package]] name = "base64ct" version = "1.8.3" @@ -393,6 +770,16 @@ version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" +[[package]] +name = "bytes-utils" +version = "0.1.4" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] + [[package]] name = "bzip2-sys" version = "0.1.13+1.0.8" @@ -1383,6 +1770,25 @@ dependencies = [ "subtle", ] +[[package]] +name = "h2" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "h2" version = "0.4.13" @@ -1394,7 +1800,7 @@ dependencies = [ "fnv", "futures-core", "futures-sink", - "http", + "http 1.4.0", "indexmap", "slab", "tokio", @@ -1472,6 +1878,17 @@ dependencies = [ "digest", ] +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http" version = "1.4.0" @@ -1482,6 +1899,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + [[package]] name = "http-body" version = "1.0.1" @@ -1489,7 +1917,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http", + "http 1.4.0", ] [[package]] @@ -1500,8 +1928,8 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", 
"pin-project-lite", ] @@ -1523,6 +1951,30 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.27", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.5.10", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "hyper" version = "1.8.1" @@ -1533,9 +1985,9 @@ dependencies = [ "bytes", "futures-channel", "futures-core", - "h2", - "http", - "http-body", + "h2 0.4.13", + "http 1.4.0", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -1546,19 +1998,35 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.32", + "log", + "rustls 0.21.12", + "tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-rustls" version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "http", - "hyper", + "http 1.4.0", + "hyper 1.8.1", "hyper-util", - "rustls", + "rustls 0.23.37", + "rustls-native-certs", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tower-service", ] @@ -1568,7 +2036,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper", + "hyper 1.8.1", "hyper-util", 
"pin-project-lite", "tokio", @@ -1585,14 +2053,14 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http", - "http-body", - "hyper", + "http 1.4.0", + "http-body 1.0.1", + "hyper 1.8.1", "ipnet", "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.2", "system-configuration", "tokio", "tower-service", @@ -2522,7 +2990,7 @@ dependencies = [ "assert_matches", "fs-err", "hex", - "http", + "http 1.4.0", "miden-node-grpc-error-macro", "miden-node-proto-build", "miden-node-rocksdb-cxx-linkage-fix", @@ -2559,7 +3027,7 @@ version = "0.14.0" dependencies = [ "anyhow", "futures", - "http", + "http 1.4.0", "mediatype", "miden-air", "miden-node-proto", @@ -2667,7 +3135,7 @@ version = "0.14.0" dependencies = [ "anyhow", "bytes", - "http", + "http 1.4.0", "http-body-util", "itertools 0.14.0", "lru", @@ -2693,8 +3161,11 @@ name = "miden-node-validator" version = "0.14.0" dependencies = [ "anyhow", + "aws-config", + "aws-sdk-kms", "diesel", "diesel_migrations", + "k256", "miden-node-db", "miden-node-proto", "miden-node-proto-build", @@ -2791,7 +3262,7 @@ dependencies = [ "anyhow", "async-trait", "clap", - "http", + "http 1.4.0", "humantime", "miden-block-prover", "miden-node-proto", @@ -3250,7 +3721,7 @@ version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf" dependencies = [ - "http", + "http 1.4.0", "opentelemetry", "opentelemetry-proto", "opentelemetry_sdk", @@ -3290,6 +3761,12 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "outref" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" + [[package]] name = "owo-colors" version = "4.3.0" @@ -3701,8 +4178,8 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls", - "socket2", + "rustls 0.23.37", + "socket2 0.6.2", "thiserror 2.0.18", "tokio", 
"tracing", @@ -3722,7 +4199,7 @@ dependencies = [ "rand", "ring", "rustc-hash", - "rustls", + "rustls 0.23.37", "rustls-pki-types", "slab", "thiserror 2.0.18", @@ -3740,7 +4217,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.6.2", "tracing", "windows-sys 0.60.2", ] @@ -3877,6 +4354,12 @@ dependencies = [ "regex-syntax", ] +[[package]] +name = "regex-lite" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973" + [[package]] name = "regex-syntax" version = "0.8.10" @@ -3899,12 +4382,12 @@ dependencies = [ "bytes", "encoding_rs", "futures-core", - "h2", - "http", - "http-body", + "h2 0.4.13", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", - "hyper", - "hyper-rustls", + "hyper 1.8.1", + "hyper-rustls 0.27.7", "hyper-util", "js-sys", "log", @@ -3912,7 +4395,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls", + "rustls 0.23.37", "rustls-pki-types", "rustls-platform-verifier", "serde", @@ -3920,7 +4403,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tower", "tower-http", "tower-service", @@ -4059,6 +4542,18 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + [[package]] name = "rustls" version = "0.23.37" @@ -4070,7 +4565,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.103.9", "subtle", "zeroize", ] @@ -4108,10 +4603,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls", + "rustls 0.23.37", "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki", + "rustls-webpki 0.103.9", 
"security-framework", "security-framework-sys", "webpki-root-certs", @@ -4124,6 +4619,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.103.9" @@ -4199,6 +4704,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "sdd" version = "3.0.10" @@ -4448,6 +4963,16 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "socket2" version = "0.6.2" @@ -4813,7 +5338,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.2", "tokio-macros", "windows-sys 0.61.2", ] @@ -4829,13 +5354,23 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", +] + 
[[package]] name = "tokio-rustls" version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls", + "rustls 0.23.37", "tokio", ] @@ -4949,20 +5484,20 @@ dependencies = [ "axum", "base64", "bytes", - "h2", - "http", - "http-body", + "h2 0.4.13", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.8.1", "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", "rustls-native-certs", - "socket2", + "socket2 0.6.2", "sync_wrapper", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tokio-stream", "tower", "tower-layer", @@ -5044,8 +5579,8 @@ checksum = "29453d84de05f4f1b573db22e6f9f6c95c189a6089a440c9a098aa9dea009299" dependencies = [ "base64", "bytes", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "pin-project", "tokio-stream", "tonic", @@ -5064,8 +5599,8 @@ dependencies = [ "byteorder", "bytes", "futures-util", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", "httparse", "js-sys", @@ -5107,8 +5642,8 @@ dependencies = [ "bitflags", "bytes", "futures-util", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", "iri-string", "pin-project-lite", @@ -5333,6 +5868,12 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf8_iter" version = "1.0.4" @@ -5345,6 +5886,16 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "uuid" +version = "1.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" +dependencies = [ + 
"js-sys", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.1" @@ -5363,6 +5914,12 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "vsimd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" + [[package]] name = "vte" version = "0.14.1" @@ -6174,6 +6731,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "xmlparser" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" + [[package]] name = "yansi" version = "1.0.1" diff --git a/bin/node/.env b/bin/node/.env index 6bdfa9a80..02bceb57e 100644 --- a/bin/node/.env +++ b/bin/node/.env @@ -11,6 +11,7 @@ MIDEN_NODE_STORE_NTX_BUILDER_URL= MIDEN_NODE_STORE_BLOCK_PRODUCER_URL= MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL= MIDEN_NODE_VALIDATOR_KEY= +MIDEN_NODE_VALIDATOR_KMS_KEY_ID= MIDEN_NODE_RPC_URL=http://0.0.0.0:57291 MIDEN_NODE_DATA_DIRECTORY=./ MIDEN_NODE_ENABLE_OTEL=true diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 707e01193..9ca187207 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -7,7 +7,7 @@ use miden_node_block_producer::BlockProducer; use miden_node_rpc::Rpc; use miden_node_store::Store; use miden_node_utils::grpc::UrlExt; -use miden_node_validator::Validator; +use miden_node_validator::{Validator, ValidatorSigner}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::Deserializable; use tokio::net::TcpListener; @@ -17,14 +17,13 @@ use url::Url; use super::{ENV_DATA_DIRECTORY, ENV_RPC_URL}; use crate::commands::{ BlockProducerConfig, + BundledValidatorConfig, DEFAULT_TIMEOUT, ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, 
ENV_GENESIS_CONFIG_FILE, - ENV_VALIDATOR_KEY, - INSECURE_VALIDATOR_KEY_HEX, NtxBuilderConfig, - ValidatorConfig, + ValidatorKey, duration_to_human_readable_string, }; @@ -47,16 +46,9 @@ pub enum BundledCommand { /// Constructs the genesis block from the given toml file. #[arg(long, env = ENV_GENESIS_CONFIG_FILE, value_name = "FILE")] genesis_config_file: Option, - /// Insecure, hex-encoded validator secret key for development and testing purposes. - /// - /// If not provided, a predefined key is used. - #[arg( - long = "validator.key", - env = ENV_VALIDATOR_KEY, - value_name = "VALIDATOR_KEY", - default_value = INSECURE_VALIDATOR_KEY_HEX - )] - validator_key: String, + /// Configuration for the Validator key used to sign genesis block. + #[command(flatten)] + validator_key: ValidatorKey, }, /// Runs all three node components in the same process. @@ -83,7 +75,7 @@ pub enum BundledCommand { ntx_builder: NtxBuilderConfig, #[command(flatten)] - validator: ValidatorConfig, + validator: BundledValidatorConfig, /// Enables the exporting of traces for OpenTelemetry. 
/// @@ -156,7 +148,7 @@ impl BundledCommand { data_directory: PathBuf, block_producer: BlockProducerConfig, ntx_builder: NtxBuilderConfig, - validator: ValidatorConfig, + validator: BundledValidatorConfig, grpc_timeout: Duration, ) -> anyhow::Result<()> { // Start listening on all gRPC urls so that inter-component connections can be created @@ -313,6 +305,7 @@ impl BundledCommand { if let Some(address) = validator_socket_address { let secret_key_bytes = hex::decode(validator.validator_key)?; let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; + let signer = ValidatorSigner::new_local(signer); let id = join_set .spawn({ async move { diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 352a6de16..b7ef3c3c5 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -10,6 +10,9 @@ use miden_node_block_producer::{ DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH, }; +use miden_node_validator::ValidatorSigner; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Deserializable; use tokio::net::TcpListener; use url::Url; @@ -41,6 +44,7 @@ const ENV_MAX_BATCHES_PER_BLOCK: &str = "MIDEN_MAX_BATCHES_PER_BLOCK"; const ENV_MEMPOOL_TX_CAPACITY: &str = "MIDEN_NODE_MEMPOOL_TX_CAPACITY"; const ENV_NTX_SCRIPT_CACHE_SIZE: &str = "MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE"; const ENV_VALIDATOR_KEY: &str = "MIDEN_NODE_VALIDATOR_KEY"; +const ENV_VALIDATOR_KMS_KEY_ID: &str = "MIDEN_NODE_VALIDATOR_KMS_KEY_ID"; const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); @@ -51,9 +55,55 @@ fn duration_to_human_readable_string(duration: Duration) -> String { humantime::format_duration(duration).to_string() } -/// Configuration for the Validator component. +/// Configuration for the Validator key used to sign blocks. +/// +/// Used by the Validator command and the genesis bootstrap command. 
#[derive(clap::Args)] -pub struct ValidatorConfig { +#[group(required = true, multiple = false)] +pub struct ValidatorKey { + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. + /// + /// Cannot be used with `validator.key.kms-id`. + #[arg( + long = "validator.key.hex", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX, + )] + validator_key: String, + /// Key ID for the KMS key used by validator to sign blocks. + /// + /// Cannot be used with `validator.key.hex`. + #[arg( + long = "validator.key.kms-id", + env = ENV_VALIDATOR_KMS_KEY_ID, + value_name = "VALIDATOR_KMS_KEY_ID", + )] + validator_kms_key_id: Option, +} + +impl ValidatorKey { + /// Consumes the validator key configuration and returns a KMS or local key signer depending on + /// the supplied configuration. + pub async fn into_signer(self) -> anyhow::Result { + if let Some(kms_key_id) = self.validator_kms_key_id { + // Use KMS key ID to create a ValidatorSigner. + let signer = ValidatorSigner::new_kms(kms_key_id).await?; + Ok(signer) + } else { + // Use hex-encoded key to create a ValidatorSigner. + let signer = SecretKey::read_from_bytes(hex::decode(self.validator_key)?.as_ref())?; + let signer = ValidatorSigner::new_local(signer); + Ok(signer) + } + } +} + +/// Configuration for the Validator component when run in the bundled mode. +#[derive(clap::Args)] +pub struct BundledValidatorConfig { /// Insecure, hex-encoded validator secret key for development and testing purposes. /// Only used when the Validator URL argument is not set. #[arg( @@ -70,7 +120,7 @@ pub struct ValidatorConfig { validator_url: Option, } -impl ValidatorConfig { +impl BundledValidatorConfig { /// Converts the [`ValidatorConfig`] into a URL and an optional [`SocketAddr`]. /// /// If the `validator_url` is set, it returns the URL and `None` for the [`SocketAddr`]. 
diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 7bf56f4a8..14b266147 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -5,8 +5,8 @@ use anyhow::Context; use miden_node_store::Store; use miden_node_store::genesis::config::{AccountFileWithName, GenesisConfig}; use miden_node_utils::grpc::UrlExt; -use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; -use miden_protocol::utils::Deserializable; +use miden_node_utils::signer::BlockSigner; +use miden_node_validator::ValidatorSigner; use url::Url; use super::{ @@ -20,8 +20,7 @@ use crate::commands::{ ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, - ENV_VALIDATOR_KEY, - INSECURE_VALIDATOR_KEY_HEX, + ValidatorKey, duration_to_human_readable_string, }; @@ -44,18 +43,9 @@ pub enum StoreCommand { /// Use the given configuration file to construct the genesis state from. #[arg(long, env = ENV_GENESIS_CONFIG_FILE, value_name = "GENESIS_CONFIG")] genesis_config_file: Option, - /// Insecure, hex-encoded validator secret key for development and testing purposes. - /// - /// Used to sign the genesis block in the bootstrap process. - /// - /// If not provided, a predefined key is used. - #[arg( - long = "validator.key", - env = ENV_VALIDATOR_KEY, - value_name = "VALIDATOR_KEY", - default_value = INSECURE_VALIDATOR_KEY_HEX - )] - validator_key: String, + /// Configuration for the Validator key used to sign genesis block. + #[command(flatten)] + validator_key: ValidatorKey, }, /// Starts the store component. 
@@ -112,12 +102,15 @@ impl StoreCommand { accounts_directory, genesis_config_file, validator_key, - } => Self::bootstrap( - &data_directory, - &accounts_directory, - genesis_config_file.as_ref(), - validator_key, - ), + } => { + Self::bootstrap( + &data_directory, + &accounts_directory, + genesis_config_file.as_ref(), + validator_key, + ) + .await + }, StoreCommand::Start { rpc_url, ntx_builder_url, @@ -190,15 +183,12 @@ impl StoreCommand { .context("failed while serving store component") } - fn bootstrap( + async fn bootstrap( data_directory: &Path, accounts_directory: &Path, genesis_config: Option<&PathBuf>, - validator_key: String, + validator_key: ValidatorKey, ) -> anyhow::Result<()> { - // Decode the validator key. - let signer = SecretKey::read_from_bytes(&hex::decode(validator_key)?)?; - // Parse genesis config (or default if not given). let config = genesis_config .map(|file_path| { @@ -209,8 +199,6 @@ impl StoreCommand { .transpose()? .unwrap_or_default(); - let (genesis_state, secrets) = config.into_state(signer)?; - // Create directories if they do not already exist. for directory in &[accounts_directory, data_directory] { if fs_err::exists(directory)? { @@ -233,7 +221,41 @@ impl StoreCommand { } } - // Write the accounts to disk + // Bootstrap with KMS key or local key. + let signer = validator_key.into_signer().await?; + match signer { + ValidatorSigner::Kms(signer) => { + Self::bootstrap_accounts_and_store( + config, + signer, + accounts_directory, + data_directory, + ) + .await + }, + ValidatorSigner::Local(signer) => { + Self::bootstrap_accounts_and_store( + config, + signer, + accounts_directory, + data_directory, + ) + .await + }, + } + } + + /// Builds the genesis state of the chain, writes accounts to file, and bootstraps the store. 
+ async fn bootstrap_accounts_and_store( + config: GenesisConfig, + signer: impl BlockSigner, + accounts_directory: &Path, + data_directory: &Path, + ) -> anyhow::Result<()> { + // Build genesis state with the provided signer. + let (genesis_state, secrets) = config.into_state(signer)?; + + // Write accounts to file. for item in secrets.as_account_files(&genesis_state) { let AccountFileWithName { account_file, name } = item?; let accountpath = accounts_directory.join(name); @@ -246,6 +268,7 @@ impl StoreCommand { account_file.write(accountpath)?; } - Store::bootstrap(genesis_state, data_directory) + // Bootstrap store. + Store::bootstrap(genesis_state, data_directory).await } } diff --git a/bin/node/src/commands/validator.rs b/bin/node/src/commands/validator.rs index 461e446c1..ef3d9363a 100644 --- a/bin/node/src/commands/validator.rs +++ b/bin/node/src/commands/validator.rs @@ -1,9 +1,10 @@ +use std::net::SocketAddr; use std::path::PathBuf; use std::time::Duration; use anyhow::Context; use miden_node_utils::grpc::UrlExt; -use miden_node_validator::Validator; +use miden_node_validator::{Validator, ValidatorSigner}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::Deserializable; use url::Url; @@ -13,6 +14,7 @@ use crate::commands::{ ENV_DATA_DIRECTORY, ENV_ENABLE_OTEL, ENV_VALIDATOR_KEY, + ENV_VALIDATOR_KMS_KEY_ID, ENV_VALIDATOR_URL, INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, @@ -49,26 +51,63 @@ pub enum ValidatorCommand { /// Insecure, hex-encoded validator secret key for development and testing purposes. /// /// If not provided, a predefined key is used. - #[arg(long = "key", env = ENV_VALIDATOR_KEY, value_name = "VALIDATOR_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX)] + /// + /// Cannot be used with `key.kms-id`. 
+ #[arg( + long = "key.hex", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX, + group = "key" + )] validator_key: String, + + /// Key ID for the KMS key used by validator to sign blocks. + /// + /// Cannot be used with `key.hex`. + #[arg( + long = "key.kms-id", + env = ENV_VALIDATOR_KMS_KEY_ID, + value_name = "VALIDATOR_KMS_KEY_ID", + group = "key" + )] + kms_key_id: Option, }, } impl ValidatorCommand { + /// Runs the validator command. pub async fn handle(self) -> anyhow::Result<()> { let Self::Start { url, grpc_timeout, validator_key, data_directory, + kms_key_id, .. } = self; let address = url.to_socket().context("Failed to extract socket address from validator URL")?; - let signer = SecretKey::read_from_bytes(hex::decode(validator_key)?.as_ref())?; + // Run validator with KMS key backend if key id provided. + if let Some(kms_key_id) = kms_key_id { + let signer = ValidatorSigner::new_kms(kms_key_id).await?; + Self::serve(address, grpc_timeout, signer, data_directory).await + } else { + let signer = SecretKey::read_from_bytes(hex::decode(validator_key)?.as_ref())?; + let signer = ValidatorSigner::new_local(signer); + Self::serve(address, grpc_timeout, signer, data_directory).await + } + } + /// Runs the validator component until failure. 
+ async fn serve( + address: SocketAddr, + grpc_timeout: Duration, + signer: ValidatorSigner, + data_directory: PathBuf, + ) -> anyhow::Result<()> { Validator { address, grpc_timeout, diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index 3b80481bb..c77d50c77 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -93,7 +93,9 @@ pub async fn seed_store( let fee_params = FeeParameters::new(faucet.id(), 0).unwrap(); let signer = EcdsaSecretKey::new(); let genesis_state = GenesisState::new(vec![faucet.clone()], fee_params, 1, 1, signer); - Store::bootstrap(genesis_state.clone(), &data_directory).expect("store should bootstrap"); + Store::bootstrap(genesis_state.clone(), &data_directory) + .await + .expect("store should bootstrap"); // start the store let (_, store_url) = start_store(data_directory.clone()).await; @@ -103,7 +105,7 @@ pub async fn seed_store( let accounts_filepath = data_directory.join(ACCOUNTS_FILENAME); let data_directory = miden_node_store::DataDirectory::load(data_directory).expect("data directory should exist"); - let genesis_header = genesis_state.into_block().unwrap().into_inner(); + let genesis_header = genesis_state.into_block().await.unwrap().into_inner(); let metrics = generate_blocks( num_accounts, public_accounts_percentage, diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index 8c98e9da4..b97a946a9 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -4,7 +4,7 @@ use std::time::Duration; use miden_node_proto::generated::block_producer::api_client as block_producer_client; use miden_node_store::{GenesisState, Store}; use miden_node_utils::fee::test_fee_params; -use miden_node_validator::Validator; +use miden_node_validator::{Validator, ValidatorSigner}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use 
miden_protocol::testing::random_signer::RandomBlockSigner as _; use tokio::net::TcpListener; @@ -49,7 +49,7 @@ async fn block_producer_startup_is_robust_to_network_failures() { Validator { address: validator_addr, grpc_timeout, - signer: SecretKey::random(), + signer: ValidatorSigner::new_local(SecretKey::random()), data_directory, } .serve() @@ -131,7 +131,9 @@ async fn start_store( data_directory: &std::path::Path, ) -> runtime::Runtime { let genesis_state = GenesisState::new(vec![], test_fee_params(), 1, 1, SecretKey::random()); - Store::bootstrap(genesis_state.clone(), data_directory).expect("store should bootstrap"); + Store::bootstrap(genesis_state.clone(), data_directory) + .await + .expect("store should bootstrap"); let dir = data_directory.to_path_buf(); let rpc_listener = diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index e70d14563..9c69d2818 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -407,7 +407,9 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { let config = GenesisConfig::default(); let signer = SecretKey::new(); let (genesis_state, _) = config.into_state(signer).unwrap(); - Store::bootstrap(genesis_state.clone(), data_directory.path()).expect("store should bootstrap"); + Store::bootstrap(genesis_state.clone(), data_directory.path()) + .await + .expect("store should bootstrap"); let dir = data_directory.path().to_path_buf(); let rpc_listener = TcpListener::bind(store_addr).await.expect("store should bind a port"); let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") @@ -436,7 +438,7 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { ( store_runtime, data_directory, - genesis_state.into_block().unwrap().inner().header().commitment(), + genesis_state.into_block().await.unwrap().inner().header().commitment(), ) } diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index fed714335..9bffbb5c2 100644 --- 
a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -1419,9 +1419,9 @@ fn test_select_account_code_by_commitment_multiple_codes() { // ================================================================================================ /// Verifies genesis block with account containing vault assets can be inserted. -#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn genesis_with_account_assets() { +async fn genesis_with_account_assets() { use crate::genesis::GenesisState; let component_code = "pub proc foo push.1 end"; @@ -1446,15 +1446,15 @@ fn genesis_with_account_assets() { let genesis_state = GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); - let genesis_block = genesis_state.into_block().unwrap(); + let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); } /// Verifies genesis block with account containing storage maps can be inserted. -#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn genesis_with_account_storage_map() { +async fn genesis_with_account_storage_map() { use miden_protocol::account::StorageMap; use crate::genesis::GenesisState; @@ -1495,15 +1495,15 @@ fn genesis_with_account_storage_map() { let genesis_state = GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); - let genesis_block = genesis_state.into_block().unwrap(); + let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); } /// Verifies genesis block with account containing both vault assets and storage maps. 
-#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn genesis_with_account_assets_and_storage() { +async fn genesis_with_account_assets_and_storage() { use miden_protocol::account::StorageMap; use crate::genesis::GenesisState; @@ -1542,16 +1542,16 @@ fn genesis_with_account_assets_and_storage() { let genesis_state = GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); - let genesis_block = genesis_state.into_block().unwrap(); + let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); } /// Verifies genesis block with multiple accounts of different types. /// Tests realistic genesis scenario with basic accounts, assets, and storage. -#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn genesis_with_multiple_accounts() { +async fn genesis_with_multiple_accounts() { use miden_protocol::account::StorageMap; use crate::genesis::GenesisState; @@ -1620,7 +1620,7 @@ fn genesis_with_multiple_accounts() { 0, SecretKey::random(), ); - let genesis_block = genesis_state.into_block().unwrap(); + let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); } diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index a277f1c68..397c17386 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -17,7 +17,6 @@ use miden_protocol::errors::{ AccountTreeError, AssetError, AssetVaultError, - FeeError, NoteError, NullifierTreeError, StorageMapError, @@ -139,22 +138,6 @@ pub enum StateInitializationError { AccountToDeltaConversionFailed(String), } -#[derive(Debug, Error)] -pub enum GenesisError { - // ERRORS WITH AUTOMATIC CONVERSIONS FROM NESTED ERROR TYPES - // --------------------------------------------------------------------------------------------- - #[error("database error")] - Database(#[from] DatabaseError), - #[error("failed to build 
genesis account tree")] - AccountTree(#[source] AccountTreeError), - #[error("failed to deserialize genesis file")] - GenesisFileDeserialization(#[from] DeserializationError), - #[error("fee cannot be created")] - Fee(#[from] FeeError), - #[error("failed to build account delta from account")] - AccountDelta(AccountError), -} - // ENDPOINT ERRORS // ================================================================================================= #[derive(Error, Debug)] @@ -582,7 +565,6 @@ mod compile_tests { AccountError, DatabaseError, DeserializationError, - GenesisError, NetworkAccountError, NoteError, RecvError, @@ -613,7 +595,6 @@ mod compile_tests { ensure_is_error::(PhantomData); ensure_is_error::(PhantomData); - ensure_is_error::(PhantomData); ensure_is_error::(PhantomData); ensure_is_error::>(PhantomData); } diff --git a/crates/store/src/genesis/config/tests.rs b/crates/store/src/genesis/config/tests.rs index acdeb304c..926e757b7 100644 --- a/crates/store/src/genesis/config/tests.rs +++ b/crates/store/src/genesis/config/tests.rs @@ -68,9 +68,9 @@ fn parsing_yields_expected_default_values() -> TestResult { Ok(()) } -#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn genesis_accounts_have_nonce_one() -> TestResult { +async fn genesis_accounts_have_nonce_one() -> TestResult { let gcfg = GenesisConfig::default(); let (state, secrets) = gcfg.into_state(SecretKey::new()).unwrap(); let mut iter = secrets.as_account_files(&state); @@ -79,7 +79,7 @@ fn genesis_accounts_have_nonce_one() -> TestResult { assert_eq!(status_quo.account.nonce(), ONE); - let _block = state.into_block()?; + let _block = state.into_block().await?; Ok(()) } @@ -288,9 +288,9 @@ path = "does_not_exist.mac" ); } -#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn parsing_agglayer_sample_with_account_files() -> TestResult { +async fn parsing_agglayer_sample_with_account_files() -> TestResult { use miden_protocol::account::AccountType; // Use the actual sample 
file path since it references relative .mac files @@ -350,7 +350,7 @@ fn parsing_agglayer_sample_with_account_files() -> TestResult { assert_eq!(secrets.secrets.len(), 1, "Only native faucet should generate a secret"); // Verify the genesis state can be converted to a block - let _block = state.into_block()?; + let _block = state.into_block().await?; Ok(()) } diff --git a/crates/store/src/genesis/mod.rs b/crates/store/src/genesis/mod.rs index 5df1825d6..08d68fe1b 100644 --- a/crates/store/src/genesis/mod.rs +++ b/crates/store/src/genesis/mod.rs @@ -1,3 +1,4 @@ +use miden_node_utils::signer::BlockSigner; use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{Account, AccountDelta}; @@ -9,17 +10,15 @@ use miden_protocol::block::{ BlockNoteTree, BlockNumber, BlockProof, - BlockSigner, FeeParameters, ProvenBlock, }; use miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks}; use miden_protocol::crypto::merkle::smt::{LargeSmt, MemoryStorage, Smt}; +use miden_protocol::errors::AccountError; use miden_protocol::note::Nullifier; use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionKernel}; -use crate::errors::GenesisError; - pub mod config; // GENESIS STATE @@ -68,17 +67,14 @@ impl GenesisState { } impl GenesisState { - /// Returns the block header and the account SMT - pub fn into_block(self) -> Result { + /// Returns the block header and the account SMT. + pub async fn into_block(self) -> anyhow::Result { let accounts: Vec = self .accounts .iter() .map(|account| { let account_update_details = if account.id().is_public() { - AccountUpdateDetails::Delta( - AccountDelta::try_from(account.clone()) - .map_err(GenesisError::AccountDelta)?, - ) + AccountUpdateDetails::Delta(AccountDelta::try_from(account.clone())?) 
} else { AccountUpdateDetails::Private }; @@ -89,7 +85,7 @@ impl GenesisState { account_update_details, )) }) - .collect::, GenesisError>>()?; + .collect::, AccountError>>()?; // Convert account updates to SMT entries using account_id_to_smt_key let smt_entries = accounts.iter().map(|update| { @@ -134,7 +130,10 @@ impl GenesisState { let block_proof = BlockProof::new_dummy(); - let signature = self.block_signer.sign(&header); + // Sign and assert verification for sanity (no mismatch between frontend and backend signing + // impls). + let signature = self.block_signer.sign(&header).await?; + assert!(signature.verify(header.commitment(), &self.block_signer.public_key())); // SAFETY: Header and accounts should be valid by construction. // No notes or nullifiers are created at genesis, which is consistent with the above empty // block note tree root and empty nullifier tree root. diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index 3a284ceff..8c828f116 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -11,8 +11,8 @@ use miden_node_proto_build::{ store_rpc_api_descriptor, }; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; +use miden_node_utils::signer::BlockSigner; use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_protocol::block::BlockSigner; use tokio::net::TcpListener; use tokio::task::JoinSet; use tokio_stream::wrappers::TcpListenerStream; @@ -54,12 +54,13 @@ impl Store { skip_all, err, )] - pub fn bootstrap( + pub async fn bootstrap( genesis: GenesisState, data_directory: &Path, ) -> anyhow::Result<()> { let genesis = genesis .into_block() + .await .context("failed to convert genesis configuration into the genesis block")?; let data_directory = diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index abf785263..af86ccbb9 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -8,6 +8,7 @@ pub mod limiter; pub mod logging; pub mod lru_cache; 
pub mod panic; +pub mod signer; pub mod tracing; pub trait ErrorReport: std::error::Error { diff --git a/crates/utils/src/signer.rs b/crates/utils/src/signer.rs new file mode 100644 index 000000000..00dbe3ebc --- /dev/null +++ b/crates/utils/src/signer.rs @@ -0,0 +1,36 @@ +use core::convert::Infallible; +use core::error; + +use miden_protocol::block::BlockHeader; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, SecretKey, Signature}; + +// BLOCK SIGNER +// ================================================================================================ + +/// Trait which abstracts the signing of block headers with ECDSA signatures. +/// +/// Production-level implementations will involve some sort of secure remote backend. The trait also +/// allows for testing with local and ephemeral signers. +pub trait BlockSigner { + type Error: error::Error + Send + Sync + 'static; + fn sign( + &self, + header: &BlockHeader, + ) -> impl Future> + Send; + fn public_key(&self) -> PublicKey; +} + +// SECRET KEY BLOCK SIGNER +// ================================================================================================ + +impl BlockSigner for SecretKey { + type Error = Infallible; + + async fn sign(&self, header: &BlockHeader) -> Result { + Ok(self.sign(header.commitment())) + } + + fn public_key(&self) -> PublicKey { + self.public_key() + } +} diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index 570f2a8d2..1b4db7e04 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -18,8 +18,11 @@ workspace = true [dependencies] anyhow = { workspace = true } +aws-config = { version = "1.8.14" } +aws-sdk-kms = { version = "1.100" } diesel = { workspace = true } diesel_migrations = { workspace = true } +k256 = "0.13.4" miden-node-db = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } diff --git a/crates/validator/src/block_validation/mod.rs 
b/crates/validator/src/block_validation/mod.rs index 954d043b8..97b61fabc 100644 --- a/crates/validator/src/block_validation/mod.rs +++ b/crates/validator/src/block_validation/mod.rs @@ -1,12 +1,12 @@ use miden_node_db::{DatabaseError, Db}; -use miden_protocol::block::{BlockSigner, ProposedBlock}; +use miden_protocol::block::ProposedBlock; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_protocol::errors::ProposedBlockError; use miden_protocol::transaction::{TransactionHeader, TransactionId}; use tracing::{info_span, instrument}; -use crate::COMPONENT; use crate::db::find_unvalidated_transactions; +use crate::{COMPONENT, ValidatorSigner}; // BLOCK VALIDATION ERROR // ================================================================================================ @@ -17,6 +17,8 @@ pub enum BlockValidationError { UnvalidatedTransactions(Vec), #[error("failed to build block")] BlockBuildingFailed(#[source] ProposedBlockError), + #[error("failed to sign block: {0}")] + BlockSigningFailed(String), #[error("failed to select transactions")] DatabaseError(#[source] DatabaseError), } @@ -27,9 +29,9 @@ pub enum BlockValidationError { /// Validates a block by checking that all transactions in the proposed block have been processed by /// the validator in the past. #[instrument(target = COMPONENT, skip_all, err)] -pub async fn validate_block( +pub async fn validate_block( proposed_block: ProposedBlock, - signer: &S, + signer: &ValidatorSigner, db: &Db, ) -> Result { // Search for any proposed transactions that have not previously been validated. @@ -53,7 +55,10 @@ pub async fn validate_block( .map_err(BlockValidationError::BlockBuildingFailed)?; // Sign the header. 
- let signature = info_span!("sign_block").in_scope(|| signer.sign(&header)); + let signature = info_span!("sign_block") + .in_scope(async move || signer.sign(&header).await) + .await + .map_err(|err| BlockValidationError::BlockSigningFailed(err.to_string()))?; Ok(signature) } diff --git a/crates/validator/src/lib.rs b/crates/validator/src/lib.rs index a987304c3..44f883bfc 100644 --- a/crates/validator/src/lib.rs +++ b/crates/validator/src/lib.rs @@ -1,9 +1,11 @@ mod block_validation; mod db; mod server; +mod signers; mod tx_validation; pub use server::Validator; +pub use signers::ValidatorSigner; // CONSTANTS // ================================================================================================= diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index 7f71161a2..ac4b56e51 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -12,7 +12,7 @@ use miden_node_utils::ErrorReport; use miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_protocol::block::{BlockSigner, ProposedBlock}; +use miden_protocol::block::ProposedBlock; use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; use miden_tx::utils::{Deserializable, Serializable}; use tokio::net::TcpListener; @@ -22,10 +22,10 @@ use tower_http::catch_panic::CatchPanicLayer; use tower_http::trace::TraceLayer; use tracing::{info_span, instrument}; -use crate::COMPONENT; use crate::block_validation::validate_block; use crate::db::{insert_transaction, load}; use crate::tx_validation::validate_transaction; +use crate::{COMPONENT, ValidatorSigner}; // VALIDATOR // ================================================================================ @@ -33,7 +33,7 @@ use crate::tx_validation::validate_transaction; /// The handle into running the gRPC validator server. 
/// /// Facilitates the running of the gRPC server which implements the validator API. -pub struct Validator { +pub struct Validator { /// The address of the validator component. pub address: SocketAddr, /// Server-side timeout for an individual gRPC request. @@ -42,13 +42,13 @@ pub struct Validator { pub grpc_timeout: Duration, /// The signer used to sign blocks. - pub signer: S, + pub signer: ValidatorSigner, /// The data directory for the validator component's database files. pub data_directory: PathBuf, } -impl Validator { +impl Validator { /// Serves the validator RPC API. /// /// Executes in place (i.e. not spawned) and will run indefinitely until a fatal error is @@ -99,19 +99,19 @@ impl Validator { /// The underlying implementation of the gRPC validator server. /// /// Implements the gRPC API for the validator. -struct ValidatorServer { - signer: S, +struct ValidatorServer { + signer: ValidatorSigner, db: Arc, } -impl ValidatorServer { - fn new(signer: S, db: Db) -> Self { +impl ValidatorServer { + fn new(signer: ValidatorSigner, db: Db) -> Self { Self { signer, db: db.into() } } } #[tonic::async_trait] -impl api_server::Api for ValidatorServer { +impl api_server::Api for ValidatorServer { /// Returns the status of the validator. 
async fn status( &self, diff --git a/crates/validator/src/signers/kms.rs b/crates/validator/src/signers/kms.rs new file mode 100644 index 000000000..1d52d4e24 --- /dev/null +++ b/crates/validator/src/signers/kms.rs @@ -0,0 +1,125 @@ +use anyhow::Context; +use aws_sdk_kms::error::SdkError; +use aws_sdk_kms::operation::sign::SignError; +use aws_sdk_kms::types::SigningAlgorithmSpec; +use k256::PublicKey as K256PublicKey; +use k256::elliptic_curve::sec1::ToEncodedPoint; +use k256::pkcs8::DecodePublicKey as _; +use miden_node_utils::signer::BlockSigner; +use miden_protocol::block::BlockHeader; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature}; +use miden_protocol::crypto::hash::keccak::Keccak256; +use miden_tx::utils::{Deserializable, DeserializationError, Serializable}; + +// KMS SIGNER ERROR +// ================================================================================================ + +#[derive(Debug, thiserror::Error)] +pub enum KmsSignerError { + /// The KMS backend errored out. + #[error("KMS service failure")] + KmsServiceError(#[source] Box>), + /// The KMS backend did not error but returned an empty signature. + #[error("KMS request returned an empty result")] + EmptyBlob, + /// The KMS backend returned a signature with an invalid format. + #[error("k256 signature error")] + K256Error(#[source] k256::ecdsa::Error), + /// The KMS backend returned a signature with an invalid format. + #[error("invalid signature format")] + SignatureFormatError(#[source] DeserializationError), +} + +// KMS SIGNER +// ================================================================================================ + +/// Block signer that uses AWS KMS to create signatures. +pub struct KmsSigner { + key_id: String, + pub_key: PublicKey, + client: aws_sdk_kms::Client, +} + +impl KmsSigner { + /// Constructs a new KMS signer and retrieves the corresponding public key from the AWS backend. 
+ /// + /// The supplied `key_id` must be a valid AWS KMS key ID in the AWS region corresponding to the + /// typical `AWS_REGION` env var. + /// + /// A policy statement such as the following is required to allow a process on an EC2 instance + /// to use this signer: + /// ```json + /// { + /// "Sid": "AllowEc2RoleUseOfKey", + /// "Effect": "Allow", + /// "Principal": { + /// "AWS": "arn:aws:iam:::role/" + /// }, + /// "Action": [ + /// "kms:Sign", + /// "kms:Verify", + /// "kms:DescribeKey" + /// "kms:GetPublicKey" + /// ], + /// "Resource": "*" + /// }, + /// ``` + pub async fn new(key_id: impl Into) -> anyhow::Result { + let version = aws_config::BehaviorVersion::v2026_01_12(); + let config = aws_config::load_defaults(version).await; + let client = aws_sdk_kms::Client::new(&config); + let key_id = key_id.into(); + + // Retrieve DER-encoded SPKI. + let pub_key_output = client.get_public_key().key_id(key_id.clone()).send().await?; + let spki_der = pub_key_output.public_key().ok_or(KmsSignerError::EmptyBlob)?.as_ref(); + + // Decode the DER-encoded SPKI and compress it. + let kpub = K256PublicKey::from_public_key_der(spki_der) + .context("failed to parse SPKI as secp256k1")?; + let compressed = kpub.to_encoded_point(true); // 33 bytes, 0x02/0x03 || X. + let sec1_compressed = compressed.as_bytes(); + + // Decode the compressed SPKI as a Miden public key. + let pub_key = PublicKey::read_from_bytes(sec1_compressed)?; + Ok(Self { key_id, pub_key, client }) + } +} + +impl BlockSigner for KmsSigner { + type Error = KmsSignerError; + + async fn sign(&self, header: &BlockHeader) -> Result { + // The Validator produces Ethereum-style ECDSA (secp256k1) signatures over Keccak-256 + // digests. AWS KMS does not support SHA-3 hashing for ECDSA keys + // (ECC_SECG_P256K1 being the corresponding AWS key-spec), so we pre-hash the + // message and pass MessageType::Digest. KMS signs the provided 32-byte digest + // verbatim. 
+ let msg = header.commitment().to_bytes(); + let digest = Keccak256::hash(&msg); + + // Request signature from KMS backend. + let sign_output = self + .client + .sign() + .key_id(&self.key_id) + .signing_algorithm(SigningAlgorithmSpec::EcdsaSha256) + .message_type(aws_sdk_kms::types::MessageType::Digest) + .message(digest.to_bytes().into()) + .send() + .await + .map_err(Box::from) + .map_err(KmsSignerError::KmsServiceError)?; + + // Decode DER-encoded signature. + let sig_der = sign_output.signature().ok_or(KmsSignerError::EmptyBlob)?; + // Recovery id is not used by verify(pk), so 0 is fine. + let recovery_id = 0; + Signature::from_der(sig_der.as_ref(), recovery_id) + .map_err(KmsSignerError::SignatureFormatError) + } + + fn public_key(&self) -> PublicKey { + self.pub_key.clone() + } +} diff --git a/crates/validator/src/signers/mod.rs b/crates/validator/src/signers/mod.rs new file mode 100644 index 000000000..9656e045c --- /dev/null +++ b/crates/validator/src/signers/mod.rs @@ -0,0 +1,44 @@ +mod kms; +pub use kms::KmsSigner; +use miden_node_utils::signer::BlockSigner; +use miden_protocol::block::BlockHeader; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{SecretKey, Signature}; + +// VALIDATOR SIGNER +// ================================================================================================= + +/// Signer that the Validator uses to sign blocks. +pub enum ValidatorSigner { + Kms(KmsSigner), + Local(SecretKey), +} + +impl ValidatorSigner { + /// Constructs a signer which uses an AWS KMS key for signing. + /// + /// See [`KmsSigner::new`] for details as to env var configuration and AWS IAM policies required + /// to use this functionality. + pub async fn new_kms(key_id: impl Into) -> anyhow::Result { + let kms_signer = KmsSigner::new(key_id).await?; + Ok(Self::Kms(kms_signer)) + } + + /// Constructs a signer which uses a local secret key for signing. 
+ pub fn new_local(secret_key: SecretKey) -> Self { + Self::Local(secret_key) + } + + /// Signs a block header using the configured signer. + pub async fn sign(&self, header: &BlockHeader) -> anyhow::Result { + match self { + Self::Kms(signer) => { + let sig = signer.sign(header).await?; + Ok(sig) + }, + Self::Local(signer) => { + let sig = ::sign(signer, header).await?; + Ok(sig) + }, + } + } +} From 3b9298cbebfe67d8baa3de0c29c190929dfa6de8 Mon Sep 17 00:00:00 2001 From: Marti Date: Thu, 26 Feb 2026 18:56:49 +0100 Subject: [PATCH 57/77] chore: migrate to base 0904e2c61 (#1691) --- CHANGELOG.md | 2 +- Cargo.lock | 547 +++++++++--------- bin/network-monitor/src/counter.rs | 12 +- bin/network-monitor/src/deploy/counter.rs | 6 +- bin/node/Dockerfile | 2 +- bin/stress-test/src/seeding/mod.rs | 15 +- crates/block-producer/src/server/tests.rs | 7 +- .../src/db/models/queries/tests.rs | 18 +- crates/proto/src/domain/note.rs | 2 +- crates/rpc/src/tests.rs | 2 +- .../store/src/db/models/queries/accounts.rs | 6 +- .../src/db/models/queries/accounts/tests.rs | 61 +- crates/store/src/db/models/queries/notes.rs | 2 +- crates/store/src/db/tests.rs | 171 +++--- crates/store/src/genesis/config/mod.rs | 22 +- .../agglayer_faucet_eth.mac | Bin 8521 -> 17931 bytes .../agglayer_faucet_usdc.mac | Bin 8521 -> 17931 bytes .../samples/02-with-account-files/bridge.mac | Bin 8346 -> 17955 bytes crates/store/src/genesis/config/tests.rs | 7 +- crates/store/src/genesis/mod.rs | 2 +- crates/store/src/inner_forest/tests.rs | 10 +- 21 files changed, 458 insertions(+), 436 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e8798870f..9016482a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - [BREAKING] Remove `SynState` and introduce `SyncChainMmr` ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). - Introduce `SyncChainMmr` RPC endpoint to sync chain MMR deltas within specified block ranges ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). 
- Fixed `TransactionHeader` serialization for row insertion on database & fixed transaction cursor on retrievals ([#1701](https://github.com/0xMiden/miden-node/issues/1701)). +- Added KMS signing support in validator ([#1677](https://github.com/0xMiden/miden-node/pull/1677)). ### Changes @@ -24,7 +25,6 @@ - Improved tracing span fields ([#1650](https://github.com/0xMiden/miden-node/pull/1650)) - Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/miden-node/pull/1662)). - [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/miden-node/pull/1688)). -- Added KMS signing support in validator ([#1677](https://github.com/0xMiden/miden-node/pull/1677)). ## v0.13.7 (2026-02-25) diff --git a/Cargo.lock b/Cargo.lock index f2df1dd16..a08d6641e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -118,9 +118,12 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.102" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" +dependencies = [ + "backtrace", +] [[package]] name = "arrayref" @@ -157,7 +160,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -204,9 +207,9 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "1.2.13" +version = "1.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d203b0bf2626dcba8665f5cd0871d7c2c0930223d6b6be9097592fea21242d0" +checksum = 
"e26bbf46abc608f2dc61fd6cb3b7b0665497cc259a21520151ed98f8b37d2c79" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -216,9 +219,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.16.0" +version = "1.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9" +checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" dependencies = [ "aws-lc-sys", "zeroize", @@ -238,9 +241,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.7.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede2ddc593e6c8acc6ce3358c28d6677a6dc49b65ba4b37a2befe14a11297e75" +checksum = "b0f92058d22a46adf53ec57a6a96f34447daf02bff52e8fb956c66bcd5c6ac12" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -263,9 +266,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.102.0" +version = "1.100.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22b682ef733ec24c300b11cec2df9bfea7ee4bf48ab2030c832e27db92b69c68" +checksum = "723700afe7459a33d1ac30852e9208b801946c032625cc8c808f57b9563bb5c7" dependencies = [ "aws-credential-types", "aws-runtime", @@ -287,9 +290,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.95.0" +version = "1.94.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00c5ff27c6ba2cbd95e6e26e2e736676fdf6bcf96495b187733f521cfe4ce448" +checksum = "699da1961a289b23842d88fe2984c6ff68735fdf9bdcbc69ceaeb2491c9bf434" dependencies = [ "aws-credential-types", "aws-runtime", @@ -311,9 +314,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.97.0" +version = "1.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d186f1e5a3694a188e5a0640b3115ccc6e084d104e16fd6ba968dca072ffef8" +checksum = 
"e3e3a4cb3b124833eafea9afd1a6cc5f8ddf3efefffc6651ef76a03cbc6b4981" dependencies = [ "aws-credential-types", "aws-runtime", @@ -335,9 +338,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.99.0" +version = "1.98.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9acba7c62f3d4e2408fa998a3a8caacd8b9a5b5549cf36e2372fbdae329d5449" +checksum = "89c4f19655ab0856375e169865c91264de965bd74c407c7f1e403184b1049409" dependencies = [ "aws-credential-types", "aws-runtime", @@ -360,9 +363,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.4.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37411f8e0f4bea0c3ca0958ce7f18f6439db24d555dbd809787262cd00926aa9" +checksum = "68f6ae9b71597dc5fd115d52849d7a5556ad9265885ad3492ea8d73b93bbc46e" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -382,9 +385,9 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "1.2.13" +version = "1.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc50d0f63e714784b84223abd7abbc8577de8c35d699e0edd19f0a88a08ae13" +checksum = "3cba48474f1d6807384d06fec085b909f5807e16653c5af5c45dfe89539f0b70" dependencies = [ "futures-util", "pin-project-lite", @@ -393,9 +396,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.63.5" +version = "0.63.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d619373d490ad70966994801bc126846afaa0d1ee920697a031f0cf63f2568e7" +checksum = "af4a8a5fe3e4ac7ee871237c340bbce13e982d37543b65700f4419e039f5d78e" dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", @@ -414,9 +417,9 @@ dependencies = [ [[package]] name = "aws-smithy-http-client" -version = "1.1.11" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00ccbb08c10f6bcf912f398188e42ee2eab5f1767ce215a02a73bc5df1bbdd95" +checksum = 
"0709f0083aa19b704132684bc26d3c868e06bd428ccc4373b0b55c3e8748a58b" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -433,7 +436,7 @@ dependencies = [ "hyper-util", "pin-project-lite", "rustls 0.21.12", - "rustls 0.23.37", + "rustls 0.23.36", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -472,9 +475,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.10.2" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ccf7f6eba8b2dcf8ce9b74806c6c185659c311665c4bf8d6e71ebd454db6bf" +checksum = "8fd3dfc18c1ce097cf81fced7192731e63809829c6cbf933c1ec47452d08e1aa" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -497,9 +500,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.11.5" +version = "1.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4af6e5def28be846479bbeac55aa4603d6f7986fc5da4601ba324dd5d377516" +checksum = "8c55e0837e9b8526f49e0b9bfa9ee18ddee70e853f5bc09c5d11ebceddcb0fec" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -514,9 +517,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.4.5" +version = "1.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ca2734c16913a45343b37313605d84e7d8b34a4611598ce1d25b35860a2bed3" +checksum = "576b0d6991c9c32bc14fc340582ef148311f924d41815f641a308b5d11e8e7cd" dependencies = [ "base64-simd", "bytes", @@ -549,9 +552,9 @@ dependencies = [ [[package]] name = "aws-types" -version = "1.3.13" +version = "1.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0470cc047657c6e286346bdf10a8719d26efd6a91626992e0e64481e44323e96" +checksum = "6c50f3cdf47caa8d01f2be4a6663ea02418e892f9bbfd82c7b9a3a37eaccdd3a" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -699,13 +702,13 @@ dependencies = [ "bitflags", "cexpr", "clang-sys", - "itertools 0.13.0", + "itertools 0.10.5", 
"proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -725,9 +728,9 @@ checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitflags" -version = "2.11.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "blake3" @@ -754,9 +757,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.20.2" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "byteorder" @@ -798,9 +801,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.56" +version = "1.2.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" +checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583" dependencies = [ "find-msvc-tools", "jobserver", @@ -861,9 +864,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.44" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ "iana-time-zone", "js-sys", @@ -923,9 +926,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.60" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" +checksum = 
"3e34525d5bbbd55da2bb745d34b36121baac88d07619a9a09cfcf4a6c0832785" dependencies = [ "clap_builder", "clap_derive", @@ -933,9 +936,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.60" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" +checksum = "59a20016a20a3da95bef50ec7238dbd09baeef4311dcdd38ec15aba69812fb61" dependencies = [ "anstream", "anstyle", @@ -952,14 +955,14 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] name = "clap_lex" -version = "1.0.0" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" +checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" [[package]] name = "cmake" @@ -1146,7 +1149,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1170,7 +1173,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1181,7 +1184,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1237,9 +1240,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.8" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" dependencies = [ "powerfmt", ] @@ -1262,7 +1265,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1292,7 +1295,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn 
2.0.117", + "syn 2.0.114", ] [[package]] @@ -1312,7 +1315,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" dependencies = [ - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1341,7 +1344,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1367,7 +1370,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1442,9 +1445,9 @@ dependencies = [ [[package]] name = "ena" -version = "0.14.4" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabffdaee24bd1bf95c5ef7cec31260444317e72ea56c4c91750e8b7ee58d5f1" +checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" dependencies = [ "log", ] @@ -1460,9 +1463,9 @@ dependencies = [ [[package]] name = "env_filter" -version = "1.0.0" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f" +checksum = "1bf3c259d255ca70051b30e2e95b5446cdb8949ac4cd22c0d7fd634d89f568e2" dependencies = [ "log", "regex", @@ -1470,9 +1473,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.9" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" +checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" dependencies = [ "anstream", "anstyle", @@ -1521,9 +1524,9 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "find-msvc-tools" -version = "0.1.9" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" +checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] name = "fixedbitset" @@ -1572,9 +1575,9 @@ dependencies = [ [[package]] name = "fs-err" -version = "3.3.0" +version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fde052dbfc920003cfd2c8e2c6e6d4cc7c1091538c3a24226cec0665ab08c0" +checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7" dependencies = [ "autocfg", ] @@ -1587,9 +1590,9 @@ checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" [[package]] name = "futures" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1602,9 +1605,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1612,15 +1615,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" +checksum = 
"1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1629,32 +1632,32 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] name = "futures-sink" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1664,9 +1667,9 @@ checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1676,6 +1679,7 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", + 
"pin-utils", "slab", ] @@ -2022,7 +2026,7 @@ dependencies = [ "http 1.4.0", "hyper 1.8.1", "hyper-util", - "rustls 0.23.37", + "rustls 0.23.36", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -2045,13 +2049,14 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.20" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64", "bytes", "futures-channel", + "futures-core", "futures-util", "http 1.4.0", "http-body 1.0.1", @@ -2070,9 +2075,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.65" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2261,6 +2266,15 @@ version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.13.0" @@ -2287,9 +2301,9 @@ checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jiff" -version = "0.2.21" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e3d65f018c6ae946ab16e80944b97096ed73c35b221d1c478a6c81d8f57940" +checksum = "e67e8da4c49d6d9909fe03361f9b620f58898859f5c7aded68351e85e71ecf50" dependencies = [ "jiff-static", "log", @@ -2300,13 +2314,13 @@ dependencies = [ 
[[package]] name = "jiff-static" -version = "0.2.21" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17c2b211d863c7fde02cbea8a3c1a439b98e109286554f2860bdded7ff83818" +checksum = "e0c84ee7f197eca9a86c6fd6cb771e55eb991632f15f2bc3ca6ec838929e6e78" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -2343,9 +2357,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.90" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14dc6f6450b3f6d4ed5b16327f38fed626d375a886159ca555bd7822c0c3a5a6" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", @@ -2367,9 +2381,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] @@ -2385,7 +2399,7 @@ dependencies = [ "ena", "itertools 0.14.0", "lalrpop-util", - "petgraph 0.7.1", + "petgraph", "regex", "regex-syntax", "sha3", @@ -2418,9 +2432,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "libc" -version = "0.2.182" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] name = "libloading" @@ -2465,9 +2479,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.24" +version = "1.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4735e9cbde5aac84a5ce588f6b23a90b9b0b528f6c5a8db8a4aff300463a0839" +checksum = 
"15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" dependencies = [ "cc", "pkg-config", @@ -2482,9 +2496,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" -version = "0.12.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" @@ -2529,7 +2543,7 @@ dependencies = [ "quote", "regex-syntax", "rustc_version 0.4.1", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -2599,19 +2613,20 @@ checksum = "120fa187be19d9962f0926633453784691731018a2bf936ddb4e29101b79c4a7" [[package]] name = "memchr" -version = "2.8.0" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "miden-agglayer" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" +source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" dependencies = [ "fs-err", "miden-assembly", "miden-core", "miden-core-lib", + "miden-crypto", "miden-protocol", "miden-standards", "miden-utils-sync", @@ -2674,7 +2689,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" +source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" dependencies = [ "miden-protocol", "thiserror 2.0.18", @@ -2756,12 +2771,12 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.19.6" +version = "0.19.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550b5656b791fec59c0b6089b4d0368db746a34749ccd47e59afb01aa877e9e" +checksum = "f40e95b9c7c99ed6bbf073d9e02721d812dedd2c195019c0a0e0a3dbb9cbf034" dependencies = [ "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -2826,7 +2841,7 @@ dependencies = [ "supports-color", "supports-hyperlinks", "supports-unicode", - "syn 2.0.117", + "syn 2.0.114", "terminal_size 0.3.0", "textwrap", "thiserror 2.0.18", @@ -2842,7 +2857,7 @@ checksum = "86a905f3ea65634dd4d1041a4f0fd0a3e77aa4118341d265af1a94339182222f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -2949,7 +2964,7 @@ name = "miden-node-grpc-error-macro" version = "0.14.0" dependencies = [ "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3126,7 +3141,7 @@ name = "miden-node-test-macro" version = "0.1.0" dependencies = [ "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3204,7 +3219,7 @@ dependencies = [ [[package]] name = "miden-protocol" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" +source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" dependencies = [ "bech32", "fs-err", @@ -3226,7 +3241,7 @@ dependencies = [ "semver 1.0.27", "serde", "thiserror 2.0.18", - "toml 0.9.12+spec-1.1.0", + "toml 0.9.11+spec-1.1.0", "walkdir", "winter-rand-utils", ] @@ -3234,11 +3249,11 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" +source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3312,7 +3327,7 @@ dependencies = [ [[package]] name = "miden-standards" version = "0.14.0" -source = 
"git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" +source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" dependencies = [ "fs-err", "miden-assembly", @@ -3329,7 +3344,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" +source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3337,6 +3352,7 @@ dependencies = [ "miden-assembly", "miden-block-prover", "miden-core-lib", + "miden-crypto", "miden-processor", "miden-protocol", "miden-standards", @@ -3351,7 +3367,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" +source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" dependencies = [ "miden-processor", "miden-protocol", @@ -3364,7 +3380,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" +source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" dependencies = [ "miden-protocol", "miden-tx", @@ -3465,7 +3481,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3475,7 +3491,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36c791ecdf977c99f45f23280405d7723727470f6689a5e6dbf513ac547ae10d" dependencies = [ "serde", - "toml 0.9.12+spec-1.1.0", + "toml 0.9.11+spec-1.1.0", ] [[package]] @@ -3608,7 +3624,7 @@ checksum = 
"ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3769,9 +3785,9 @@ checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" [[package]] name = "owo-colors" -version = "4.3.0" +version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d211803b9b6b570f68772237e415a029d5a50c65d382910b879fb19d3271f94d" +checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" [[package]] name = "page_size" @@ -3828,17 +3844,6 @@ dependencies = [ "indexmap", ] -[[package]] -name = "petgraph" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" -dependencies = [ - "fixedbitset", - "hashbrown 0.15.5", - "indexmap", -] - [[package]] name = "phf_shared" version = "0.11.3" @@ -3865,7 +3870,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3937,15 +3942,15 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.13.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" +checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" [[package]] name = "portable-atomic-util" -version = "0.2.5" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" dependencies = [ "portable-atomic", ] @@ -3997,7 +4002,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" 
dependencies = [ "proc-macro2", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4020,9 +4025,9 @@ dependencies = [ [[package]] name = "proptest" -version = "1.10.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532" +checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ "bit-set", "bit-vec", @@ -4045,7 +4050,7 @@ checksum = "fb6dc647500e84a25a85b100e76c85b8ace114c209432dc174f20aac11d4ed6c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4060,22 +4065,23 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.14.3" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" +checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ "heck", "itertools 0.14.0", "log", "multimap", - "petgraph 0.8.3", + "once_cell", + "petgraph", "prettyplease", "prost", "prost-types", "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.117", + "syn 2.0.114", "tempfile", ] @@ -4089,7 +4095,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4106,9 +4112,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.14.3" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" +checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" dependencies = [ "prost", ] @@ -4142,9 +4148,9 @@ dependencies = [ [[package]] name = "pulldown-cmark" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c41efbf8f90ac44de7f3a868f0867851d261b56291732d0cbf7cceaaeb55a6" +checksum = 
"1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ "bitflags", "memchr", @@ -4153,9 +4159,9 @@ dependencies = [ [[package]] name = "pulldown-cmark-to-cmark" -version = "22.0.0" +version = "21.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50793def1b900256624a709439404384204a5dc3a6ec580281bfaac35e882e90" +checksum = "8246feae3db61428fd0bb94285c690b460e4517d83152377543ca802357785f1" dependencies = [ "pulldown-cmark", ] @@ -4178,7 +4184,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.37", + "rustls 0.23.36", "socket2 0.6.2", "thiserror 2.0.18", "tokio", @@ -4199,7 +4205,7 @@ dependencies = [ "rand", "ring", "rustc-hash", - "rustls 0.23.37", + "rustls 0.23.36", "rustls-pki-types", "slab", "thiserror 2.0.18", @@ -4333,9 +4339,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.12.3" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", @@ -4345,9 +4351,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.14" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", @@ -4362,9 +4368,9 @@ checksum = "cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973" [[package]] name = "regex-syntax" -version = "0.8.10" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "relative-path" 
@@ -4395,7 +4401,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.37", + "rustls 0.23.36", "rustls-pki-types", "rustls-platform-verifier", "serde", @@ -4482,7 +4488,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.117", + "syn 2.0.114", "unicode-ident", ] @@ -4526,19 +4532,19 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "rustix" -version = "1.1.4" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags", "errno", "libc", - "linux-raw-sys 0.12.1", + "linux-raw-sys 0.11.0", "windows-sys 0.61.2", ] @@ -4556,9 +4562,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.37" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ "aws-lc-rs", "log", @@ -4603,7 +4609,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.37", + "rustls 0.23.36", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.103.9", @@ -4661,9 +4667,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.23" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "same-file" @@ -4736,9 +4742,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.7.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" +checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ "bitflags", "core-foundation 0.10.1", @@ -4749,9 +4755,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.17.0" +version = "2.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" dependencies = [ "core-foundation-sys", "libc", @@ -4809,7 +4815,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4859,9 +4865,9 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.4.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "911bd979bf1070a3f3aa7b691a3b3e9968f339ceeec89e08c280a8a22207a32f" +checksum = "0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555" dependencies = [ "futures-executor", "futures-util", @@ -4874,13 +4880,13 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.4.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a7d91949b85b0d2fb687445e448b40d322b6b3e4af6b44a29b21d9a5f33e6d9" +checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4947,9 +4953,9 @@ checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" -version = "0.4.12" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" 
[[package]] name = "smallvec" @@ -5087,9 +5093,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.117" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -5113,14 +5119,14 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] name = "system-configuration" -version = "0.7.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ "bitflags", "core-foundation 0.9.4", @@ -5145,14 +5151,14 @@ checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" [[package]] name = "tempfile" -version = "3.26.0" +version = "3.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" dependencies = [ "fastrand", - "getrandom 0.4.1", + "getrandom 0.3.4", "once_cell", - "rustix 1.1.4", + "rustix 1.1.3", "windows-sys 0.61.2", ] @@ -5190,7 +5196,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ - "rustix 1.1.4", + "rustix 1.1.3", "windows-sys 0.60.2", ] @@ -5237,7 +5243,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -5248,7 +5254,7 @@ checksum = 
"ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -5351,7 +5357,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -5370,7 +5376,7 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.37", + "rustls 0.23.36", "tokio", ] @@ -5401,9 +5407,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.12+spec-1.1.0" +version = "0.9.11+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" +checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" dependencies = [ "indexmap", "serde_core", @@ -5476,9 +5482,9 @@ checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tonic" -version = "0.14.5" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" dependencies = [ "async-trait", "axum", @@ -5507,21 +5513,21 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.14.5" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1882ac3bf5ef12877d7ed57aad87e75154c11931c2ba7e6cde5e22d63522c734" +checksum = "4c40aaccc9f9eccf2cd82ebc111adc13030d23e887244bc9cfa5d1d636049de3" dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] name = "tonic-health" -version = "0.14.5" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "f4ff0636fef47afb3ec02818f5bceb4377b8abb9d6a386aeade18bd6212f8eb7" +checksum = "2a82868bf299e0a1d2e8dce0dc33a46c02d6f045b2c1f1d6cc8dc3d0bf1812ef" dependencies = [ "prost", "tokio", @@ -5532,9 +5538,9 @@ dependencies = [ [[package]] name = "tonic-prost" -version = "0.14.5" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" dependencies = [ "bytes", "prost", @@ -5543,25 +5549,25 @@ dependencies = [ [[package]] name = "tonic-prost-build" -version = "0.14.5" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3144df636917574672e93d0f56d7edec49f90305749c668df5101751bb8f95a" +checksum = "b4a16cba4043dc3ff43fcb3f96b4c5c154c64cbd18ca8dce2ab2c6a451d058a2" dependencies = [ "prettyplease", "proc-macro2", "prost-build", "prost-types", "quote", - "syn 2.0.117", + "syn 2.0.114", "tempfile", "tonic-build", ] [[package]] name = "tonic-reflection" -version = "0.14.5" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf0685a51e6d02b502ba0764002e766b7f3042aed13d9234925b6ffbfa3fca7" +checksum = "34da53e8387581d66db16ff01f98a70b426b091fdf76856e289d5c1bd386ed7b" dependencies = [ "prost", "prost-types", @@ -5573,9 +5579,9 @@ dependencies = [ [[package]] name = "tonic-web" -version = "0.14.5" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29453d84de05f4f1b573db22e6f9f6c95c189a6089a440c9a098aa9dea009299" +checksum = "75214f6b6bd28c19aa752ac09fdf0eea546095670906c21fe3940e180a4c43f2" dependencies = [ "base64", "bytes", @@ -5685,7 +5691,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -5777,9 +5783,9 @@ checksum = 
"e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.116" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47c635f0191bd3a2941013e5062667100969f8c4e9cd787c14f977265d73616e" +checksum = "3e17e807bff86d2a06b52bca4276746584a78375055b6e45843925ce2802b335" dependencies = [ "dissimilar", "glob", @@ -5788,7 +5794,7 @@ dependencies = [ "serde_json", "target-triple", "termcolor", - "toml 1.0.3+spec-1.1.0", + "toml 0.9.11+spec-1.1.0", ] [[package]] @@ -5811,9 +5817,9 @@ checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" -version = "1.0.24" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-linebreak" @@ -5888,9 +5894,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.21.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" +checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" dependencies = [ "js-sys", "wasm-bindgen", @@ -5983,9 +5989,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.113" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60722a937f594b7fde9adb894d7c092fc1bb6612897c46368d18e7a20208eff2" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -5996,9 +6002,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.63" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8a89f4650b770e4521aa6573724e2aed4704372151bd0de9d16a3bbabb87441a" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", "futures-util", @@ -6010,9 +6016,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.113" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac8c6395094b6b91c4af293f4c79371c163f9a6f56184d2c9a85f5a95f3950" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6020,22 +6026,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.113" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3fabce6159dc20728033842636887e4877688ae94382766e00b180abac9d60" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.113" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0e091bdb824da87dc01d967388880d017a0a9bc4f3bdc0d86ee9f9336e3bb5" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] @@ -6089,9 +6095,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.90" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705eceb4ce901230f8625bd1d665128056ccbe4b7408faa625eec1ba80f59a97" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -6168,7 +6174,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -6179,7 +6185,7 @@ checksum = 
"3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -6244,6 +6250,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.60.2" @@ -6566,7 +6581,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d31a19dae58475d019850e25b0170e94b16d382fbf6afee9c0e80fdc935e73e" dependencies = [ "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -6657,7 +6672,7 @@ dependencies = [ "heck", "indexmap", "prettyplease", - "syn 2.0.117", + "syn 2.0.114", "wasm-metadata", "wit-bindgen-core", "wit-component", @@ -6673,7 +6688,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", "wit-bindgen-core", "wit-bindgen-rust", ] @@ -6762,28 +6777,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.39" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" +checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.39" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" +checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] 
@@ -6803,7 +6818,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", "synstructure", ] @@ -6843,11 +6858,11 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] name = "zmij" -version = "1.0.21" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" +checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index c2b9d0835..b1633b218 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -21,12 +21,10 @@ use miden_protocol::note::{ Note, NoteAssets, NoteAttachment, - NoteExecutionHint, NoteMetadata, NoteRecipient, NoteScript, NoteStorage, - NoteTag, NoteType, }; use miden_protocol::transaction::{InputNotes, PartialBlockchain, TransactionArgs}; @@ -34,7 +32,7 @@ use miden_protocol::utils::Deserializable; use miden_protocol::{Felt, Word}; use miden_standards::account::interface::{AccountInterface, AccountInterfaceExt}; use miden_standards::code_builder::CodeBuilder; -use miden_standards::note::NetworkAccountTarget; +use miden_standards::note::{NetworkAccountTarget, NoteExecutionHint}; use miden_tx::auth::BasicAuthenticator; use miden_tx::utils::Serializable; use miden_tx::{LocalTransactionProver, TransactionExecutor}; @@ -858,12 +856,8 @@ fn create_network_note( .context("Failed to create NetworkAccountTarget for counter account")?; let attachment: NoteAttachment = target.into(); - let metadata = NoteMetadata::new( - wallet_account.id(), - NoteType::Public, - NoteTag::with_account_target(counter_account.id()), - ) - .with_attachment(attachment); + let metadata = + NoteMetadata::new(wallet_account.id(), 
NoteType::Public).with_attachment(attachment); let serial_num = Word::new([ Felt::new(rng.random()), diff --git a/bin/network-monitor/src/deploy/counter.rs b/bin/network-monitor/src/deploy/counter.rs index a5ab3d363..e517beb06 100644 --- a/bin/network-monitor/src/deploy/counter.rs +++ b/bin/network-monitor/src/deploy/counter.rs @@ -3,6 +3,7 @@ use std::path::Path; use anyhow::Result; +use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::{ Account, AccountBuilder, @@ -53,8 +54,9 @@ pub fn create_counter_account(owner_account_id: AccountId) -> Result { let component_code = CodeBuilder::default().compile_component_code("counter::program", script)?; - let account_code = AccountComponent::new(component_code, vec![counter_slot, owner_id_slot])? - .with_supports_all_types(); + let metadata = AccountComponentMetadata::new("counter::program").with_supports_all_types(); + let account_code = + AccountComponent::new(component_code, vec![counter_slot, owner_id_slot], metadata)?; let incr_nonce_auth: AccountComponent = IncrNonceAuthComponent.into(); diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 79464a987..30aef2637 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.91-slim-bullseye AS chef +FROM rust:1.91-slim-bookworm AS chef # Install build dependencies. RocksDB is compiled from source by librocksdb-sys. 
RUN apt-get update && \ apt-get -y upgrade && \ diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index c77d50c77..70cbf04fd 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -16,7 +16,6 @@ use miden_protocol::account::{ AccountBuilder, AccountDelta, AccountId, - AccountStorage, AccountStorageMode, AccountType, }; @@ -50,7 +49,7 @@ use miden_protocol::{Felt, ONE, Word}; use miden_standards::account::auth::AuthFalcon512Rpo; use miden_standards::account::faucets::BasicFungibleFaucet; use miden_standards::account::wallets::BasicWallet; -use miden_standards::note::create_p2id_note; +use miden_standards::note::P2idNote; use rand::Rng; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use rayon::prelude::ParallelSlice; @@ -308,7 +307,7 @@ fn create_accounts_and_notes( /// specified `faucet_id` and sent to the specified target account. fn create_note(faucet_id: AccountId, target_id: AccountId, rng: &mut RpoRandomCoin) -> Note { let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 10).unwrap()); - create_p2id_note( + P2idNote::create( faucet_id, target_id, vec![asset], @@ -418,7 +417,7 @@ fn create_consume_note_tx( ProvenTransactionBuilder::new( account.id(), init_hash, - account.commitment(), + account.to_commitment(), account_delta_commitment, block_ref.block_num(), block_ref.commitment(), @@ -439,13 +438,13 @@ fn create_emit_note_tx( faucet: &mut Account, output_notes: Vec, ) -> ProvenTransaction { - let initial_account_hash = faucet.commitment(); + let initial_account_hash = faucet.to_commitment(); - let metadata_slot_name = AccountStorage::faucet_sysdata_slot(); + let metadata_slot_name = BasicFungibleFaucet::metadata_slot(); let slot = faucet.storage().get_item(metadata_slot_name).unwrap(); faucet .storage_mut() - .set_item(metadata_slot_name, [slot[0], slot[1], slot[2], slot[3] + Felt::new(10)].into()) + .set_item(metadata_slot_name, [slot[0] + Felt::new(10), slot[1], slot[2], 
slot[3]].into()) .unwrap(); faucet.increment_nonce(ONE).unwrap(); @@ -453,7 +452,7 @@ fn create_emit_note_tx( ProvenTransactionBuilder::new( faucet.id(), initial_account_hash, - faucet.commitment(), + faucet.to_commitment(), Word::empty(), block_ref.block_num(), block_ref.commitment(), diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index b97a946a9..63aa983db 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -5,8 +5,7 @@ use miden_node_proto::generated::block_producer::api_client as block_producer_cl use miden_node_store::{GenesisState, Store}; use miden_node_utils::fee::test_fee_params; use miden_node_validator::{Validator, ValidatorSigner}; -use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; -use miden_protocol::testing::random_signer::RandomBlockSigner as _; +use miden_protocol::testing::random_secret_key::random_secret_key; use tokio::net::TcpListener; use tokio::time::sleep; use tokio::{runtime, task}; @@ -49,7 +48,7 @@ async fn block_producer_startup_is_robust_to_network_failures() { Validator { address: validator_addr, grpc_timeout, - signer: ValidatorSigner::new_local(SecretKey::random()), + signer: ValidatorSigner::new_local(random_secret_key()), data_directory, } .serve() @@ -130,7 +129,7 @@ async fn start_store( store_addr: std::net::SocketAddr, data_directory: &std::path::Path, ) -> runtime::Runtime { - let genesis_state = GenesisState::new(vec![], test_fee_params(), 1, 1, SecretKey::random()); + let genesis_state = GenesisState::new(vec![], test_fee_params(), 1, 1, random_secret_key()); Store::bootstrap(genesis_state.clone(), data_directory) .await .expect("store should bootstrap"); diff --git a/crates/ntx-builder/src/db/models/queries/tests.rs b/crates/ntx-builder/src/db/models/queries/tests.rs index 0db95c018..7bd9b2cfe 100644 --- a/crates/ntx-builder/src/db/models/queries/tests.rs +++ 
b/crates/ntx-builder/src/db/models/queries/tests.rs @@ -4,15 +4,19 @@ use diesel::prelude::*; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::note::SingleTargetNetworkNote; use miden_protocol::Word; -use miden_protocol::account::{AccountId, AccountStorageMode, AccountType}; +use miden_protocol::account::{ + AccountComponentMetadata, + AccountId, + AccountStorageMode, + AccountType, +}; use miden_protocol::block::BlockNumber; -use miden_protocol::note::NoteExecutionHint; use miden_protocol::testing::account_id::{ ACCOUNT_ID_REGULAR_NETWORK_ACCOUNT_IMMUTABLE_CODE, AccountIdBuilder, }; use miden_protocol::transaction::TransactionId; -use miden_standards::note::NetworkAccountTarget; +use miden_standards::note::{NetworkAccountTarget, NoteExecutionHint}; use miden_standards::testing::note::NoteBuilder; use rand_chacha::ChaCha20Rng; use rand_chacha::rand_core::SeedableRng; @@ -544,8 +548,12 @@ fn mock_account(_account_id: NetworkAccountId) -> miden_protocol::account::Accou .compile_component_code("test::interface", "pub proc test_proc push.1.2 add end") .unwrap(); - let component = - AccountComponent::new(component_code, vec![]).unwrap().with_supports_all_types(); + let component = AccountComponent::new( + component_code, + vec![], + AccountComponentMetadata::mock("test").with_supports_all_types(), + ) + .unwrap(); AccountBuilder::new([0u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index 1f7c9cb0d..fa8425d98 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -71,7 +71,7 @@ impl TryFrom for NoteMetadata { .map_err(|err| ConversionError::deserialization_error("NoteAttachment", err))? 
}; - Ok(NoteMetadata::new(sender, note_type, tag).with_attachment(attachment)) + Ok(NoteMetadata::new(sender, note_type).with_tag(tag).with_attachment(attachment)) } } diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 9c69d2818..172f2266a 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -74,7 +74,7 @@ fn build_test_proven_tx(account: &Account, delta: &AccountDelta) -> ProvenTransa ProvenTransactionBuilder::new( account_id, [8; 32].try_into().unwrap(), - account.commitment(), + account.to_commitment(), delta.to_commitment(), 0.into(), Word::default(), diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 9b7a8e146..ee0c5747f 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -990,9 +990,9 @@ pub(crate) fn upsert_accounts( let account = Account::try_from(delta)?; debug_assert_eq!(account_id, account.id()); - if account.commitment() != update.final_state_commitment() { + if account.to_commitment() != update.final_state_commitment() { return Err(DatabaseError::AccountCommitmentsMismatch { - calculated: account.commitment(), + calculated: account.to_commitment(), expected: update.final_state_commitment(), }); } @@ -1135,7 +1135,7 @@ pub(crate) fn apply_delta( ) -> crate::db::Result { account.apply_delta(delta)?; - let actual_commitment = account.commitment(); + let actual_commitment = account.to_commitment(); if &actual_commitment != final_state_commitment { return Err(DatabaseError::AccountCommitmentsMismatch { calculated: actual_commitment, diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index dd1ab9748..e9f529855 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -14,6 +14,7 @@ use diesel::{ use diesel_migrations::MigrationHarness; use 
miden_node_utils::fee::test_fee_params; use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, @@ -143,9 +144,13 @@ fn create_test_account_with_storage() -> (Account, AccountId) { .compile_component_code("test::interface", "pub proc foo push.1 end") .unwrap(); - let component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); let account = AccountBuilder::new([1u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) @@ -226,7 +231,7 @@ fn test_select_account_header_at_block_returns_correct_header() { let delta = AccountDelta::try_from(account.clone()).unwrap(); let account_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(delta), ); @@ -263,7 +268,7 @@ fn test_select_account_header_at_block_historical_query() { let delta_1 = AccountDelta::try_from(account.clone()).unwrap(); let account_update_1 = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(delta_1), ); @@ -302,7 +307,7 @@ fn test_select_account_vault_at_block_empty() { let delta = AccountDelta::try_from(account.clone()).unwrap(); let account_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(delta), ); @@ -329,7 +334,7 @@ fn test_upsert_accounts_inserts_storage_header() { let storage_commitment_original = account.storage().to_commitment(); let storage_slots_len = account.storage().slots().len(); - let account_commitment = 
account.commitment(); + let account_commitment = account.to_commitment(); // Create full state delta from the account let delta = AccountDelta::try_from(account).unwrap(); @@ -383,7 +388,7 @@ fn test_upsert_accounts_updates_is_latest_flag() { // Save storage commitment before moving account let storage_commitment_1 = account.storage().to_commitment(); - let account_commitment_1 = account.commitment(); + let account_commitment_1 = account.to_commitment(); // First update with original account - full state delta let delta_1 = AccountDelta::try_from(account).unwrap(); @@ -406,9 +411,13 @@ fn test_upsert_accounts_updates_is_latest_flag() { .compile_component_code("test::interface", "pub proc foo push.1 end") .unwrap(); - let component_2 = AccountComponent::new(account_component_code, component_storage_modified) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let component_2 = AccountComponent::new( + account_component_code, + component_storage_modified, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); let account_2 = AccountBuilder::new([1u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) @@ -419,7 +428,7 @@ fn test_upsert_accounts_updates_is_latest_flag() { .unwrap(); let storage_commitment_2 = account_2.storage().to_commitment(); - let account_commitment_2 = account_2.commitment(); + let account_commitment_2 = account_2.to_commitment(); // Second update with modified account - full state delta let delta_2 = AccountDelta::try_from(account_2).unwrap(); @@ -499,9 +508,13 @@ fn test_upsert_accounts_with_multiple_storage_slots() { .compile_component_code("test::interface", "pub proc foo push.1 end") .unwrap(); - let component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let component = AccountComponent::new( + account_component_code, + component_storage, 
+ AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); let account = AccountBuilder::new([2u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) @@ -515,7 +528,7 @@ fn test_upsert_accounts_with_multiple_storage_slots() { insert_block_header(&mut conn, block_num); let storage_commitment = account.storage().to_commitment(); - let account_commitment = account.commitment(); + let account_commitment = account.to_commitment(); let delta = AccountDelta::try_from(account).unwrap(); let account_update = @@ -561,9 +574,13 @@ fn test_upsert_accounts_with_empty_storage() { .compile_component_code("test::interface", "pub proc foo push.1 end") .unwrap(); - let component = AccountComponent::new(account_component_code, vec![]) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let component = AccountComponent::new( + account_component_code, + vec![], + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); let account = AccountBuilder::new([3u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) @@ -577,7 +594,7 @@ fn test_upsert_accounts_with_empty_storage() { insert_block_header(&mut conn, block_num); let storage_commitment = account.storage().to_commitment(); - let account_commitment = account.commitment(); + let account_commitment = account.to_commitment(); let delta = AccountDelta::try_from(account).unwrap(); let account_update = @@ -650,7 +667,7 @@ fn test_select_account_vault_at_block_historical_with_updates() { let delta = AccountDelta::try_from(account.clone()).unwrap(); let account_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(delta), ); @@ -756,7 +773,7 @@ fn test_select_account_vault_at_block_with_deletion() { let delta = AccountDelta::try_from(account.clone()).unwrap(); let account_update = 
BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(delta), ); diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index 67b3a708b..49bdce419 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -752,7 +752,7 @@ impl TryInto for NoteMetadataRawRow { .map_err(miden_node_db::DatabaseError::conversiont_from_sql::)?; let tag = NoteTag::new(self.tag as u32); let attachment = NoteAttachment::read_from_bytes(&self.attachment)?; - Ok(NoteMetadata::new(sender, note_type, tag).with_attachment(attachment)) + Ok(NoteMetadata::new(sender, note_type).with_tag(tag).with_attachment(attachment)) } } diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 9bffbb5c2..7bc633f95 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -5,6 +5,7 @@ use diesel::{Connection, SqliteConnection}; use miden_node_proto::domain::account::AccountSummary; use miden_node_utils::fee::{test_fee, test_fee_params}; use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, @@ -38,7 +39,6 @@ use miden_protocol::note::{ Note, NoteAttachment, NoteDetails, - NoteExecutionHint, NoteHeader, NoteId, NoteMetadata, @@ -52,7 +52,7 @@ use miden_protocol::testing::account_id::{ ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; -use miden_protocol::testing::random_signer::RandomBlockSigner; +use miden_protocol::testing::random_secret_key::random_secret_key; use miden_protocol::transaction::{ InputNoteCommitment, InputNotes, @@ -64,7 +64,7 @@ use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word}; use 
miden_standards::account::auth::AuthFalcon512Rpo; use miden_standards::code_builder::CodeBuilder; -use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; +use miden_standards::note::{NetworkAccountTarget, NoteExecutionHint, P2idNote}; use pretty_assertions::assert_eq; use rand::Rng; @@ -192,7 +192,7 @@ pub fn create_note(account_id: AccountId) -> Note { let coin_seed: [u64; 4] = rand::rng().random(); let rng = Arc::new(Mutex::new(RpoRandomCoin::new(coin_seed.map(Felt::new).into()))); let mut rng = rng.lock().unwrap(); - create_p2id_note( + P2idNote::create( account_id, account_id, vec![Asset::Fungible( @@ -312,7 +312,7 @@ fn make_account_and_note( conn, &[BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(AccountDelta::try_from(account).unwrap()), )], block_num, @@ -350,12 +350,8 @@ fn sql_unconsumed_network_notes() { note_index: BlockNoteIndex::new(0, i as usize).unwrap(), note_id: num_to_word(i.into()), note_commitment: num_to_word(i.into()), - metadata: NoteMetadata::new( - account_note.0, - NoteType::Public, - NoteTag::with_account_target(account_note.0), - ) - .with_attachment(attachment.clone()), + metadata: NoteMetadata::new(account_note.0, NoteType::Public) + .with_attachment(attachment.clone()), details: None, inclusion_path: SparseMerklePath::default(), }; @@ -820,7 +816,7 @@ fn notes() { let new_note = create_note(sender); let note_index = BlockNoteIndex::new(0, 2).unwrap(); let tag = 5u32; - let note_metadata = NoteMetadata::new(sender, NoteType::Public, tag.into()); + let note_metadata = NoteMetadata::new(sender, NoteType::Public).with_tag(tag.into()); let values = [(note_index, new_note.id(), ¬e_metadata)]; let notes_db = BlockNoteTree::with_entries(values).unwrap(); @@ -831,7 +827,7 @@ fn notes() { note_index, note_id: new_note.id().as_word(), note_commitment: new_note.commitment(), - metadata: NoteMetadata::new(sender, NoteType::Public, tag.into()), + metadata: 
NoteMetadata::new(sender, NoteType::Public).with_tag(tag.into()), details: Some(NoteDetails::from(&new_note)), inclusion_path: inclusion_path.clone(), }; @@ -1115,42 +1111,6 @@ fn select_storage_map_sync_values() { assert_eq!(page.values, expected, "should return latest values ordered by key"); } -#[test] -fn select_storage_map_sync_values_for_network_account() { - let mut conn = create_db(); - let block_num = BlockNumber::from(1); - create_block(&mut conn, block_num); - - let (account_id, _) = - make_account_and_note(&mut conn, block_num, [42u8; 32], AccountStorageMode::Network); - let slot_name = StorageSlotName::mock(7); - let key = num_to_word(1); - let value = num_to_word(10); - - queries::insert_account_storage_map_value( - &mut conn, - account_id, - block_num, - slot_name.clone(), - key, - value, - ) - .unwrap(); - - let page = queries::select_account_storage_map_values( - &mut conn, - account_id, - BlockNumber::GENESIS..=block_num, - ) - .unwrap(); - - assert_eq!( - page.values, - vec![StorageMapValue { block_num, slot_name, key, value }], - "network accounts with public state should be accepted", - ); -} - // UTILITIES // ------------------------------------------------------------------------------------------- fn num_to_word(n: u64) -> Word { @@ -1176,9 +1136,13 @@ fn create_account_with_code(code_str: &str, seed: [u8; 32]) -> Account { .compile_component_code("test::interface", code_str) .unwrap(); - let component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supported_type(AccountType::RegularAccountUpdatableCode); + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountUpdatableCode), + ) + .unwrap(); AccountBuilder::new(seed) .account_type(AccountType::RegularAccountUpdatableCode) @@ -1203,7 +1167,7 @@ fn mock_block_transaction(account_id: AccountId, num: u64) -> TransactionHeader 
Word::try_from([num, num, 0, 0]).unwrap(), Word::try_from([0, 0, num, num]).unwrap(), ), - NoteMetadata::new(account_id, NoteType::Public, NoteTag::new(num as u32)), + NoteMetadata::new(account_id, NoteType::Public).with_tag(NoteTag::new(num as u32)), )]; TransactionHeader::new_unchecked( @@ -1270,9 +1234,12 @@ fn mock_account_code_and_storage( let account_component_code = CodeBuilder::default() .compile_component_code("counter_contract::interface", component_code) .unwrap(); - let account_component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("counter_contract").with_supports_all_types(), + ) + .unwrap(); AccountBuilder::new(init_seed.unwrap_or([0; 32])) .account_type(account_type) @@ -1313,7 +1280,7 @@ fn test_select_account_code_by_commitment() { &mut conn, &[BlockAccountUpdate::new( account.id(), - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(AccountDelta::try_from(account).unwrap()), )], block_num_1, @@ -1361,7 +1328,7 @@ fn test_select_account_code_by_commitment_multiple_codes() { &mut conn, &[BlockAccountUpdate::new( account_v1.id(), - account_v1.commitment(), + account_v1.to_commitment(), AccountUpdateDetails::Delta(AccountDelta::try_from(account_v1).unwrap()), )], block_num_1, @@ -1394,7 +1361,7 @@ fn test_select_account_code_by_commitment_multiple_codes() { &mut conn, &[BlockAccountUpdate::new( account_v2.id(), - account_v2.commitment(), + account_v2.to_commitment(), AccountUpdateDetails::Delta(AccountDelta::try_from(account_v2).unwrap()), )], block_num_2, @@ -1428,9 +1395,12 @@ async fn genesis_with_account_assets() { let account_component_code = CodeBuilder::default() .compile_component_code("foo::interface", component_code) .unwrap(); - let account_component = AccountComponent::new(account_component_code, Vec::new()) - .unwrap() - 
.with_supports_all_types(); + let account_component = AccountComponent::new( + account_component_code, + Vec::new(), + AccountComponentMetadata::new("foo").with_supports_all_types(), + ) + .unwrap(); let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); let fungible_asset = FungibleAsset::new(faucet_id, 1000).unwrap(); @@ -1445,7 +1415,7 @@ async fn genesis_with_account_assets() { .unwrap(); let genesis_state = - GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); + GenesisState::new(vec![account], test_fee_params(), 1, 0, random_secret_key()); let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); @@ -1481,9 +1451,12 @@ async fn genesis_with_account_storage_map() { let account_component_code = CodeBuilder::default() .compile_component_code("foo::interface", component_code) .unwrap(); - let account_component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("foo").with_supports_all_types(), + ) + .unwrap(); let account = AccountBuilder::new([2u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) @@ -1494,7 +1467,7 @@ async fn genesis_with_account_storage_map() { .unwrap(); let genesis_state = - GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); + GenesisState::new(vec![account], test_fee_params(), 1, 0, random_secret_key()); let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); @@ -1527,9 +1500,12 @@ async fn genesis_with_account_assets_and_storage() { let account_component_code = CodeBuilder::default() .compile_component_code("foo::interface", component_code) .unwrap(); - let account_component = 
AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("foo").with_supports_all_types(), + ) + .unwrap(); let account = AccountBuilder::new([3u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) @@ -1541,7 +1517,7 @@ async fn genesis_with_account_assets_and_storage() { .unwrap(); let genesis_state = - GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); + GenesisState::new(vec![account], test_fee_params(), 1, 0, random_secret_key()); let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); @@ -1559,9 +1535,12 @@ async fn genesis_with_multiple_accounts() { let account_component_code = CodeBuilder::default() .compile_component_code("foo::interface", "pub proc foo push.1 end") .unwrap(); - let account_component1 = AccountComponent::new(account_component_code, Vec::new()) - .unwrap() - .with_supports_all_types(); + let account_component1 = AccountComponent::new( + account_component_code, + Vec::new(), + AccountComponentMetadata::new("foo").with_supports_all_types(), + ) + .unwrap(); let account1 = AccountBuilder::new([1u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) @@ -1577,9 +1556,12 @@ async fn genesis_with_multiple_accounts() { let account_component_code = CodeBuilder::default() .compile_component_code("bar::interface", "pub proc bar push.2 end") .unwrap(); - let account_component2 = AccountComponent::new(account_component_code, Vec::new()) - .unwrap() - .with_supports_all_types(); + let account_component2 = AccountComponent::new( + account_component_code, + Vec::new(), + AccountComponentMetadata::new("bar").with_supports_all_types(), + ) + .unwrap(); let account2 = AccountBuilder::new([2u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) @@ 
-1601,9 +1583,12 @@ async fn genesis_with_multiple_accounts() { let account_component_code = CodeBuilder::default() .compile_component_code("baz::interface", "pub proc baz push.3 end") .unwrap(); - let account_component3 = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component3 = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("baz").with_supports_all_types(), + ) + .unwrap(); let account3 = AccountBuilder::new([3u8; 32]) .account_type(AccountType::RegularAccountUpdatableCode) @@ -1618,7 +1603,7 @@ async fn genesis_with_multiple_accounts() { test_fee_params(), 1, 0, - SecretKey::random(), + random_secret_key(), ); let genesis_block = genesis_state.into_block().await.unwrap(); @@ -1649,7 +1634,7 @@ fn regression_1461_full_state_delta_inserts_vault_assets() { let block_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(account_delta), ); @@ -1773,7 +1758,7 @@ fn serialization_symmetry_note_metadata() { // Use a tag that roundtrips properly - NoteTag::LocalAny stores the full u32 including type // bits let tag = NoteTag::with_account_target(sender); - let metadata = NoteMetadata::new(sender, NoteType::Public, tag); + let metadata = NoteMetadata::new(sender, NoteType::Public).with_tag(tag); let bytes = metadata.to_bytes(); let restored = NoteMetadata::read_from_bytes(&bytes).unwrap(); @@ -1866,7 +1851,7 @@ fn db_roundtrip_account() { Some([99u8; 32]), ); let account_id = account.id(); - let account_commitment = account.commitment(); + let account_commitment = account.to_commitment(); // Insert with full delta (like genesis) let account_delta = AccountDelta::try_from(account.clone()).unwrap(); @@ -2062,9 +2047,12 @@ fn db_roundtrip_account_storage_with_maps() { let account_component_code = CodeBuilder::default() .compile_component_code("test::interface", component_code) 
.unwrap(); - let account_component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test").with_supports_all_types(), + ) + .unwrap(); let account = AccountBuilder::new([50u8; 32]) .account_type(AccountType::RegularAccountUpdatableCode) @@ -2082,7 +2070,7 @@ fn db_roundtrip_account_storage_with_maps() { let account_delta = AccountDelta::try_from(account.clone()).unwrap(); let block_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(account_delta), ); queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); @@ -2133,8 +2121,8 @@ fn db_roundtrip_account_storage_with_maps() { assert!(account_info.details.is_some(), "Public account should have details"); let retrieved_account = account_info.details.unwrap(); assert_eq!( - account.commitment(), - retrieved_account.commitment(), + account.to_commitment(), + retrieved_account.to_commitment(), "Full account commitment must match after DB roundtrip" ); } @@ -2155,8 +2143,7 @@ fn db_roundtrip_note_metadata_attachment() { // Create NoteMetadata with the attachment let metadata = - NoteMetadata::new(account_id, NoteType::Public, NoteTag::with_account_target(account_id)) - .with_attachment(attachment.clone()); + NoteMetadata::new(account_id, NoteType::Public).with_attachment(attachment.clone()); let note = NoteRecord { block_num, diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index 271c5a8bc..ae071c175 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -6,6 +6,7 @@ use std::str::FromStr; use indexmap::IndexMap; use miden_node_utils::crypto::get_rpo_random_coin; +use miden_node_utils::signer::BlockSigner; use miden_protocol::account::auth::AuthSecretKey; use 
miden_protocol::account::{ Account, @@ -13,7 +14,6 @@ use miden_protocol::account::{ AccountDelta, AccountFile, AccountId, - AccountStorage, AccountStorageDelta, AccountStorageMode, AccountType, @@ -25,10 +25,10 @@ use miden_protocol::asset::{FungibleAsset, TokenSymbol}; use miden_protocol::block::FeeParameters; use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey as RpoSecretKey; use miden_protocol::errors::TokenSymbolError; -use miden_protocol::{Felt, FieldElement, ONE, ZERO}; +use miden_protocol::{Felt, FieldElement, ONE}; use miden_standards::AuthScheme; use miden_standards::account::auth::AuthFalcon512Rpo; -use miden_standards::account::faucets::BasicFungibleFaucet; +use miden_standards::account::faucets::{BasicFungibleFaucet, TokenMetadata}; use miden_standards::account::wallets::create_basic_wallet; use rand::distr::weighted::Weight; use rand::{Rng, SeedableRng}; @@ -279,11 +279,11 @@ impl GenesisConfig { let mut storage_delta = AccountStorageDelta::default(); if total_issuance != 0 { - // slot 0 - storage_delta.set_item( - AccountStorage::faucet_sysdata_slot().clone(), - [ZERO, ZERO, ZERO, Felt::new(total_issuance)].into(), - )?; + let current_metadata = TokenMetadata::try_from(faucet_account.storage())?; + let updated_metadata = + current_metadata.with_token_supply(Felt::new(total_issuance))?; + storage_delta + .set_item(TokenMetadata::metadata_slot().clone(), updated_metadata.into())?; tracing::debug!( "Reducing faucet account {faucet} for {symbol} by {amount}", faucet = faucet_id.to_hex(), @@ -522,10 +522,10 @@ impl AccountSecrets { /// /// If no name is present, a new one is generated based on the current time /// and the index in - pub fn as_account_files( + pub fn as_account_files( &self, - genesis_state: &GenesisState, - ) -> impl Iterator> + use<'_, S> { + genesis_state: &GenesisState, + ) -> impl Iterator> + '_ { let account_lut = IndexMap::::from_iter( genesis_state.accounts.iter().map(|account| (account.id(), account.clone())), ); diff --git 
a/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac index ed79a49b1b58f01a656883fcee580301361de395..6bd49fefd83ede7e1dec802e6d17b06c13c1587d 100644 GIT binary patch literal 17931 zcmd6NcRZHg|Nnj2d!%G%kF1+Q_KK`Rk#QTD4UsJ~GdnYsgzP9I4J(=1vO>sC*(xE* z?_8I=KJV}6)AyIp@2}r^JPyzMoagm?yjW@Y2L$NSH}kj`EiB!GR;#nr;W+QQk|1w85IaPhL`RU7j?2~c8h z<7#1T;cD?OowmEu@P`A~k`NULL8leZ>A_$b7gDAI|EWRd0a+MiQIL;;d>mvkkYNE8 zkX1oe2U!DTO^~%fJ_qtOki9_m134Px1d!7}&Ib7*$WK9j266?+RUr3)JPq;;$g?1S z2U+SeE0`+DN>G@eySuwECS?B&1^F=RpcJY3 zT}a3-#0L@zvxTkt_ZnFGR|0yc&Pk*3xG_KelP$;x)BdD`xc5if0)G-B#AU%n3I68) z(;>P)$qTU@U}3;uhYw1XkCBCjMGOv#kMB=(0lGg)3;ZtpU)m1E!2rkipRLY}!ezwe z#K$PW%?%O}Bg87v)Z&-ZMG?PW#CkEeL%Epr2@b^i>bm&H$%{(P)O!)|@v&+23NG{e z`{m<`iV|U93-I{lU>8xNVq;xwCmo$AJbaSmUl2u!Nm85;s&C8KIKqd<5W!C%iI5ND zgoS7tI;Q`@x{0j-1BMSn^s6o1e))j(EDBpJKu$#RKMMXa5J45)euMrO?+@Ysc7h1a z$U@5UE2D5(NmYXB^b*~=)TtoW$g>ypyQ9uN{4bk`^uOezP?U5`%y45+Vq1hBX#n0| zErNU!`1Q#G!-Iu{81CPQ>o?;3jW~Xx|0NdGKKlE=4+cezxx=1qTE{mUf#J>z3KhRy z{sf~Y)`=V3+;a;f>UwP4Y1z3Jqv4hQE!BYS17?l|GjnXQh#Q3>CgCID3&2>W8RNDZ zZChbvMf@nu$nzU8ZF3E>Q`~-w7<5~V`^U822XvSd*w_SqPfaxH|LcZ9tMf3J+* z{mN@QE94`}j)e1v)mwn5_c2(4HaoQ_^ zJSlz1bz-Aj=l9Abnh-ZTm|-eB0}H>Dcoyx#2B^!vJlRG z0s&0=;Y@UgRq%80RsvhYsQKM_{i(ge0<%lRQcxZC4J_xxaK#~HPsYPYC*1e=aXvQ< zv;I@|mibh|S@0k-68=zuk2$*M&HshN28+W9jR$1@Apd*!qS@ntzx2O#8Go0d_lc$oE2NV*WkY@FmS`dHR#_2 zG8}d+m7I@Ajfjt%67d6mNBW0DU{N@Px$fbGDnIO$jagLavt<%M5vQGfF059Z<($>C-Mix5G< zeFWTSCR~OA?849HQ3No(ErZ{-Q5b6hjc*9J?Q<;#0p)e{x-i(MJ-p|YA;@!C(~=Pg zP%}x*K>%lQ&&L2O38U`qgtsXgi z5J6#xX*$vo;K?I+1Oa6Ik$OLy5Wszc2R`wkFe<&VpAlfi z>bmDZ<>b^}JKvzUgHL>blyb4d90vO|N2#MX0n;(Wm-~=GgTffa4M7B5KwwY ze;WaLlS4ldK=djFK1rf5UElj>VX#kg&+8iY#(k2ukPsq3%t5~n1EBR&k~t6o+H^Vn z2w3@i;TQrYXeagt%hsse4Y*{V_O35(#v=kYy~Y~|U_QY100GpxK1jb{wxg5n;Sv<4 z;c)(*xv9W!#7N0zsT9)o`!x5UY4{%U97D1=vgKmT3~KR^lGlUMn+Qn2WkyCTW;AeW 
zFWdzcl0~=*g*hJEj7-mcnrn`mszRRIa=<~l422oyr}RRK^tI+)5D=4C-iLtssg*rf z8eh25ASHF#Y&d1d=19^@vX-yshyW2{SVF1*Hs@nHvhf|P`08-+5o^o%ii@0%VZ;TlW zb|FK0pZ47Qu4EueOspRa5kSz9w%3{m7$^32-3f_XvTzj&lX&tQf-vj)3VYL}{K|i| zrDv@^pF))8zNubB0E5(-ZUoe7`0cgfTeY#hC7E?xHoJ~VkGn6Dir3;TY|m*d`uR|hm@S&)K-AOKFwWCqF+It zdxlfZh5&q?;k`X5DkypzDXDyZ+!_JOZzgvUu%YF>x9wv(Uvu4ti}q=5xx^wGQ79Yl zXo3L>BRQD7*JtZQJl;qN)!04cJ{a>ApCktG(tyfRubql5j?V6}8(q3{aRL5ou!tw6itujh>*W^ZIbfKF#_0F6`~(9^}QQh?1c; z`Cj|FUmo3Ci9e_|A`xI5JuONRj^JP!GO)nAqR`CPe_<}*Ru(&=kew7=1p^f3VWhAI z0zO|Q4Msp(AVmrS@&kzWUVOP+diDm?iHI@^xN4u#(b^0m^BdDP>%M3A-s^EcSO6F% z9gjx(5%YONQ2{A2*J6K!fJ(PzH3TGZ_~^i3pZ0uE=L?YM%!s2b5pc-PWN#IDNqcE; zU>q)@oq(%QnBe<~do!>6vC1AhSS=tDuG*hDJ`9Ekg6TjDxwQx2-%iJ%7^(S3J@!8T z2e#_pYeZ0lXbtbUnw zc5rdVtpXn`|MQj{F2(%8r@ik6w*C-@01LRj|6hXmlhR(?D5w2d`W-hR`1S;Q3`YGc zDj_-szCX2rmqbZngHnzIacq@sM@F-hA~{UjzsNt|v@#>5V>YPoQPhgUT*({T;B z%9UUZ`}dB;q?V54r#?HZM9^&-pBn$oOa6KN#`>45YjU^Rwzf$aS%~(d- zI8$rLufXy}5I%?j=Cx~TSoiZ6scF@18b0h|CcE&(h|c7(&ti>rYB;m4oFL2r$zRdA zAeAg+kYZ4uYn#}y&XL$H-hxky5#ptSsZZl<(aI19M9 zs9lCECr5i8HikOX^AG|5;I=swTa7nD^@z>UEYJGI)%gtV-Xz<5x`7_^^0$LX<8C7B z0&KVCXOe>~5$6C};D|Xtx>~m__PcQWL2nt$Fk7@!h?^3K15W4_l_zdZ3xTs6JZ6?P z3FP&!a~jXY0<9@xu5b zW7CwEIfh5L2_NdMIhRrb|Bx}G9ze7@KQV1OKct9a{Df7Lmm2-}7%x1qB75p&!l%45NN-HW|dbts~BD%Hy9y&`=tEBt!RFRov(*CsM2HT zVn;+hKVtQ2U)2&@>$TJ@iBqUS3m2yEuq(J%KB7Jq^KFvl$6Adu_C@cn?RN<1fxe@% z({`(#GU$Ip^m1k~>MK(_LFf3%Z3{u>dm;LkH$GkOrv>8$jA;~Uu;Rl}o@BgZZtVFa za?OV^xn8$KY5LrMUn)Xb6 z(@sdo-r*I6X8wFU?eb!Xk~=pKg>7_2-5XQtV3)$)@HVQh%gcRQq4YqE0AylrM-_#@ z!t)KE?%c_*m|U-+SSy%8Q}cdFpPRX+XTZ+@{Da3Jjb(?)kIk?Nk{=5X77g2u^au_t z9_PLy=ECvKL`&zM79;Qtjw3foC5ffIt0mz`<8&mwn#$vv8QxgK+>h#yo$WJlIU@n@ z6>OL8s7yg!tN86NIQaA0X(ui%@Q7aEPqi3gIp?}@2fy?RGvL5uwP9FVHgD6O+nFc- zO!y0t1B!{q>5fY}sNhw4?~HYBivykl%sckH)HGj*#HgrU+hHk;`6hE{CyDeDqcXkI z!!M7-!<@ir(jVr%wudVV)nXj-o4#{`RE2C-v<7;qeFNI%;|4cs(Ue7Mhl)Gs!SCx|G@F zR;1`EQ@^wq%3x-u zKYchOAq}5);W@|OO~1wULh!3vV(G2)2=Ar*+ulj9cLE)F8Y3I&t@V}suy#XvmGDMx 
zv)W1XQ?*9Dsoxw&dzd*$FFvK|*qS;t5Ie>T?816x-y*p~Yql68#5>JjOGJ&;M=Cp;JyE^6nb#iR6IOi6`JuhsCz;Wdxr3#7Si7{N?>`uh+AAtM={i=dhaGzR zL$3HSrv0&0Y3teAYk`lKu)LlvPAy(LYO7#fugUVmE1W?_4A*;DElg9dldHUr=y3an zHWU5pqYE*M;Sa>6Ib7qp)O{L&$LJ@}G#PQGt6)v(;9lLzBo zB%iZ3REV8WBzo8^~18+Wq0U4u==KpZHr7ovir9L`xlHcYl( zpQ4#bta~`lml64#Z428V?R*;?pGjbY)hm1!*V!RpecKFwlH}b-PeJxfnlE2GTH}dV znNJ3}v<`x6Hh);po>9Vov^d(a?vzvca0@%hnVIF$6!*J1^IsoJTwKkikO%R?ydy6= zVxM*z5(Pcaniy*;_pQFB6(Bf+TUHg}F|m==Z|j7$%fD)~KsGx=+xD|wsrO=N*u*?# zy$Htw+jJ=5JKVf1WnrL41@yKprX*hMs->+(GZ%lzB7UOS@==(u;pQUs0m^Q)%maBm z@Dc$Mn$B=h=t|96&q-I;*}_{iG>u!L?jMDvAGT(kf4ewJmZ}Wmfb~4YEe#~^?8W;3cmJBf_VB?%VEy>Q}jpdD4Cj{f2 zdNX`s=8J6W8NQw$t@e+1E0a__{NJUp25&q)1=a(29K_X$+qBe`ucEUG42wh03)7O- z9N5>ZI902*(PcDA_;^Nm2E`I&qA_!}DsFca=@QA6d)Kj^)syfg8 zj?DEz-xkG@aiH|L6g;O(=zq9=VaAwElvboX zAIoEen6({Wycne+TN^N;2~e3jXSLcr^MlrkKIP2MABO4b@ForV!&B+TM6K<*N7ZPY zyJe@#biMSZ{5d_dzIj=pzM>HBScC=oL_C4xF6zBxZnxHjh>&4o`L~3_RZ$iH<4GWE8(cO`gZ8_FT34JeZ0~uQ`c~W#PBb?x-0ScS<A(-mc`G6V*O(g=+_G4zr*)KV)|(; z?VzEhIzfYObm`8Uq*blhp^rFiMh(VO-@v+~-kQo9#6NUc8Co?PM& zTo;0GZQysnajH{FP?g-X3kh}W%X+qIoZJ*FHfp7=8wQ2rl{fFoUV>k)Kz}rlu{@8| z;XZ9$O>Av@hPv1@+s4&WK07M=Jm=`yNsnqDN!bw)y9N3~HQVxADDk zM!^h~7-TOaY)cLL0|nkGQJ#9ws#`onOv-Z~ksV1o*ya9^QL~>%Nk8IRdIP-{Yhw>q zFYA3%t&F+k#B}cihA&QuHBk(c2eR7NUXo$%lrV~z4Z+TV%j!zny`ba#xml+0dV7s{9boWInM?~q`UbVQU=NvdC5PaY4!WRX>wwK!k z(bH)rV7$OQ@wc3hqAXiDQE%B2{Vc`j9M21Sk{o_Z?P4s@CkYlJ0>?ONxbN+QFqK%`R9j3 z#`v3hcs|l6xXginhXM86uqj1Ocpl3;Vb*&+#k7YuD&72O-xFTrMqiV0#itqhrN?e8 zh%1(gcb~v+;&Y4jWyg2=bxj%c1|3?na<7?Y*4;g^{>6*05K&Molvp&*pEFb0NIey?n)QXTt6~pZ?Q>KF)E)R*$psO@V ztN;h$^}Ulz#ITb=3?sTR*GobTw6e9W>+Mv3cKys4cqo=>4fNnRM7&t(nkSyM#&5XW z)r&=CK42!1PEpi9=tt}CltOm^xmg4>|2~DGm-1}nbt-ns&Q`WO_oC11YKNFvmva`^ z_O!p^H;cf=QF)|oy=3^ond|2sudDGY(`dXi?DVuh>N1n-DnUGn{}OD}{_vS$E892# zv*_(K)0U-6p+cD6FS_=odCU;~uHj1Bfb8f~K!KBg$SJiWBDsD_CuKEiO$e9_yPGcS z61^&FSeHx)x!ZNl;}+Jg%NxU>Jb53#GPWrhf>XP^4DxvbXnk7NC&y6Z?^p!I!JVE0 zID?=Hn{%QUZdeWZv*L}-hM={K+RqIqGUBBT%iWZD8waMU{#C 
zYCNxBu0ARJYPQ@k=$X9m=GORR6_~&Za2zu0tZ~!lg0A4cIXNGxbeq5zZ`kL%_e|03 z-ED5AKCy0lux`Nf_aNi>ub0Q&@Z)35GUUVFDi4s<4B4P>mZDpPtuH!9!ySTw2 zGB9PGKHYhwtz4=x#|5O|XUV26X*WM%t-SctX*_8M~^^w zf|=hnwsnpQY2D6xj&P0Wi4fufbRsHj7X|G4!)Luv=@YlF^|)pD(AJs?`_A|nbD9+% zkgSW0=8QC?vn?*-BLpO{9{lYdFGxUV0p_he_W+eJ6o}x)`$XWe+S>TC*Z< z3iZ3XUg8PH0nAgm7Q?AVsqOx%@SySL+P&TE2cwQWk6Fy)$LFr+H2NIi$J$*lz&vrM z(UHi=Cn>rX!;K>T%9&*-OL}qMw=_dA{FQ(=hy%XqH~#ow)yROQ!PWdmUx8`&3#O^~ zlUbHxV$tfJ=boX7*{=cr;C<_u@K3=uj#rFFiE-~rFogTK2+s0Jm}(iVb!dlPZ=zK5 z1sre|MU5XGChaPU7Tjs-jg$5hHoC03zWKVA;2c}6#qLx25iKRGf5RX0hVyU(G2~Q*G|ZzXv3BXUUcBzR zaG{hXv;flxN&Ymbvs%8JM`JU#C^~p$uk@K{Gx=jy z?m(6mvYKOWaNh?3-&g_l8La|)>hdrt?k&P+4w9y6%#zZ_aH=%b4SfhKNTkDhgF(N+ zymP+P1nV+xw)bmWo%skQH)5vU-#=E?Y~S$BL+MQ9wdi8~<6#I}Nxpbbk*Dv`E4JtE zibSOc4d~q#i}*xmrS*?r-vN^kIm;XSaZ4!~so9qncIXn8L>Ux*HT?92a4qNF#NzNI=_ zIpjyE2L!!5t543ub%!;bF=BUHKejDjCmggh8{n;yop{}GP%FNXGZEe--fzkaDAYPbv7}8U-7(C({zU=6I+wc4oSJ&o;qr$1gG4_#qPoJVBbFu#Y zZNH2Hc47a%U5(Yc?nWeZuhm`FbH3NlVup!&pj=N_-sr1hPjmo9Ay#j0q2%>>g06=T znf0E_8y_qm4K-?dHSPUGFi*6I=h?IDw^$xZ>mI2Lo>!N_$J64XW%C(f+;xetT@*I{biN(Dq z_?FDyzu(aNn|(5u`jFN;`Qv>bnaO|y+ocq~zr{LJLI1h#v79ku^A2f#*dlw@)w``vaE*APjOSB8 zyfE*H)nP74=N^goSJV7dO;47qS>hj!#J_1zB&sTCGt)8y8wJUKajEFtvV9E0t+K@$ z!4gf~?M*9h$~WUnC-g(7S?09Xwy<#|KAe?G_Iwb6udmXWmPbz*aQxJ*cRr2js|!v7 z-%qWKqyjB&Ab4VFyyFsAh9tkh-I_P2Ic8?~Tjo++Qj>N*45hdIxb*xU@DJvVxtj;y zURFF4|&S#)S}q0^Mla`~ZCv+VQhI-?M;j`~6P zat-VsIZETzGHSy2S=+2K-_FYpjq-&HQj6?*x9M0j?#Qwx!1EW}VWIN12w1$`1K;Qp z+R@4@>k1ed=_OcTeh!;i7fNOmS0-eFc?8=Xv*4f}+hKRkRvU@9WV&N0ICRpI?ctZ{ zZ8n8`)FEeaL9E@s&C4th2W3Z`k2x`@y+EUOQ7D1tZ+!F%j%>PAF6a+FK&*_lQDx`7ac-BOZz6Lv_-DO5`Y!(LmTqcH z_E&>PSAU+y+V#9S8h1AL+s1?Jv)bK*%Vl;`krn1a$ug|MPw~qrs*Hdh3f$SDa{URf z8c=+n3;iUTp7|op@Ay5=t+OpM=Fb(QPJg%b$t(cv2iqMPF_Tg!OsjFN;3^?bsm}FOrO{#tV!)6RzUNnC<#ab~bL+Qt|}kJ+)v{fb9wZy^v?mdXg%(r&AupU(c8y zQJ~XY9Byxr99$@p@*-E}9xcS`{q0`w35XTe3zU6d@wlNzZ{g)Bhag9#xYT009ch-y z*LCVnn=)MD>*at0^CHhy=Mj@K)tH?+qVN3Tj_|vS4voS0cV+Mu_35@{Zc&zF?fMFT 
zGSJJu{z&~tn3qs*yo*dE#IuAgv&q*PX1@T&n(-z$XARgOWM*hvEGU( zELE=-R2=T8C?*zomo>jfL-}7I9J*|!{5Z)_l@#-7x-6t&jYY|EMLMYsEv)lU#Ki6e z7zcuY%yL%KFe3mr$9__GY}f2OvjXalGUQUpo}zq^Eor!1?IqT~Z+403Ms@bIS-BPY znHg`oFvbP$W{Iu*N`@2qD}J%2;6URq1oXo4B3RqT^TNfHlv)mU@e1$5%+J$1(-n8H~lj)ml-Gll(x>v}rt8a9&uxvQtt_AjlhyJVuaezBJ zl(;?jT~F6d`b_R@uDb=k4>si0xVIWVua(`+`5<8&Eoq9iiz!*i&8#puEE%-RfXGZHDu!X(<_`>>`zM=Kk_t`c+2O9tf=HW*q`_3e9 zo}X*wC3f{NcX)8bLB_x|^u(4M38pOkC@1y{Li8g!OOf%Ll_;6IK>vuDivpP~VP)?N z%G(m}tge0{JNT{w*cAbK^;#P$u1`F6whKD~PCY(*(&mKO3K5FpN9Lig3v)RQ{mp;_ z^Ipcqhvpg7D|8>o_IiFafqa+UJ)?a#qOv=M)5j$>>XHw(UEJ6|C0-a!2%P+SOY?Kq zhoj2*H_7cdeGcdUEHm_6Ruv2b01Uy32gKCbb*`_`&V~V@b^HjrDtR3_(YS)R>t(O{>$h8<8LuWGglvACm zo|?VL;S{xAM%e+hU><>kL2}=<^vWgH<;k=ib&Y@|#mTYQ75sGS57quQ_{5!H9>MKG z7(v+kAGzU(JMa}#Jw)Kz zqElwGAb21McgeXoZldqjoatqtcMRyM7EFKkkM`cJbQ!ZBo^HZFs*#a*f92iR#83Xy zA8z_H4`JIy#zuiZbLhhWx8J6@yQ=xi`y^c+!E=s8wU1JA{a!z{x^FLN7ufDquUz31G@m5uaE`OQKOs+6s6X{( zg+k2{`cOu*k>Yw_6Kgle@VQnP?o~sp9{E+z`jo9xRm5UtR*k`8GUt7h`dMm#6rB8Z z`gI#8+?Nt9_eLkLcbv@|aVS;Z_LC^B8Unt-Jk$DYo;x9OxOBbVTM~M$Lbv)_o>4k0HE2V^_7+eSg{Ia;RmRrL+|94TCGqwk5dOP}2CJa7yiQ}A~nUhSLl52D*7iTgPx^RE{^ zMdOZf4qbm9cs-7o4&QPcZw$l>+Z|fPo>$9kGar~J=+&t_ z;g+0q=q!r2yARks0eD%uQKjZL8oA4-@T*Mo5Ay~T5+!;lQ&LX3vPj+MROFfhdT_k2 zqt3qds?OkEwpI{|dE8yC%Cy95Eei_YpixtPc;X@qLo8H z6i=CX0SBLv#1*PxPcOSuSY8RkXZ_%`8>P!eojk^egMJonvp&?79I}BfPWvAn%Nt#T zFA2f88iq_4x39X1az<`ToGCfNQkO)Y)iM}Y_?36?*>}A1yjjq{l7N_Qa8KYEXF8{K zh&ZM{z%$*n|} zhAYkTr-l7-+mHkC)~sK!3Ywa{s*vqaK5Udg=+78*1;SYC$A)gTHf z`>28Kz?KuUjdM}f z$uFuqUNbo0gWr>}#8s@`RzQ*;W3OE4iyEVX15G4qyi)E`Luqx24G(ng#j0uRFM|Gn z_3}Lq9dZfFD&DTw_EA5OZg|hlw(RAB)eq7yByaV%&C+tgmxQ4GxK0ve{#0pR zIY${4bokMjXR21x%iU|}d&ch{P{IC510tK8DCAn+pGzGZi7)Bi+8jCW#KsX`VAQF` zqpqvP-kyuq`sQ4=eguJuO*TDD# zJgNrm1&NtAf;-vst_04Sw)UK7%=)xeX0L}UlE{z;<-2?aJUCw6b%xKAnU1sGuZJ~P zELOd)U4OFGK{iht5}Gm>KvGUOg7q)#SOflws~lxVUPOa^zG_T*VkO7TulDWXCH1&G zYi;XafqybU&s020PU%9LO4ba9!tU#B4ae>;IkrZaDw?cTGo}mVXV(D-B-S-Y$KHH% z(bLR?jpT+FvLP)Z>CSH#Cu1M0hK2;+u;2X#NU(onYPp8nL&r&{P!R(qZ}S`@)X-Gl 
zbDAFM#hzxFBqHJ30CwSV+C5oaC4VntmyEzzLtm4O^RaZMrhPyI3AP+}QrRi=Dat4@=DH6xO zn5al$OHkMFk1Hxr(om)Ge+EfK zI1oDyB#Hw`;XraYkOmHP76&rMfgErkFB~Wk2fBp=CE-91aG=LH&@&vU83*dZf!^Uj z^El9F9OxSk^c@Exz=ddWA$DBoC@v(23#sBlXK^7jT<8ieg+TaiI^mAj|`(zx+7sZ#TXg^SV~oH3h~RHOBKg~w*?&8v{Oz#)x5@CgALadR^Mb&HLlX3V0M~Q-^8f$< delta 4427 zcmai1c|6q3AK&?|W!<-RJ&u;O?p?>awuawMfva+M>i>!~DRy4+VOPjrcL zgwRC?l9Wm+NtASvWokAm!Av80cMjFcqFxx8QnpRy;KcteQvRDftI>tconyt)V67}KxjMnL;bqj7mM`x zxh$tgWAzRJOnI*P{Mg*N46^FKR{W0<41MGVf-!@S%bvIF-@dFzuKm}UIfn@ZQEvKT z6Kz74Vj6c>^3HVE2QnVG?*fQmh%GR37IG#F+mV5Y2>NJhXsStP@=4pf=DkH6btM(D z%Kt)uDJ?C@t%ZOr4L=L$0e+}Tw$zg5Hq5&*V^ZyWi9lqK)Qo=CBtJQy)1U`C$;+X? zV6TlhcWQci8plj##+n8C+5}myX)BRX^roVlyTWnf+y3f&dF~r!q1wzYO=Yfp54{uP zZ7YWlgi@u{8GQf$qYhW|8v+8f<_j&4SGs`lHgd0j|D$s2=_HHQbpNM+ZI1J&GR{l& zd=#5rLh*2kkNlSsZWUel*C)iwv{438ia=iA!aVyqOjN!B5))>(W za_)wPs#M|s2`Dkd|F#_LvIiH-^OXi;OZS_P3z$V7%C^`oNZ7u-IBC7qjyawPWm>*^ zA4MW0Ux%{M&OghAsgzZxaV)Ct?&+O z-kf`LPEbR9KKMo6q}x}CT7f$KAS zYG+H_As%yGRSXsL-*#?f;_NlOM7teU>WE23ph@*ATrm1syxcpk;ket%D}#a!!#SSf zeSXgmTpBI&b^2rWgk;pH-5rEZqs{3A3Ul0N-1mFLtM!Le7Kh|z7gCi*PZfF0@y>1* zd+cwkQnpIM@hK?XQkc?x4zKTt@ZMwQjdwHyOWv z0!iU1kyWGbp?&m7`+}gy$FE+{eR5%KOR5d_W3@T(ymO9Jq|4vlAkY!OC% zY9?Dm_yg>FHBNgR{dlfl#h`ruPtpFUPac1^SdxE4cpD-P&XZ^#uduaTwI}vAGtgA( zi|a9_M^3-l$@HV$hoVJ>eiSp19&SU2D{Y~Xb62&K#M!^y-D94_uDllZ=cz`=-5JJp zPZO)~yA4m&j|qnB+ap3Z$&Gp^D0_2oN>+7{B6)HUBx-e9e<>}eK&$8>2tW8|9% z>R{;o`F^#9quX$$m!v>Gm;O+nFJnVQ*M*OfmtHPxQXiU8MO@APTZVbon%-~g2404k zxSI5gyv#7V?&Fce`FvdXyxFiRB8nnh7EnJwblou|mR@#2oqNYOb<(F*tNV3t^`V7o zV|SkhvyeY%Tpr;eb!&XF3hx>Faz~I;<4>K4&mvxnH{bB|GAOW(88s0#L+1s)-4nI_ z7A!lhUwLPQS!pupPawK~9$EFb&7peMVv!4HMEuPW1?P>jgl`%?loYB-d%V=hL$UD6 zTgy|G$J&GYAJbCB*wSC~<`%zSxOMp5HYlWTM?#TBq8MDNPvtetH5!lol@loD^u674 zaP@$kcEZqW#h<$VoRqXiRn-d3bVJmGBaQ}Z?_0a2PRK`Ul?}DTP!KM1x zFe|G~f=>(BY{bENRc#a7rr7b6n_>GnuAipf=jCL@XDxG?FfNkrEXwzm`7k>!{GzXp zTSJh53Iswn(#aRMi| z;@2G;77K&R3=MeWQccT^g{q5=XIhl*c2t$i?+Et+cK$+w1%t_P(}sTNzT)6foW;a+ ztrwfDf6n6B7l{OMl89W!XFe;9%Zj}^1SI}@|gUnR9&NuOc 
zXJn&>Xo~`);}5+djg=!QJS8qTE_SP%7`Aa!=dOd&0&b6TDHFD)TRs_o)G8Y74zDb0 zaziXUbkzFlz%+ieODQd?bn$?k!ejrv3w1m*|G5}(WW}4Hn;(w5qlv){ROq}|BecM` zEl2uOLEgiQbV>>H#>43^*L7&M=jdNnJYR%C109-@%j4 z{ZSJI^@PA)4a>%$qp3S-4QiE#8KzY13I^*;X?O)k0S=l{Nx_J!03}&etPO)K7LBwH zRVcy$RM9iTg{3SyX(JL)gilyh(k9eX5dt$R_7#IPGulj(5?qQZWQj1BN+X4#Nl}6Y zW>nHv#8rZKP?HJ~el#;s6R!^KeWQ%pOC%iUz7D8)t zx=s`_;17gUB`=Coh^Vi$=Asq+R-4=o-dXr-tnrv)V1ltwtBIjDod{FRX=D`PH{f&_ z;Gl-t5aA1RnineKDY4*vrAdF@unF7x@k3!feNXa^94oaJQPG~{4M}x8gMnCNAiDPn z=^U--TagbEO4qN<@p?!35V?r%AWG;t{xP*IEpHUdqfXg;t&8=7{YZLZL`g6r?&!&}|*H+dZ zUVb2u)E}BvTB;ZY@y8Gf%Lbpe`GmwI$Gpvd9whW`*970x0r`M}^65fVn||+fdZ;?m zL*r5jEHB`FkSy??&B_X@r+z##A+~X4!0OO^N8<8k#)@r0bwns;MdKOG$PzeL`Aqgi zbq5;x*yK^f9%>jS^gXUW1?2YKxAQv?epn-N7jj<>aRXl;tX5l8Yoq;I{hiB>ADMbf zKRSqWKlrmIY+ulcJs)->4$kY|6Uf&RzUc;~6M8ZR%72Zo7PW8p@4eErIW1((>j(OK z(3H>ESuS+%hw1sKwl(AQ4rW+=9jL$OtY-|4-bg#OAuEmcKAHgHMTnG$^TAwecTzHvT>wX|shBb!l(V7X6^T@6YeOX+MoLr|VnfBW_+ZLx6GsD9 zp$a2D_`-$;HK=sb5hS1i1=&>6QPfidGEwdr8eao0XHzj7KA6C!k?`n91(vX>yd)$_ zLnTJ^orc9znNsGpQ*GA=P0mOAT^YYz>SZTEITIRS*L^Az$>NWLhx>N_Qb>UBIxN%G zG?u>jIb9=g_mgk#OOfh%3@pHa z0R}8F;D`Zt3ce)kmn-Aw;xhSh_9 R_a=n44*o)5X1UM@{0EmtB0>NF diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac index 13c71956cdfa0560245f6ffdbe691be347d136d2..fcd0e4f9b65b163ff36e1c3f0434fea54eb29eed 100644 GIT binary patch literal 17931 zcmd72cRbc#{|A1(UG|oeojtNH3fZe<6^e|@$ZUvgnVH#Tg_4jRWu##xGh0>&*(qBk zMEuVC?W+6pz3=W{-M_zn=kYi^uXCR7=j-)4^L@^BSy)-QqESdXkNXed9kjn>ZQ~#< zZE@kk6$?)rXK86$3pXnp*FD~U{)Ke*${<19gD$QX4%QaV)-K>lH-`(CEU(y@?@53X zdmC2^YYSJ4f9Xt<>EVG-gt(HUx-kekrF2#g21{s2g&O>)0ht$M5s<||J_7Plki|ik z0r>>TsvxU_tO2qn$XXzu1^Ft-ULgB{91U_j$f+P_f&38UryxHAxg6w5kb6O%0(lzb z8IZq&EPV-0Kmc*0`O&CD7?8xV_@ER{9D?v?MnRN5mVB=Sh27cR-Gwm`$1fg`9SkBC;oV5ZkM35+5fnC_B^aMZnL`uF)g3 zB;fCthZYkf#==G*kU2R-)u`Fo7TU;0#tRM}C;b;hQDGC6#)a!z)7KC2W3a?Hx4=-~ 
z$2#FYU}))?{}1cNKUu^-+tTf)4@l3VaK(ZY#H9bH;2#4KRMG!!(EsB7CVcEqCx|gj ztYoY|GYZW{b|RQwFTt%-of=|`Jab;ZE9%U{|FMb4{zpCvMMcld0yhQ~u0I`R zPq1p@9q8bu?weRK*CS(2OU^Y|4X?CsDF*Bxu(Pb#=_3n;JSY?iDL*NH0M;_q7~NvD zWrdX!^`kPQ$g9V;&eqFK^7t)aF|D`UKc@CPpvNA=#U}V`YGP3Tzit=|K3Kwp_`%AK z!NZiJ6pH276!^z#^$UW9H%|R1KPrVW7KIIr3W(F{+OG{yxa{98e_%Zp z6H_BqBUL7bNXfw;ZZ2*%u~3o_k&z%VQFuW3gT?pyzcWHsu-yN&B8Wm`NZ7=-Dx-#qQeO!k zr8xix_p5LIPuHl3i~oBg7Q_rL=U5FTkW=GvQ2hQW*f=bnh+)Tm|-eVj|!M8Qg31Bv|$U@(`{* zLP2cW!3<2hRq%80RsvhYs`=e{{i&_o0=rAhnqL+64J_v*aK!;+PsYc}#NYS$aV{qn zyS5{D(|j`i40sS33BRczz#hiA^WQn_usEDB_(0}2^1pU3+C3il%kWE==~o#_;8!RA z8ae;>jpgTS$v<~x;iDkID-i*f@~5k!7!ASJGo!((N=%@^*>J^s4gRwN3pXrWgZV`u z$K$|JDfo%ii1~S_5I^8|q<=UB7KH=YYaU*xvV%@p*agL2Th^cz7wq$->YO{T6CYtI z^lP=J-dar2V=zFC1}q=2z`uzQp{Qtn`#u6T?4MEo!{#q2#7}~y$2lpCp&|IyVgSPL zrEmg}Zx5j`1i#pVOc*H+VS4cA*YX5Dq8?DeU5}*SRR8RoUuHP4NfqYs3n~h$Xs8aq zjRN$9RIwBR0us})2v9hF6=aoAb|O8Dg3sL!deSzd_%x(pR2bJP*%&J3xmJ3hyT1H1bHrVN-6>Y zY9=Y!2;eHJ7)HSR#xYSCps8Tw0IF9MbrLP+X&blfDB;pm*%9_&lMrhMe6M|B7o-@FMQ%dVNdkLd`5s3 zo9mtf6%&(t?R=fV4nFY#Qu2j%a~S-kIVv5!ahQ%Jxzvjc8Wh%0=b95zgjbR1hJcbg z`dbLdofzCf0P(A2_#}zKc7E@hfx%yzdtO_=H|`U)g@q9T5>AFS7yzvuDV9J4Xwzr+ zAz=CQ`6CDzryJiJESn?p*Wr@Cw0CV`BMuR;=`mhM0Lwo12MD0i^+EatyA_>e50{{@ z^#}9z%uNPl=UKDZgP3g zm4+9tv`9(qSl|#0{?eS$$CvHMbK&a&ZxPU^o|y#$;5up7M@IyBmF$0q0Pf50#}Lre z`UROCSd$Bn2H=vvv}cgW(}M_1H62CX7_h3O&UcU!uk*7L2e zk&=ntr_C_j(tl~sz4vlDqQuPh!4Lt2?WudMxsPdlZ`U1{yeS7)p|A_rmwg+P0B9+ zPg{EC%JWG?Y4)4y1q3ikpYB3HjfUS|8@^Q=-CL5`CP(+ill93P$WTFHo6HhV!0f+_ z?ta}EGIOvuz3+&_&!Mm*A08tsAvXGOi40OwF2}QqfT-PhDFiHvKR~Qtzbb`)SdVLqK$sNgD#5$4g&D0P5{BJpzpH(IL1pc@s=kA%LX!!d~0e-MO$gY)x)S<-%1c?3=fQd;R&CJdhtLIklm!2!p>gcO{Ww z8F}s*UKKk62zZC~_MoVs=q;qA;`vc)1gN~3*hRp)miOMakL`HPeH$+NOLI%b7SV`8 z=~#Os3{Y69fuy}YTO;Q6MoOqh?;-cW*slbn*+_|N#^TwHIG*jo?kBcUCeD#K2Mcr|{lU`DX z#RY z-ays`*lzPqqJtbUS3gGZkhuV+O1CxUyGY!AZ&}MQTZ~hPn=*(4PMGBrPuv>k183HG z%`B_qDe7KjXFhwUpC)8cV(3_q62nXewD172l)f6pvfR&a+C(62HfesiQ2v%q{Ij;+ zHTIE~8Ip}@B18^_Ss9k%`ZD>}Bb$M%_o5zk#!rDH*K-*%-fn 
zjWr3*zwTRPEcYsx&pN&_oB3jt|HJH4R^D@$x#f(DJ7ymHc!Jn*ydN*61AR2i%yXo& zsA98n)~*l8>3#8w#Zw^<_4QT@q(Sx|(0=>PDz0!c{{^?pD9T+cQOrywxl^%}pCgC4( z#XZv(C zS0v!Qg6+~Dmd&qik+}T@k6=za_1MLEUa|86DHemQXIP64u2uKPbuLj{ZT206ZjS0+oK&@5`d=&^Nu_(G0oEaXmYKc>IC6j#^qiT1Q2jiJ{}|NHh$M zE@5%`c)e2iNoC}#(kzpe)|Y^%2=iEsXN^_%$5EX?eXRJ{-KolPdq94`z^2HjhGjj+ z;`0JHYx={yPScq26}qN~qm`)eiPWpD2Wxn%lN+3^3tn#rJ6ngE3LujRg|Ri<5Bcbo zsCGM0my92O>gFiuVu>1(b~bk_k2H80PtA>M=eZl#OEI z%J{>#+3aKls9U1m)NPDmJj@(q7M{|!Z%!WQj~V3yc40lUZ;?D9Ha;IkT_o#b%7{qg zrRWJ2_YGm)Gch-Z<6V1`ae6tbD?Ud#qm+IW6GC1iZ~G1ySBGljJw5d;sw7^Bdi?2O zpaK42WHf(P}sQ?&oOTW`*D?K1`)}Q{DwF=AwN&?%dr(wSC&cz$RFGQ5~OG z@q9H5nuboTx8$?0ln59<3v7E}nu#mV3YKtsD3{vHAPvs}OZ#l?&XSR-%AJ z4)W3QbKyN!0c7bp-@OKOWhth4!-`HiKeU(sBs-EkyT2qCXP0i|{RhJldnFYoUB~jZ zumf*@$QM1vwmp_EX*pAKHSqBwj@P}(rNw7QV->9HHBokOnJdVM@me>Vg=xw)iW9FR z+TFh4cr@?qIqA=laePp}FWSBK#_^8M)~T9C0;%$O`KDOT8<%n4iDMX9>3M4I{}B4h zCFR|1QruuSY418d@+|h&0RnkO>FXEZU_tUfrgD(jX)3CZ^82Mgk0;C{WXETDG;~LO z1@AE1a}p?PN&*hNC$S`ajo=-ASYylM9%^GEX8pCqrk9S~r6+$qH}mtPcAo;g8G-da zs>QJ|^Sb8s8I8zHZQ-4Uwo5XH7b5ye@&}fm9P*{$*C%tl$4RJ8}CnE-0etjUa3!~ zi9F}bsEM;%PM7jy{7b#5%Te8w08@rxGb*9JCWR%>hV9HQ*I<)T5C;nEg{a^thqD%t z4YTdnrx+Ge>u%0-rNlmGTEo`KI^G7yWf0on^a`HEcC-sx-!>zdAbt1IQ-~vj_RANK zmN=3Xmg7M#Ed$`1%^%jYXOi?EDT;QiJ?T_7)XYJ8dU|Oj+5K+z+}FpF7glm86+pZ& z@6gNkn5P|v#6iz9$448>e5=G83MMn(uqgDL z2pxI#KF)YTbHS<8#8>DVFP;FLeIWn*$;HoKT>SjO#n0bc1mWxsmN2^=95&UJek%V~ zRp+_iq1oPX%LO+{8~&C@{YR##B5}O)o3C!qM5i&%g@Rn$(GU|i( zq~W=!wOXOf@29~yfG@Dyr7p~QA4qDt?&wQCAU}J`c!e+5n)EVhr>hnx_xH1EfjGM% z@}yVM%}S+GuNEi_(kyyf?F#gD6ZoV%;}3hQZ-q|%wA-cJ%O|ruc@xJ@qiZ;kB_-LYHl&vZQ?&UvHMqlEmpyoPVr>eOh4xclduu zPCbpG8!)s~C#=_vJlx?b&m`S-rT@xL|HcHIjyes#lqk6|a>L$dMnRiRdiVS~nbqf- zKOMi}1@8=#P4G z*5{ErJg2OyNUUv7(-e7T*|=INWJP72;~LpG)NXed$1C@7{ZYGE_-OR%mcZqlHot7p(+ zYv{)5Wxj8$ku{eZpXzzQ_{AxqI*M^(UuN6tOLFX;VkS|uLD)Hv{FM^x99bg<@EbCd zS++D!JaM;eeJN*v=aWjVJ{-s4WrhVD*e=?e{=R6%uo!*ot7g}*M`-FDmpPQ{OIlj}cZA@n{XxEyNf6Y9-=I)8}FHT~an36`Z*rH)Nci`$e 
zp@BAIuj3a7=6%J&j2lL*vz8!U_}aJCC$27H=`bJf(S0AHmW4ek8E=P~GVkYfc}RR3 zQ>jU61vm(=_nmwKmV+E(9M-*ctvJ*`D@)tD&Q5iwb0@w3p?HQh(1YU;^LSp+VQibJhy#X}EHUpxDFO^r{5R^y#vho}8vm+2f=Ns6GUl!VykbOMh!9V>My7E%Z)q*6gc^ZoK!m`n&YQ@Tu!6Lgpk>= ztMQU9@vFl6HL3WJyPan}ZsP2^yfF;QRq*jEWuKHKJh{uqsE{j&(Whg3as)N@j#Wqk z-03NTGYG1nDLZ=py49dR8~*4_2u91O?d(tj6MpKD{0%vaJN*N8fCKZgPfhlkUU@26 zSdk#0#{2r^%9DbxW=r)0o=Nj>ZjMbQgT_NnPNNa0d`8 z%qu0mC>+x39$OxE(QST1=TNT6hExWjAUmCO)ST`v>*8Zvyfb8N3m-kK0>oWS^3FDL zNEjR-2UFJR)161!DkU1T+&~IW{+>71pVN5epYCn>$Velz&@0rDER(1%lGhnIMjI7# zpw9`Am_eQuJisTsA#&0>yaRtS40ZFgg***!O~%)|xla7%xz9toaCV1+o3;;S>?5p^ zzS30C(cDyzFJgU>FEz@p^1ODc!P%og?-i`)rlhg+G)aqabFypHP3YU}?7oM}nr51J z%UZ-7XY2E%z&*S_%+nBRBry=8BGkU#@OGy$JI=A^gW7>BPu7f0ockIwBF(pPb}`YM z-GbrqW`0-M*Eq{%bUW%e!!@GELrC&538=7L6tL?LpY=jzj@`c6?UwFCS7R#TJMCl4 zWmd3Hsx~s3E7FkOwy2Pw2#~;f@TYr_Yak9-Pwi$|$x-}nW_<(o?RZL>BFMs*Bd|ny z)rzDs)bGk#u_qV@Fz>|GTU=^X+U~Ck_8V`k-rLQ3FyhGjnAJRPZ1!4qgU>zzoZYp2 z>=Sod9m(`O(!#5^cu*u?xiSsq$S%zJmZS@XzY_EYalkkI#vdQ77#XnEyP99`%{T3O z!8{pvJkwHKJX+oJ>@y4r$5r4Tyl)*5*%4~xe8qH_1btVMG2F*RXog?XRLf|!T|4wz zBbAyj;DECzYV6<;S!ZFi&~{@_tc;(C(IwTjjn_4VXW45kcAqK?YboRW8~Tt->>FA0 zeZ`~e4dv&u#Ra91wen^|r_}(V$d3;T^?($d{I%IwXzsA`@i|ZCrqdA6<;#U=f5aFv zUPfh$lQhVzw)g}8V7tyA^_f3fDseZLSJny*pJva})Dc=#Co&kmML}Ia%QA8tXP18S z#p~Yl=S$etGfE+ifIPlz+r`;o2Wt1?43n`13own4D)>M_SoP&Az0dU6-gh%Anw@;ioU8haXjT z=HdIl{>*s=r{}R6bPgQ?nQPoYX;iVwm}?lEmYG(x{#NrW@zPg}bt%{fQ5+zPkzHs0 zmf~#XkQbpI5cKklJ_RrL9kw*4h}|vynASX~;Hot+)oVn9+C+={P^9 zA0g9er+Z$vwpOhLM)yU4cwyc_NKegT;E-$hlB4%+zjKq^of{txi=>d;vX9hz`V=LV zgY)lC`(+fc3;Xx&N{rSuH)7#?E$(ujb3J|*)66veWqKkCMqib>qXQ@laC)=z#jnp1 zc0PQ_qW4_Ecz@YQs8QpqDeosjxnhO9&z@zy#qm&D_sC@Ny*dp(o{|tV6JFS?$kRNg z2h}Q;T~b?Uye4)F*aV3!llMrRC#%;vZ)be{UJCY_)e8zeZHh)Gvq?$&3j#uV!HcIq zCm@p8nQ%O%T1~z!A=~mw;SD}tu5Xo#V{rG=Mk7zJwRoRZoZUaI2XUbNxPV>(??LI} z81y}%x8(l*eTLrO?31|F2esZQEOi=)g}mGSg3}XTsM20|kd)j%sC~Oy~N(#JfjfYwT9Cx z^EMW%C!3k@H~6&u#33S)|IB>?i$*G9Z&kXx=iSNSqYRw_O}>s zmM&Ba6>I8lZCH6zy%}3PrXM=RI;*w1iHjrQ;f!>W=YtRe{SytTxeP=BM^D~-=hKk3 
zGVdh#{p9j+3ee&Kg2xue+AngaO9=?xt$uTgb9!2!c{bT4C2{-1U|Q>si_h-?|6tyz zyLs^Kg@uZwvblxGbpN;2?MH=jE7MoRPCPk37#|zGi?h3nZ9UefSKmxe=_r+>PN!P( zkU2MoH1qW2du8KO#y&w{zWMV4J!ky5spTDp{6j99g$E|)J52d4mmWGb$vwZOGXnAH zsPBg_*TDXftvpsGt0r=vt<@^y?VQ};2!FT`jp(j-tBy6(wj5hLJb%F*7AjARkk#8g z@Qp5!9i4)TuAq^TUc3c%XUNRDKq`x*B0dAmBiQb!1t-nuHivVT+Hk~0(``ec!Q+`Z|aoc!NBm`<~2O*Cca%s{zpg;Hlu_DGsm4om4*v>}&_Ds&V^#@sJw7UkDO6?{i%guw5WZ6WX5|mO_8UZ~NxU)m$ z_!C_*p!_}?`bjJ;<3+09(R*B*XPRZrpDRV3`fllykq_DrwmUp*Caq4CTJ2iST}+Z( zmmWTyf2&&Z^f-In{Hg2d=iF^^c6;kaLir$*=>_2k5gG_6zA?MJ`#}hk02P?h|8_26Q|1|8e|Iw%7$DsYd-6P7PXycsg?B($L>V)ilte1Sg zGB&A49BJAA>Xg+Ml*2n-1UNA70Lh|Y0_nZvLBcuyQzghT7_-2IdQ;y(m<8Mhb9xf4bLu0%C>r0_EP9Kd!IVn}2!IA;?iVHl>Ju zTZVPwb*;M7hAg+lS{dNLyvQ?Exg=!F)n+FT={vu;Bl7NoLqqWWU0DJpefllgn^a{u zyS^fy4D_<|W({=i;uyesgj$&WcS-%Qtf|{t}Ralm8j-(@VB43gyEpQnqx= zY&RncO4O@_l!n^Ni%10DWzOx2|GD^N2M~%#V*J zGontPqQ*aRhUYwrzu?5rcA;FcH_d;|Z2IPE*ML5+?q!N=>gye>tm{tb)xhrX(486( z2e`9CN!at;^>p1}$l%H1zMJp+U|m6tXS3n+YU$nV50b{wQl>b&*y8z|jB zPv#pjbVPg?()ILi%np1WNfep6dk)!fs(Ba^pBXnAehk{ zR{B1_tTo}z%E~A5{qM?wT~VM{r?q~<^@+#!RzZ8f$;W4o+Z;1nCPq>I$T;wIem1+l zuL*Eq-pkmy&|HH$#jbr>Ue6E5Q|xlMr?<^SRCFbC`M9J+UG%}ViyOzMg!3cuffHYE zYJSfAa9Abp28A7$&%wN%QbW%r6(N0~B?fX;?)HylM?Hd*)17Y%+6^z<840CjET{9? 
zrrk)MFEB34#p&IP+LR{ZeP;1KZ(9Ft29YQ6ZQuB0h(^1AEKnw*Gr`0bXn~|yOT?KY zo5^dk7JIl^&?7`WsQQqaZOY?P=GgPyPc>}C+d%)qcBk{8_Qsr&I_V+F9E*WbOa@DL z8THAk$(akBPEl*6RP8_u<`Fs=B=ufRt5{@Pnn>MN*9b^dni!2)CP<_CP~~q!K+*x` z5!^0B5mc?Wazj)fEL*CaT=ODfl2W(t^0`OwM1Qz&oi|b)`LYKLg~~qnL)RVA{a>+F zgM_ZlI;A%ALi>Wyi_Sf<%c%X(R3pJ*jr836%kRD> zeDa_AaKoQv5Z5lUHi`lngCF{N{5H(pRn1@CC++kIo^>Ryc_f=nEHuU64YZDe%(3Ca zGe5TCnL3z+Z*;ooxKEf0S;HB*;LGD{_xfnmeS1K=z;>^A<%k@k{Ulk7ca-)0F$MAh z{mC!OlxmL9hf>=0WY>KgIJ?<~&$YtPR}8JX6;?d!k~dFQl8BdDH3W;xp7TxYW32{K zaPrsb(`^`cUrex2NFQeG=C-YUC-aV2=m=$yQ>Mn&0}{D@KoSRee-Go0D}!qWuMa&e*QdvuRWsp*1U&v0|6;4hr%$#O^zzu(R#Nzu4&lXpt#Ly5|kpJYkpAn*<5nbu|T z-my`o@YpRNeSYVIqI?la*zEYbaGyUNY3`%5mDLM>A*Wt5q4 zdK)tKN91pYVc$^Hm&_?KJTjr;Oy+hQc~7ie^5k~Mz9aCMg1-atYTJl=5Zx+8(#JKC zcdg(l20h9(c4HKf)e=~tzdNtDNf^Uq|Gw`e!q zAcCcqWidkKw#FN}MvoC2Jhb#ptbNt#1dnlG7hdn}R!+F3QzjD8oS>pQrFrEh+K985CbvFR*(I@G^CyO3be}@RUswRGQ`;%UE{KqSn@op*^K2&cD{D3&T>bG1`sYRh)zi zRt^DCyrt&(ocuuU2zlRirkVsU3`eOHjyH;c_6mnE8oDg@Azf8GoXK^05Q$rp5PI# zG%o8932a}0XPRm8S);V0XQ_mRYkOb$v`t;7-_wf}SG~)$!BMx*VyZkux?l&p!v2QC z$h5runY_82mgGh_jyF+bd;D#ph0r(l6OYzV4ZTvUp;Odnh-GxVl6vMXG-$JNymZ+s zL6p+=Q7<36-Q7N@Gu$pcZBBf*flO^kgVwI9QWMbp;qTzg-ad7+Trs;N&vtd5J^R)c z-UT@)zo@P_&ES9!eosadR&aWo0f~N0J@O?ls*UpZHIl0FNxMrArq(LeKhU`sqo%FD z0Qv*g%kwyJz$GlRXsb@!M}1$K;XOCo(wF;IKFGX~y4lw{L&ptY5`y;QK2Dgiqv(*+ z>%6*rmMSXf;GU!;Y$>}#j+gWq2gwC3__FSjUdbO5ku7xX+%2EX7xqJpZI9}Z~#?KQOjx*k` zhcuThR=loWd$QS1K1UZ4nmijoT1G#N^Dpd3J;Ac8JXL#cM7@2U>aDbd3eFo}?c2hO z>(IQbt!rO_f3iT&R3b`V`F!e$%xNs8-PhY1j$L1}ZH=&%w3#ht%;zc2tN{*4Y^#or zJ$dG0r&x#@C=4y+LYhU>oZl`?#5`6F4GF$(zxxf4VE;zdatyZykCIKIBKnKp<~l~G zVW_`nH$KvfImJ3bOv=3u?84);Yoe-B;a>VKIiazJz9u=>W0?$1`+$1L@etnNaP+Rg zH(VS=wo>OlrL4yCus68#^i{X2rqxOm9DXiD(Nmvd!p}^z3AE%v9*Uey)!iE43NRg2 zB8hu3UY^Vzudd-ATbQq`p-Suj7=qJt5{L#m2r)sN5I4jN@k0X85l9o#h4djTWC2}( zE&v;LcUNC6beN{x1ifl5_AVjfij?cs0b>7o(DMl ziU%>`K^%CH7#<{z2g&0>8hFqdJjfUia=?SU@Ss3E=q4VNhzC8ugC65S&+wooJg5^7 zdWQ$i;X$AApl^84cRYv?4bhdr_0Je9s2+DW$2%N#PFxX^Pj$y 
w{nH}-r$zTqKgRgej|l$sBa%OTnf<3j%AXF~f0_(``cdAWHZKTFI3z*;55trD*#H0l delta 4427 zcmai1c|6q3AK&@zF6+q6x^lFvb?-XXy)C)QnTI^Jl_Mz?$yJW5j_0W)VY=Ov@EOFWNF%LZsw1h?H}HFE)fGPyc0kuLk zA`xN`wjm@VWFqjh&?g%q2jRIikxT}fL~SBLQ~O_uNs?sFCX2buzbM*F5=C2l)~JA! zcp7+YDCtd?7;_~Np6YCtCCF2o%?c7Ed0Fgz@{~)pW4w+&#ZG(9sz%zg;5L0jLMITKKO*QEgBx!rs+&74$uB1Xy z`CkaKq@^W!br4Xb;b%cTAOKY95I?=e<@It~=4Gsmzn_VRT^p ztrhTraGI1lll1>T>TtDyAs|3&QfNhjQaQ%o$h-P|e$|%K$rh^_{@s6u#rxBkWl}vK z#io`}JzS!=%6P8?0yBXZ-f0$7wQ1Y0Z#g##dM?LS#WfjwocOX^wdR|zGPh21mN&Nq z=2pPm0+^eByIEe>dF5zo&nl>-rNCb@Ym4s*HrJv2$~BD|t1CZh#r}ygeOBBWC0ar5 z-Ox~#YWzO|C8qe_mIIyk;Cw}%(m-6vUh^?Qv#0}E7Q2K9+m;t4ub0|B%M+ze$=B?m zN`!sJgwnxBrHli z^kA$@rss!kL>qrk^DgE$0nw_WDgvC;GTzThy^W&O1k!@nuq1GgUPLpDYg9P(R5lyP3YiJN$ZB z&dpgt4e__aPwk2}ZTGx;=^sj+Z2RbS!P92ogV?1Vy558LLuY7&q`GUjbJsp_eQHnZ zXpTR?XRWJ_rD6VC&kc{Cy`q<7x7|t|F)0Xisa}P0qn|~~z2h5?x~;r4Ak;XN?J3^p z_iW#Vky2l$`CwX-S*vz;06L9a&md4)V?JZP-y>hHKcKQWBsZ&orZjS@&|{W&HcafX zzpYB?DhbDKP_nrowd>F)6MkCU(3>B%zP((~gl3K)gixwCQ8SX^f7@{{S-~Uvgn3Jq zta-=hez+2Bc<_3H(zpIJu`^-( zA5YtG<9@9TvktObp35p^6H*;s>)BAY!BvK4#}m%@9K0nFrI*{jH*yk5L0=tBhA!$~ z7!_6$9*IZPepM?Vi=2FVVM(vd*DL`)XVGj?@19~Ia9tUGHDqAP0N&T3^9_(MBB)L6 zM6;+sfPJsVX^+Dn&-JSqRP6mZuRr>e#~;m>lpm4ahKPgnB-+L*Z7o;rj=RkYG?n_| zdW7YX-EVd><8ar37}3EWMNFiJ`_S%6UufjqS>q&e_HTFh*e7u-uY~_`s>yNJapU^# zq-y+Z!xJrOP;JQQMXQOYbKm9TSLl2C@|@<`7BH67U%#Wg!CGTkcP4;Gbm@F!6q_h& zVCen%evO5r+fbF4q+lM8@lc<1d_!bs`NyaWFBV=?ADmW2T+RMlhPl?7-f!yPz6ddK zHR&0Caop&tk4HB5^HGs9vmsMNoQH5xP`zyMs$)nTqqJO|cgHtv!ly;6>s4>ffrVBOy-%P{Q_U^N!or3 zmYvqGx--nGG8ynE5Zylyuln2OKm&WRX!#jYe{)2^c_VC*n}!c1g=^CvFE#Q|EV%T> z@>JE4w&4E9^fWP!^w-?k$?q3w8G5%B3hUdEtx!`jxKy9UZ=9_(9{n>rP|WFjo9Dpl zw{qHvgRc~S>iTn2)0X9=Q46j(>a4AA?dE!6AE{L~WQ+Q|jQ&&d6E+4H8sfsO ztTqXC=W{rSgY&9e$G1*$5~w%B_i$Z5O}@{~&P>Q$<}z+vDBV$*=PmPLreF9)Umf>` zAORH&XqX}fIRRFvNhyUxda2my;@JzL2fg(sn9sk(vEK`%_zHB)7uomlr3| zSGrO_Fj0BtyklN}s;A&lJj#02=lbMJw?OZ@tJKz?{%^EA=$*Rp>0Jrf6SE5^1fV+WnkNaBJ)2j?lXxvo6T%6nAi>X^O3I^~mv5r2ylxH*-- 
z?%1%|7+hv(z#o&kw%k~_rts(qi;`WAs&aYl5kA1qUsx!AASHgv&<~wgT>KViF+Nr2 z#i8h*vv~SNB2}>={OfIxXDM$Qq>?GOYSUK+&t6)Xl}A>$NQ#`xZ#=X?X0k%(oA~}S zveAR|MS(F12VRp$E07ePk`^2lyVXSu-?*t`=YDBHw?{eDaa+^PpNv0h6^?X8RFz(H zLoEF2sP)x>W&CKTQhIdB;M+Rfk@8j`3y1vqcy*(4p&iB04T>{rQLZkm>a`9~qa$w`3(9 zG2Q7byyel5b(W*jMfv7q`<*d(jzz~JG1$qXgFoN|ON)%Iw@ETk+lYpx&Mb>%XmDg< zEHb2FFvo~a#_zFm@D4I$VDO`nJDG=?$U$3U8hIt+%E1t07wiZItBmPnKU779ea1A3 zKdPdlmJryhVObb-G<7GhLACPmxG4?0gu!}KI&R=9zyVVlIT%qDpd_1ywPKLXrjyqp zgCYz-hMs9IEMYUq8(T>4;)rqOov`iJ>-w2vg1J6m-LH#OW}= zK@GDZ!sq66FI2=+QvUlYlm6Tx6OQ%cha!6Vo|Ns`R%*@j=Jlj(NUrA_yp2N+qH~{+ z!PSbn74;yoWc|u)uXofBQHvN362tEBLl_v~2kM$VA>F9Y4k}k8rR0GrC4K;# zP`@+++Y5N_rwG31urq@iXde%ai)~yPusSr)k+}RibH&!6dLk6FqVtWWWeMENB$M6I zU4ceEHn~)>hZ=^7eUBSX0l7W*?fmvd9Mp)~iNaSy+`w1+Yt$Ci*=WB~f9JCO#|gcq zA05PbAN<+lw$B;Fo)5ba2j_L|4kR^4Y`Q`1fS$~^6~D$-&$Dmy@4a*_EInk+s|WhK z(U4F3Ss{GyhiO@K>zXk}J1e}Q9yHu@)-wi&Z=|1Ea&pZ{fyYGHX+_sTXG;=_jnHBy z7VRSQxKTE#N@;0nhfhbnnUH&Tm+^?(+iN@0uOti;A>EqpwHFNoP8T>M;bmD^)W3h| zQ%?b*yKOLK+`wwj0D)#17dWMIlD7|qh4VQQWoQPY3yQ&mz@OlOlW{z*Qpao7JnQ(+_USKLLz+EZkeH` zvGn=RsanCipL}y(s7$#!SyZI9I+G9y$qBsj_xpaE>F*$;`d-Lk)Z|C+<*Z;+bYpXn zEd-V>LCa@&u!W|MC6iznoz4e%+9Cub0Tn0#8qfq#qKp+MpFc5=*I5LL17lrwY)VROfL4S7uq1}$l|J@VncTch3jr4CuSUu=> RZ$fD6;4ciOrwe_+e*m@PA|?O; diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac b/crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac index 57b462715064d7dee0fdecaa8a5559bae2386d15..bf7a8dea266f9877563f2eab003f2cf1c71b1877 100644 GIT binary patch literal 17955 zcmd72cRbeL|37{`UG|oeojtNH3fZe<6^e|@$ZSa2GBdL?LrKVvGSaY;nJp`X?3Aq% zqI}Qu>8khZ^M3dFmM-fw@&+UBaX zw8e!BS1deioTR0%*tkFt$A1)CI$g59U?VMk!N$ql)xp8WX;;n%{*)9K!q{p|G2{s(-J6rGF)$ck-+Z z2A>D}<3HI#{4niLI*5CJv@Q52AtE#@8YT3b|4)bL|0FNWx{s9+i-(I`m7j@~mQ@@M zil6^abV2$*Nelii{9oD*#mNZA_n)oKfZ@F2EV*CjqqTu^qR*^PjopIxIzXi31&HxDf)MvR4xz%z)GLsX5Loo%6wY-GIP z;BnG_K@=4>QE6Pbt~GuA5I+V>jB^VN1%9j}?gNIFp80>UZvK-+{HrZpfBAs)EDBdF 
zNI^{cKMMXa5J46FUW5J@?+@W)e>p*nVPYj?{gqK@HnI~z^m+-do$AyOTf~|3`dyJ{ z9{!h2MD}0uQ79^UW)`?HsBkU9fHVN#uNFZ*3I6(If#t(Ekz( zY7hPW-v@)D!QSD>GOgtw3CHr}28Kx7E_;Gi6YoF=HFe*@in$yab6j$&!D@J>eM>Q5 z|A3uk#ZDhtDC9w*NJ#lf`Ten$smACQqb)0}oTx9A8AV<_wsp2%Zj#4$0gGvkcKevx z^MD?E3>TZ=@2QDF{r|dQF!*2z6XFLeI|d&s|Ep*Iua3qI1{8|r_Z0ZYYV{j}g*Q(9 zC_gHNF&2dli1d%s>e{ajPq^$qEq`D=786q=RU=gJjQPwBNoIAF6UScC6H6& zaZvpEDcCp(EXs}1piCN3v*?T4ak-dHt!a+yc8p z%$i>n`VB1SByhz6WKYJ&%EaGy|8XuS6}$FR?w0vv{2A~dG7|n!L4ZAsbLW5Ou*2eT z!r%j$Kgj>yy=ZrN;4i~(U8dh6|a3M|~Pa1G`+fgFzmN2TB=RwL%;p+fwC-;w^|5LgrrV6VG- zqRI|BW?>f;du>?*SDdlWld5y>yiRa{o_cFBMUTM%H5#ydzykj!LWH8C`Q!Tt z*sy;^`45}Fp%6a_mLBJ%FouTUcZ&fCzn8)ZK)zjs!Vvss3o>D(IE3lJpWn+9_=tKy z1$RA?{!sm^Z+@HMz$R6g!!M{PtfHYh{5A^E6H>)e1PDk>$09)C;A$cQl!LO5BfxFF zw*vu>#Q5NsThyPt`GdJQB?`D1!6HPMe;)xinuwNQ0K4$BX#@exZ%g5~Z4}m8P~#f{ zZhK#gMnG9DgDwpAXb=B+MKJPQ=9E-80@O@WvJt>lR56Tz_l;wsFhF4osd=$UmV0*;dyQy^fDSJD^(hneP9VSvKU zgj{+8gFTufndlNip7SH{r-1j#y%vZ(*HkXhg@Bf*+gu1ZQe%Au0aQ1vcef7gkrqz^q-fgDY8wGN1CRj>_GnIO z{ag|9T!h}wMg;I27lTUZzoAmL{svsKM|;;7HsTNg zn;zqJ1hDL5e}DiQU2mjcuv<|{_HYRbTYoTb*W6^lHxi^|qeL2M`#qX_&^UAtd5$qj z0@-r0W(GC*NXhE~nGFQQqgjyAiX91<+zogB1nB}?g~A?|(ZV*x`j*rPe4k1yMi=fc+g-y)z- zJu?diz;)8Dj}8d%EZP4K0o<40k0GF`^$RjPuqGEC4ZtOPv}cgW(}M_1H62CX7_h3O zPIr(J&-1er2K190*_Hxed4MD0(Q2PAHx8s z3s<)7&JM>wZ$YHQ>tc|M6K&3;q8fB;76(_ILt(eT}E!?$Xq zyGt_Lkn7zm7?$?bWGY5Oi>y9}5911(~;W4rjVxtb1$RH)< zay+XDh}@Z%LcpT<1H=mUt5R?%T(U=VA&FD&hyXRU12WvP?$>CDfz$ToCY;fHWHc zE*Xov8wP2OaSL3sM{_evA#V_Yl8>RggC@$+?=oBh3=7iV^gsZ5f@(Lu+#11KNXfM? 
zk}C*!(cyv*gFV{w_HSTCo(p}VfvgbN_D|IQNXgekNhbtY(YTJm0EG<{l`%#@8++64 z=n0HGrw^Cx(VVZ({O(TfPEllvC>d%~?6$AlrIFp0_=9Q#5&_o1!=eP?2o01X0}H$> z3Qv#z7v}tLWpW@2S&31VFhF4+Mu=!2;PVx-AOxfaP$nZF&!2er#h2T;dv`z`3ooUF ztM(Wjoy`C;zp>t>p&^?`=H3l0WQvXPQVAt|GXuKOR<0OY4^K<%|FB;$O^9S z|Cb>Cq_i71%5iU&e#cD&zCFPmgHivAN|>IJ|4(fo^^a&_L;o4*|2MA>;1mD8!BXWX z{?`o#JAnfDc82J`59)W@{V!t&8CKwr-`)4cD^+6^xXX}^2IpAem$enDJ8>sDbEfn6Ly)#rNn*nRCr#u zzV_wHs=Vzo6|xKfE@pIL_I;JejB&IDLrngg5_z-zKhB0QcJ*>6nX9gceKr6~*DF|4 z@vvo0ZcJ~H&%lZMVV8^VUoLXKW-KdXoS`-7n{W9dkO0I0^V&2utosBC)wF6i3?Fu~ zke~lzL~rugd!gDoC5**ZUI^xZpDR?Rro-OtYy^>VU~1kyS5rDIym|C38s|aFiIMJy4Ix+Sc!`03aN8V+slp$oe#Guz zmTUdu%3Qj3PonKT-2nGFh1-E-u{V)*0k+%xGx4e%F;_oE@Q}Fxrb@Rp=DSGTelJ+u&XkN15TLb6Hiz>WPReZm5BZG@B zr?WWiQEyUkrS?UW0$U2eL!(gCYC5&b729STDvdtEJw~s@`*|o1ont63kT`Ksifyx{ z3v`?!Z1;ScxEixRt?H`r&z9{&V+C=A#-_N zRWiOlYA{Un_DR|KTTudx+g}f^Q>R7K#|(>ke8lP1ys9Cw)@!a_l%Q0D=Fd;v;ZSs| zctmqD`r8ESkJV}?+>72{+wTC-1ARwrr|nubY0&qE_~rCM+TsiVc<(m#-b3AW}D#UqnYNlzEss_Fquxx0doId4BhFv#_izt-NP#i!}9rP>ZOHXWj7vPO53RN+Bc>& zLCys|VXf4imzH|9LKuJ;A;=^=4kr`?3eMGkx^pM5d}6Jday5S%L&NtaZFc&ao`C=( z@DCn?w3h8AKQ=z%O=K#Ij6 z>sgoeI|L<{SpWwftMx-Na=9D!JWjlMr^8-|?o&!QN`F-9>IwV`ukF!}EeXI=gn37v zmzd`1kQx=XX!p1GP)!szX&X zYlQuYt;TmD$-)B|5r;Y=z{jAxBi_Qz43 zKz*$E*xjkhaeF|1z`&-+yM|>w$Kvw>IBWXByiU`Y@fEtJ@S~Neu!+=btp{s(tCJg? ztP5Um2RT`Xmd@DZkFdVCwwHTT;HX706?Fj(s=`{L||@G0XD-)6Iu5uk2~d{ehEhH*E$DzosEwtaK*K!40A zAFvDSnSG1k3AXY6DC#U(7gI(=8ZSjpsJL$k>z0YRH5~8Kn~c-TQC;yq${DHjqnHr# z9C_P!z_>a@8}I3VOLe%3=4+A|QhjOEH(5@S1^_J)`e=Qj~S{I?>WcEb$=0SF;8#~IV6PHE!!N!2t#Tc!2v+sP4(rs1F+rvr}kjOzkI({y!$I729J?FdUfUYdXG;e6pNvDVQ@}Fc! zl4tjq}afl zbUi1^4lZ*A8Zln)X0tF&xlVE7b$Gk$Hyn@VojoW0IWmq9>i0#v*WNhX(b+mx(?}py zJ}=)C%X#xM?mKY|BP%^m&HW!jU%8~bx=o53>?ZA9#z&sTMjs%MXOzBi0S*=R2CSulK zOKf`S$enxg*K;#JPiprmz?%_R?;~3r3NvqL-k8yd$kZ18+0b@L=I}yzKS}<;@{?oU zfj*%96ycW+sp~-(wX^elqNs=DFCO0?`y%PAwV`6nxDweT6X_om>C%7$^V)M`uiU=; zp)5I7C6kgeqIKi_$%{K3>CG$k=`<1NoESB6cFXBfevE&qH+4R$o8oWEFl{YHwS9BNNGmJKy? 
zke;4i8cBA$n?3jSvE+r797+WcFU&jivOVT$haqv`^UU$l#xkF(Yg+z7)9BL5aQE@` z%syL3oLzwxn|bn?X}Z>*b;>;#LPE#qsOm&H=h>%1h~A-dGgU-@9yQS0vY3>7v7?r{ z8pTrdA(P~ZQu9XwuptKG+4TxJSx(b6_-in)Ch zk$KpXe(vqU2ziPMhy&Jhm#{RDy0atKrRLeiz8T9$?w?9D;Luv{+jWub$@)OtalnCj zS0wwRUk7YtWahPe7nP2Bmf?Bf`t(+DNN2cy2gtq0B0Y_zkYJ@>lYWl zesJ;Y_ZERTyMrant_O!rb)}!mzg5+F?t5sqcieKpRnmsP*lA8*~REY%kpqMCWuAb;l+y)TJqI?6I%ZhlV`0~x~6~7 zSurG^{`tc&O(L4SBC-x#m4U30G-iFLE=aGt7@*-$vEXVyC>C)`^Y%oBsKK%Yp& zbKXV0m&)nVIv*Z9M56GPXs9ysgV&_txyZFzq0H~6!8m{~u-m0B%y}J1YP#XzLp~rs zd&+o)FV~v%GHIua7AN=jvuXi2yTS6L*U-&MrBbgJC=AjpdRpxY^mP;Xq&wped#P`Q zO#QOkrQFLWvpjhXPgtDb{Hwc?kDnzDnY6D_J@wILYViEXsSAkvL0*kz@N^_lsMEi_ zm1x&<`PS9{5qM{=bGb-0s(8n@U0E}4md`AQW>h0e|A2;c5O+|R*j2? zlGR47#AV%}V65WiUAc?!%N6L4dUDq15js4ltgA?@ZBNq_d1TqRSSn;iW}V|2**Mg0 zcNfPi_jdVFyIA;W^xBrd?2iGB(;PNFH%=>>p%Mb^WkqahK!2dXJ0;3P?^#u|yQoQ7 z&Li?eiTgX<9x`e6@ha*6uml~hydBFI^F`+t=abjO)+v-bl z?44pJQL{nVIgtF666+jT!w2viGLu=hG*3Knvu%ATXMpFOO0GT}$Kh#)1svEe+Kc|a zXvMG?ee0`cm$dACCk2D`)dY+em?!a;>rtd-v#zbxfWwWJNGFlW zkG4kDi{4G+!9^_&f)*}>uvrvFf>X$gsL1slwAoEYpNeIppfm+7pgC?vnTl2GWCY6^ zlKd<-Lli$j4`N2;N2Ev=oHS~W*sA>ekiZmoQxD%;<`}m*@b4g?o*go!%nr+CeJ8?p zue*ruz+N7_rV;f_UL;-&XIqy6~mL ze7r~ZeTZBZcCTc-9cs$FpVRpv@o7w@CaD$RAiUmp@(EZDa)@zQH~M;Uh=Ep?wsoDI z>d(%f>HQDIGpvCg9EYeU8-3IGvzEAZH@iCV$czUpq%z4$`ulz9{2Y_%_aQfnface$ zIQUY5ouc-Hor;r{E$_XkbGq8WX4Yj~MK#@RuLR7(adA`}YF#THdT{#s*~e>Yd@8gW z?+iOU><>Fn=eS6cOc1;T8?_&NX4uR!_Qx)GIZn4`>QbH{O6wC_ebY2*h|SVPLL;w{I!? 
zq%7gd9X>{dTtSRJ9ov&5sIhmfLK5IkPZ69!Pz6odQS&#f2L0IZM`waDT1IVWhZ30Z zQ-|bl%30j$AFu-)n3sKOve)#=Q^~@L1OYYP*DqI|6nr&Xsvq!3ntyX^Y@!lOU`03% z*)_J>c*Dd|Pw;9j@b^3aQ}^BX#ca!od*G6)6P>7*m) zbaz-6ALHVkA!}Rs=x*gN?qZU6wvj`^-~c(8vW}nbJknMv(U|21QgHI~xViqE#v}i9 zZ_7tU8kvP&p^jvkM0Jt8&WJJE$e06tj)24r@~q$iKH&|Klh$D!_>-ZiTc<7LX?SZg zzTVAsx+D;k#?2mwNnkw9tC=@U_DnQ zjh|1GvP^v zIrMx`J8wwZ=k*%Pf1e*S@>`Slqj!Sku-+*URf*l0OJ7WowydwrADRg_NrjN@y6=C zova5V4!n<9&Ev*quV*)S?-Rh;UCYNlaii6dOwS`NycW%aBKgXdX(&f_Va}%{T`268 zpcjY(zUepq_+Z7zfVJMm{6=rSY1a$p$++X0mg3@3>Kxjrtp;pdU zOovI(cO@Cayq$$+_$5uXj8@yVL#{Vcsrdj7IE$jj4i1rZ7DfqeH}=HJ_=*@^QeE44 zT|;=5y~bkaslu?9GS0uD54prX5jEde+`HaTelA;FPzqivZ#HyX^%six_^?n9NWsZZ zn~jC$4l5s@(`0Ts4FO%gT(I^>j3MJ?RJJ%tgUo7+AMg*h>-15d`J<&0cXN4VtH=Dpk>fbK^qVhU_ntpr!mgfC3TgP~@nzdC&JJItb}P;>8C$Rb z(+Ej^w5T&$J{yN)(l;sF`Q$G5nrJioVVCbfmgRDqqi@jf1A%XBfclJ1kt1bkhzxy; z=-E{%(^M8InIm|Wn(Bt$gchVSp*=yM-(cQZ9~#0nSy$WpH7!p3L{jU~Q*Q4ct7x{Z z`{bf@#&er>asKf#hAt;vxTnP1`{)(>b2la8lKln@t_y|yVly)ON3U;#$%mZfjeU8f zm5tQwOA6X`iHai)3cebC`a*j6QDtWyzTfN5oL6vq?yG_4(7}+o#!Zw)6|0Q7hQVo> zX+`UAHO~?+eZ^Rpf_)Ih0kRm`4d!ntPF7d*!qxo)U!KvY;N`x+UNZr`sJ*FY9C8qXme=j-?*cslKL&+FFKs=G8S=L(| z52baFOcvj>)8OMN2{AL_h0TgQ&0~5{tzy|FwS~s(V$r}RNNkzBN8&tKJG&-40O4?uGAKVLGJpDKUk;Kk~<0;i@@@)y(mRAaI@cD9ms+=8yx}P>0 zd3dhHd#~c`{%t*o1MSBJ^a^+nN*BkV?+Lvn_w(yB^!jF>#H~K4^-f`_(?Bfv-Od-B zp727I_QHdt+$XSTq$2iGrMr9H zjT}D8z{yWW(RI{)*eM`vAsy?f%~mziL2}4IqJXvVa}Tf5?b3`Sz=7>jiQM00n=WVg zT>Dtwn5k)-EH89{BlF7LmM3T<-bmxQWDqaRdt!BvTgs_h^8J-mUscoNWoniLha(7X z+7pYZ3fW9IPs2t*@>^Iee79sD&3LPHp<1X|Q+I2_%8TmF*y1t$kSW$#t<_Cj90?C+ zq?0@z1QY0=Xh_XvAo4$Y^42@=hP0J=N5StWmxoh;77q|Swm8;)kvm;VK=5w$n^T+aCtgT7O)8eh>Hu^G4mwgKjS@R3w$nEkvaIy{&FPDwJE9z9M$w$@#(f z*r*+x-5qS}u|B=}W_n5osT_4W)slzIxiO@frzhVl8=o@v4g~Yfj~D1U;m1ua|76HN z>t_6V^y+hBKO%^tuo%u$qkP1hY8V$ z?s&E8STk+QvBksl7u;c?^0Ww9z1#xc=n~n{DX8cQ8X4)uTVQ_x+Z74K&+>-s_m#Hmw#XQsjCkY{(-M`JtOb`cb_bu(u1H9kV zre?g1Tiie7xKSt>U)dm(Ovjco~ra%f#es3O3r`fZnosQL>(sRFdKI{}`aY2*1LPEj#@E1I}H0d1BAN+t= 
z5o4pu!FS{A4q@+j#z@f5It9#Kf|*U-l<2Il29K`%JcYCCadRa0OwPCU2U%ydy9Sm@ z?It72%>$EU*+iZalu}k20X-DBvqR%nPrW!rqbEH2kKFDc(;90zdvqEV@DS+1cE_M+_>@g48;|gdo+aDl8(0h%O3^u7 zU$)kHl661HrpR%RaR4be`B`Plr)j-R%sW5i6mt1^V&~@|#h0N2d9~)BhTZo+`c(WF zv>&*8L|GJVoO7AI9ClxwkiC!flJ{4}CiUEK=Gi{H%NE~6Y>oeJ2zg0uU z>!177f?W}|D+u(0pFQhNEZ>?+eh_y(eQsEhUUOlntzK$izEIkeLWO6f0H^o2d%Y(h zR#-1U?tS^=`f9!TmnW|VIw;4c6wzmOx3LeoOWiRT<8%kH{wjy{zkx)PIC}3irf0yQ#3>np}*tV$;&{ z$y|)T1f<~PcgE}VlC85s`LK$VEgdu4t?+^p^(rByq4x4362W(wbGtN@-+7|JOI9k6 z6Ae|#u%D(%gX>pWl^vF45?e7MIuAun>|TIzAOy&)XEY7d{n6R>6S|{2X6INGQFl}z z=L(KwmHq6ALuG0&asGX?OTaX!bEMA5FDpz>d(nq7&GR%#Zst`m9@Ag;jWGoW8b4v6 z7n&Q+);g9OCa$6oj6cc)$&agF+Gf~xuxT(^WM0(Y-;2|WCbE^y8GO~h-g5ThkIsTT zbsJu$FH85)a;8_+ofDjVXh@JqFbVX9qy+E2$1GUtYK<&po>CWPtk(Lh*WcbUEl!qJ z-NuaLy`wH4sbNvNT`Sc*VoekC@n!s@^{h3yVBhlw7N+>Tuvvc5k(Qu}~C z^!>*d*3a|}t-rp{vhm(u4>&N7AUw%uI%(tFYzrTWi@W*N2Zyf88kmM0+jJ$xmWCbX z!hJ!Ac_eQsI(D-HC0iTd7e0MKFrzuN^nHF=Yr>tCl~3gR-<1QqqCl@sYyE`F6Zh?{ zf_DFtkIx*pIcBy@jH3LJap3FxY<7KL6X3wSm$7jnxdwHLUHh^;pC68=*x_(XZ<`6P z=t}1Dc20@B=#6U^SB_5!=SSiLCcfU%{G9pWuu9%d3Og?EgLywo4Lz1rg!F-y7|2z* z+dq;W^axH)cfKuXH@t9XB!rf+oX&fjb|ZPdz_=_Ir*|)MQ<{kPnZ^6OY5lhuL>|Pq zed3cL8twkE0GaU41QT1J1(ISd5oeBUCa=j_?BQlX_h9wF>O*R_DUVB;W6yU#)vy(B z1N{rzoz8>W8*@tPq=zJPECxm~87$pp)F-PZXD)C$My{1owF51fM|jmBsrOo1#Uk6% zMC!J>hJT{c#AwViK^o16DnA|>R=MS(dnY&-k~aF4QJ$nE|0I>>!VTk z=>hEm+r8qMBXW%PlVmO4QP%gz6vzwoC%-IHsyRR(N@>@VUG{C@>}DH2*9t{nF|_Jd zSn;S!-aJ`JB3^3M5F{>p&L^>twHipl$xo+Gw_)6EF~LG1eVDPE+qU+d%sXPCBb0%T znHpOUNaXqfNgU+;J&ZT6494MK)jSqK9?KOa`<$lm^XK_{?cv4I^X^U!1f;MW3d0aL zIjV3+>)qzKreSM`ZWWKiZ3kbTFN`78;vYT*2aW-~SFm0{a{tF?F{jh6S=uxi0>(2wO$CA(!SZ?8uf_P!OgDYrm z%s$%32hO7RWA3OK6Y6xHZPylFx-KNPP|9Jh_a2OAn0MeylzYj*f$e5O!|HIm%<-1I zIe$~FA+;7s-zu$4qC6g)eMLS^z;ViW*i{UD_HQvxQx{ui4p`~wO?W;~FxQ_$7 z@Op2za>6y8GLeYp1QpdewTxEt{_*@Cor+_wNr?x}p!m9af!$+(m#G_BVt%86r)-j- z(lqZNpML>yg1ZV8)uaon^nETR?n$5r$NM_+%v;Z@be<(^Md9ejT~(^gi+onjW7#>2 zT3a)Q_LQPH|5~3e3`@DhXg79OaS|?AUGQv5byqQqR1F25mNumo9rHkW$({^5tXKyW0nKhTEm5&57?ekf{x6(Arg1 
zY66-c{2iRx+o!IUD`t1(*{;pAXGd@0U66D1jqHlk4D$cr`(!j>1*f;^pXkffBVY2O z+9-ctBdHpnw43x`YOPZJ1D$&@YTEh>pg&-}Jof_!oI^8I;}3BrHTwZ_-NE4MJw^;&Na+EQLiV*VW=^%fadW&A%1kAFOw8%*|Pu-_iez zAft8Yg{-`vZH5*TG*-OS0_FX}URma=VEh3db-nhyGLE1uV`KiO<2pQ8&7NuKp5Eu$aC`4@Vm zo?zKUo~k`Jyxu-fH99S!g7fBA`?j#+IyCQU>)Kb~pDfTbm57vAKA(Cba~ex&_w}}h zL)VvVTO({GZDxxZ^LdIhYk&h1+p2>@PoBBhDHfsz3PTII;AYV@r?(3eF^^S4f`e|@ z?|cI!*uPP=9K)@_qhyn)@c!brxenoK80zoYjgRzVPO(l9lX9;EyYM*eny9K&xR<^| zPH3#5uSw4JSSCZ$-oGAlID|Ji9JM3x4Hrj|t<>~~kPj3Hg+NhIG;|wEg6=>mPzIC_ z6+tD?Q>Yqhgu0-9Xabsr7NJ#W9om6N@gPP#hyxE2!-J&pAbC7U0}ncb2N~l*SMeZE zJSYGUx`hWN;z1AapvQR7Gd!pX59-8&-r+%Wc+h7&=o=pN9SL_nvdE>wCWEI@h^B_cRfXMAjs05>?Kh>+rAdrmAV`a`U{h zHB==+%ef`Qmf&rITU)uGYBt>~&eaj%vh1IZ)!42#kmH)p$HSegph*8SGVA%!7`mDa z0s@mk;IjYU_&+8%mjoORwvs4nf~cvFEjDx@JCKB}NkBvdJ=E3IRish~QZ_3x`_QI22`tBIs0&*T~-Kt#9f6q~2h#fQ^ zcxD5NOR^LPqVo@%jtiQEA4@aaFNE9Wofp4Za`zlhgfcBxafl)w82BW^;F{Y%A}tke z35hCIyUMLImW;(8pQYhovxcgc91d4YM27Qm?UWN2)qD-fBKVV!$2+9Eep-dL@(wla zXZ#QlDa$LvL0P&UPeJ0ch5&v;Jr+}>wdeAKrSBL|89#IivRAgmld7Q-M-hjM6F_pQ zcdc@5_L{Q_&ez8MJm5Q(k*bj6JS<*LC12r6$1Y4n>E-ZwzeJxoOIdn9>+b=?HnNR$ zl1})b#N~bB6UsxkyIWFOm9w!j9)+q5PpWtGwoINX=_R+ZUG=hJumYK;fS9E&OxI)U zI+T&1AR;@5pY36PuydMNC0UocNX_qsV_;DA6u0nB(rS;8#%&pQ<^W3RTy;G?axG+bO&!@bOnk5|03p|&^09OD^mDvzRKzB{iD zPh7dB6KB2KLKQJd2sFuV`69z#dEOo|wP&2y+!zq59ZGi->+yMY=>AtmP1t>xsdBR$Gfsk^trErgL^6#TKQcihbVmh z<+bJA(%;hrd>lm5MLfDL3xP%@D8is)@c{N|(Rg|&7GczpRe{R z>y;e-wV*fRi|b!a=A@rt9{PyG=ZUwDms**xJrI4*z|UCn+ltc$uIard=aWx$9E%hg z{F%oHy8GFzfz(Vsuwjs5eHC$c+;-c|T`$`zYQ$)P3Hn~}U-vjopT(;#>XzLfHYhV1@WtU> zz7DTFXnCxL3*X2qpfZO>-uFi_YA&M_@(X3Ii6I% zXi=$pvOem;5krHu_H5i19A%xSuouG8FlA#s_B%|P2rD@GWn#PGK}dykh?ses@frP)1>(fCqF z-u3dB!MmOnFTQmObZ@GowEXhz)9jw4;o`q0G}t07{yE4;RWYN$zow}o4y`zT(ZHN zNv_B}lVX;?-)@m?R$Hhiu=XVjWe+68OzZog{ffgsqnS-iSGluEx>wC!eiJ{gP!sa~ zp6jcGz8cAR(%s6WH34(S4@T+fbwR21!qqX#Eg7dPyc?khaV(GWOVe;#dii z;+MF^XGHII;6t_qx9>YDCFuM#gEC=dy!{LFvqtVnM`&3=gEM0B4~81wZ4H=D_bDbt zK>7CKO7dc_mUO8v*_ls@>6FU`x1UUZ 
ztJ9)YU8R3p{c0HoMU81%Zivp8UGnv(*pPAKhR^hyVms2}P8;uYAn$lOv?R?=@w!~o z*`p2^ykkJa!Z7&RfCm18GKLyNba;)EhE|4D?D*_!u?i(e1|}gxA_mJ0X+-`hRTlOj zLox;hm@Y&v>LLrhnN;E$#Fd46nNHYg3_fDgh(5?gffLB(i(C|_YDC4-Fu2axg}4E^ zJ-V;K)P3q7no6Q-zu=D%OHxK(0;5pa7$gp^MLTA#OpP72pWs2BTgI zP~L=!ea9f%gf`<+gn`H)gNMhFAp~VZ5tf@!i8~Ni5%wULG9C(;>ZyoTZ0&s^nCL9O z(G2YS#3FxeVB`TSpF3q zo@)8BjW;t&m&#a%yp{S|de5z4&9;qYRVTcU_z`=zrsn4>L_jMVhkX--sUvdhvrDFP7FqUsB-6L5B0V%N6~}T19!E)nAK9!_{~GG&lM|v_)~sK* zHOmg~-N;zI)4v)IX%;k|;fySfbCY0nAfm(1(9<%LBKky4e^1ZznhQYo&?9S~BcUhM z!uO%+t01o5yQ38<%c?9j->H6Z+Wj*{XXR&GG43Z{)`Zn-I=<`Ee#GJPIu7^|nnHtb zQ`(^$qrc?$___r)t-jqi8nz_`Zg}@t_W(-yOq?a;hd+%ABU(0$)7uO}YN|oaLkAru zICVSe!t(PQ&I>%p!_O8pEwrP=p}FCjO++J{q@UNy#FZ(oENJ&^uQm~K>Fi*RIQKW~ zO}Z63jEC$cH21?O4Sc%5C2@E2+`QhSLtnabaGkA#2@`r2hX!y|^Ju?m<@4MlXj*(8 zA*yC`@jpjon}<#AL^cVip53l}@mxb}WV!*X@LHJ2WibLQTjD}YK(dRWkR=sUB0xP$ z8s88{h2EA_;t8Zgg?o`pg8=hpT^u#oiVTJXIBH3QOQ>|>NhF{KRoPVHDb!O9I-=TX zG` zE^S?J-EkoW$?~rRPY&(>Ex!kTXfsb%QCs=?*L0=egD+kgZQe?we@=#^{`8E0YV%o&3FLdGH-o6^kdpgEShtJEO8g2uJ`5 zPy|$<4m5x!&<46d9~c8mUz`4;fbV|I TestResult { }); // check total issuance of the faucet - assert_eq!( - native_faucet.storage().get_item(AccountStorage::faucet_sysdata_slot()).unwrap()[3], - Felt::new(999_777), - "Issuance mismatch" - ); + let metadata = TokenMetadata::try_from(native_faucet.storage()).unwrap(); + assert_eq!(metadata.token_supply(), Felt::new(999_777), "Issuance mismatch"); Ok(()) } diff --git a/crates/store/src/genesis/mod.rs b/crates/store/src/genesis/mod.rs index 08d68fe1b..b91a31634 100644 --- a/crates/store/src/genesis/mod.rs +++ b/crates/store/src/genesis/mod.rs @@ -81,7 +81,7 @@ impl GenesisState { Ok(BlockAccountUpdate::new( account.id(), - account.commitment(), + account.to_commitment(), account_update_details, )) }) diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 5fc0cc6c0..9e90bfa29 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ 
-448,6 +448,7 @@ fn test_storage_map_incremental_updates() { #[test] fn test_empty_storage_map_entries_query() { use miden_protocol::account::auth::PublicKeyCommitment; + use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::{ AccountBuilder, AccountComponent, @@ -470,9 +471,12 @@ fn test_empty_storage_map_entries_query() { let component_code = CodeBuilder::default() .compile_component_code("test::interface", "pub proc test push.1 end") .unwrap(); - let account_component = AccountComponent::new(component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + component_code, + component_storage, + AccountComponentMetadata::new("test").with_supports_all_types(), + ) + .unwrap(); let account = AccountBuilder::new([1u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) From 218848c98ed1bff3d81f11d9fe04515689b24eb6 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Thu, 26 Feb 2026 21:49:42 +0100 Subject: [PATCH 58/77] fix/proto: move generatex code to `OUT_DIR` (#1703) --- .../install-protobuf-compiler/action.yml | 13 + .github/workflows/ci.yml | 33 +- .github/workflows/nightly.yml | 8 +- .github/workflows/publish-debian-all.yml | 4 +- .github/workflows/publish-debian.yml | 4 +- .github/workflows/publish-main.yml | 4 +- Cargo.lock | 2 + Makefile | 13 +- bin/remote-prover/Cargo.toml | 9 + bin/remote-prover/build.rs | 35 +- bin/remote-prover/src/generated/mod.rs | 4 +- .../src/generated/remote_prover.rs | 1003 ------ bin/remote-prover/src/server/tests.rs | 7 +- crates/proto/Cargo.toml | 5 + crates/proto/build.rs | 38 +- crates/proto/src/generated/account.rs | 99 - crates/proto/src/generated/block_producer.rs | 657 ---- crates/proto/src/generated/blockchain.rs | 115 - crates/proto/src/generated/mod.rs | 11 +- crates/proto/src/generated/note.rs | 163 - crates/proto/src/generated/primitives.rs | 98 - crates/proto/src/generated/remote_prover.rs | 1003 ------ 
crates/proto/src/generated/rpc.rs | 2074 ----------- crates/proto/src/generated/store.rs | 3183 ----------------- crates/proto/src/generated/transaction.rs | 59 - crates/proto/src/generated/validator.rs | 457 --- crates/remote-prover-client/Cargo.toml | 6 + crates/remote-prover-client/build.rs | 43 +- .../src/remote_prover/generated/nostd/mod.rs | 4 +- .../generated/nostd/remote_prover.rs | 442 --- .../src/remote_prover/generated/std/mod.rs | 4 +- .../generated/std/remote_prover.rs | 475 --- proto/build.rs | 23 +- scripts/check-features.sh | 3 +- 34 files changed, 131 insertions(+), 9970 deletions(-) create mode 100644 .github/actions/install-protobuf-compiler/action.yml delete mode 100644 bin/remote-prover/src/generated/remote_prover.rs delete mode 100644 crates/proto/src/generated/account.rs delete mode 100644 crates/proto/src/generated/block_producer.rs delete mode 100644 crates/proto/src/generated/blockchain.rs delete mode 100644 crates/proto/src/generated/note.rs delete mode 100644 crates/proto/src/generated/primitives.rs delete mode 100644 crates/proto/src/generated/remote_prover.rs delete mode 100644 crates/proto/src/generated/rpc.rs delete mode 100644 crates/proto/src/generated/store.rs delete mode 100644 crates/proto/src/generated/transaction.rs delete mode 100644 crates/proto/src/generated/validator.rs delete mode 100644 crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs delete mode 100644 crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs diff --git a/.github/actions/install-protobuf-compiler/action.yml b/.github/actions/install-protobuf-compiler/action.yml new file mode 100644 index 000000000..4ef5c3fc6 --- /dev/null +++ b/.github/actions/install-protobuf-compiler/action.yml @@ -0,0 +1,13 @@ + +name: "Install protobuf compiler" +description: "Install compiler for protobuf compilation" + +runs: + using: "composite" + steps: + - name: Install protobuf compiler + shell: bash + run: | + set -eux + sudo 
apt-get update + sudo apt-get install -y protobuf-compiler diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b8bea522e..cf3ceddf6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,7 +24,7 @@ on: env: # Shared prefix key for the rust cache. - # + # # This provides a convenient way to evict old or corrupted cache. RUST_CACHE_KEY: rust-cache-2026.02.02 # Reduce cache usage by removing debug information. @@ -47,8 +47,8 @@ jobs: - uses: actions/checkout@v6 - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler - name: Rustup run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 @@ -95,7 +95,7 @@ jobs: fi done echo "Static linkage check passed for all of ${bin_targets[@]}" - + clippy: name: lint - clippy runs-on: ubuntu-24.04 @@ -152,29 +152,6 @@ jobs: - name: Build docs run: cargo doc --no-deps --workspace --all-features --locked - # Ensures our checked-in protobuf generated code is aligned to the protobuf schema. - # - # We do this by rebuilding the generated code and ensuring there is no diff. - proto: - name: gRPC codegen - needs: [build] - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v6 - - name: Rustup - run: rustup update --no-self-update - - name: Install protobuf - run: sudo apt-get update && sudo apt-get install -y protobuf-compiler - - uses: Swatinem/rust-cache@v2 - with: - shared-key: ${{ github.workflow }}-build - prefix-key: ${{ env.RUST_CACHE_KEY }} - save-if: false - - name: Rebuild protos - run: BUILD_PROTO=1 cargo check --all-features --all-targets --locked --workspace - - name: Diff check - run: git diff --exit-code - # Ensure the stress-test still functions by running some cheap benchmarks. 
stress-test: name: stress test @@ -204,7 +181,7 @@ jobs: cargo run --bin miden-node-stress-test seed-store \ --data-directory ${{ env.DATA_DIR }} \ --num-accounts 500 --public-accounts-percentage 50 - # TODO re-introduce + # TODO re-introduce # - name: Benchmark state sync # run: | # cargo run --bin miden-node-stress-test benchmark-store \ diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 1d3755341..52cf04234 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -23,8 +23,8 @@ jobs: ref: "next" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler - name: Rustup run: rustup install beta && rustup default beta - uses: taiki-e/install-action@v2 @@ -45,8 +45,8 @@ jobs: ref: "next" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler - name: Install rust run: rustup update --no-self-update - name: Install cargo-hack diff --git a/.github/workflows/publish-debian-all.yml b/.github/workflows/publish-debian-all.yml index 76e65d0eb..3aea36b5c 100644 --- a/.github/workflows/publish-debian-all.yml +++ b/.github/workflows/publish-debian-all.yml @@ -31,8 +31,8 @@ jobs: uses: actions/checkout@main with: fetch-depth: 0 - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler - name: Build and Publish Node uses: ./.github/actions/debian with: diff --git a/.github/workflows/publish-debian.yml b/.github/workflows/publish-debian.yml index d17d06532..be01b9d1e 100644 --- a/.github/workflows/publish-debian.yml +++ 
b/.github/workflows/publish-debian.yml @@ -60,8 +60,8 @@ jobs: with: fetch-depth: 0 - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler - name: Build and Publish Packages uses: ./.github/actions/debian diff --git a/.github/workflows/publish-main.yml b/.github/workflows/publish-main.yml index fcaab36a8..f53033f74 100644 --- a/.github/workflows/publish-main.yml +++ b/.github/workflows/publish-main.yml @@ -18,8 +18,8 @@ jobs: with: fetch-depth: 0 ref: main - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler # Ensure the release tag refers to the latest commit on main. # Compare the commit SHA that triggered the workflow with the HEAD of the branch we just # checked out (main). diff --git a/Cargo.lock b/Cargo.lock index a08d6641e..7357d1a92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3275,8 +3275,10 @@ name = "miden-remote-prover" version = "0.14.0" dependencies = [ "anyhow", + "assert_matches", "async-trait", "clap", + "fs-err", "http 1.4.0", "humantime", "miden-block-prover", diff --git a/Makefile b/Makefile index 8eb443544..33ab72a88 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,6 @@ help: # -- variables ------------------------------------------------------------------------------------ WARNINGS=RUSTDOCFLAGS="-D warnings" -BUILD_PROTO=BUILD_PROTO=1 CONTAINER_RUNTIME ?= docker STRESS_TEST_DATA_DIR ?= stress-test-store-$(shell date +%Y%m%d-%H%M%S) @@ -86,7 +85,7 @@ test: ## Runs all tests .PHONY: check check: ## Check all targets and features for errors without code generation - ${BUILD_PROTO} cargo check --all-features --all-targets --locked --workspace + cargo check --all-features --all-targets --locked --workspace .PHONY: check-features check-features: ## Checks all feature combinations compile without warnings using cargo-hack @@ -96,22 
+95,22 @@ check-features: ## Checks all feature combinations compile without warnings usin .PHONY: build build: ## Builds all crates and re-builds protobuf bindings for proto crates - ${BUILD_PROTO} cargo build --locked --workspace - ${BUILD_PROTO} cargo build --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features --features batch-prover,block-prover,tx-prover # no-std compatible build + cargo build --locked --workspace + cargo build --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features --features batch-prover,block-prover,tx-prover # no-std compatible build # --- installing ---------------------------------------------------------------------------------- .PHONY: install-node install-node: ## Installs node - ${BUILD_PROTO} cargo install --path bin/node --locked + cargo install --path bin/node --locked .PHONY: install-remote-prover install-remote-prover: ## Install remote prover's CLI - $(BUILD_PROTO) cargo install --path bin/remote-prover --bin miden-remote-prover --locked + cargo install --path bin/remote-prover --bin miden-remote-prover --locked .PHONY: stress-test-smoke stress-test: ## Runs stress-test benchmarks - ${BUILD_PROTO} cargo build --release --locked -p miden-node-stress-test + cargo build --release --locked -p miden-node-stress-test @mkdir -p $(STRESS_TEST_DATA_DIR) ./target/release/miden-node-stress-test seed-store --data-directory $(STRESS_TEST_DATA_DIR) --num-accounts 500 --public-accounts-percentage 50 ./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-state diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 7a3b6a059..60ae9f969 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -40,13 +40,22 @@ tower-http = { features = ["trace"], workspace = true } tracing = { workspace = true } [dev-dependencies] +assert_matches = { workspace = 
true } miden-protocol = { features = ["testing"], workspace = true } miden-standards = { features = ["testing"], workspace = true } miden-testing = { workspace = true } miden-tx = { features = ["testing"], workspace = true } [build-dependencies] +fs-err = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-rocksdb-cxx-linkage-fix = { workspace = true } miette = { features = ["fancy"], version = "7.5" } tonic-prost-build = { workspace = true } + +[package.metadata.cargo-machete] +ignored = [ + "http", + "prost", + "tonic-prost", # used in generated OUT_DIR code +] diff --git a/bin/remote-prover/build.rs b/bin/remote-prover/build.rs index 262ab49af..6183263eb 100644 --- a/bin/remote-prover/build.rs +++ b/bin/remote-prover/build.rs @@ -1,28 +1,28 @@ +use std::path::{Path, PathBuf}; + +use fs_err as fs; use miden_node_proto_build::remote_prover_api_descriptor; -use miette::IntoDiagnostic; +use miette::{IntoDiagnostic, WrapErr}; use tonic_prost_build::FileDescriptorSet; -/// Defines whether the build script should generate files in `/src`. -/// -/// The docs.rs build pipeline has a read-only filesystem, so we have to avoid writing to `src`, -/// otherwise the docs will fail to build there. Note that writing to `OUT_DIR` is fine. -const BUILD_GENERATED_FILES_IN_SRC: bool = option_env!("BUILD_PROTO").is_some(); - -const GENERATED_OUT_DIR: &str = "src/generated"; - /// Generates Rust protobuf bindings. fn main() -> miette::Result<()> { miden_node_rocksdb_cxx_linkage_fix::configure(); - println!("cargo:rerun-if-env-changed=BUILD_PROTO"); - if !BUILD_GENERATED_FILES_IN_SRC { - return Ok(()); - } + + let dst_dir = + PathBuf::from(std::env::var("OUT_DIR").expect("OUT_DIR should be set")).join("generated"); + + // Remove all existing files. 
+ let _ = fs::remove_dir_all(&dst_dir); + fs::create_dir(&dst_dir) + .into_diagnostic() + .wrap_err("creating destination folder")?; // Get the file descriptor set let remote_prover_descriptor = remote_prover_api_descriptor(); // Build tonic code - build_tonic_from_descriptor(remote_prover_descriptor)?; + build_tonic_from_descriptor(remote_prover_descriptor, &dst_dir)?; Ok(()) } @@ -31,9 +31,12 @@ fn main() -> miette::Result<()> { // ================================================================================================ /// Builds tonic code from a `FileDescriptorSet` -fn build_tonic_from_descriptor(descriptor: FileDescriptorSet) -> miette::Result<()> { +fn build_tonic_from_descriptor( + descriptor: FileDescriptorSet, + dst_dir: &Path, +) -> miette::Result<()> { tonic_prost_build::configure() - .out_dir(GENERATED_OUT_DIR) + .out_dir(dst_dir) .build_server(true) .build_transport(true) .compile_fds_with_config(descriptor, tonic_prost_build::Config::new()) diff --git a/bin/remote-prover/src/generated/mod.rs b/bin/remote-prover/src/generated/mod.rs index f2af60274..c24a38e35 100644 --- a/bin/remote-prover/src/generated/mod.rs +++ b/bin/remote-prover/src/generated/mod.rs @@ -2,5 +2,7 @@ #![allow(clippy::allow_attributes, reason = "generated by build.rs and tonic")] #[rustfmt::skip] -mod remote_prover; +pub mod remote_prover { + include!(concat!(env!("OUT_DIR"), "/generated/remote_prover.rs")); +} pub use remote_prover::*; diff --git a/bin/remote-prover/src/generated/remote_prover.rs b/bin/remote-prover/src/generated/remote_prover.rs deleted file mode 100644 index b504804c3..000000000 --- a/bin/remote-prover/src/generated/remote_prover.rs +++ /dev/null @@ -1,1003 +0,0 @@ -// This file is @generated by prost-build. -/// Request message for proof generation containing payload and proof type metadata. 
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProofRequest { - /// Type of proof being requested, determines payload interpretation - #[prost(enumeration = "ProofType", tag = "1")] - pub proof_type: i32, - /// Serialized payload requiring proof generation. The encoding format is - /// type-specific: - /// - /// * TRANSACTION: TransactionInputs encoded. - /// * BATCH: ProposedBatch encoded. - /// * BLOCK: BlockProofRequest encoded. - #[prost(bytes = "vec", tag = "2")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Response message containing the generated proof. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Proof { - /// Serialized proof bytes. - /// - /// * TRANSACTION: Returns an encoded ProvenTransaction. - /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded BlockProof. - #[prost(bytes = "vec", tag = "1")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Status of an individual worker in the proxy. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProxyWorkerStatus { - /// The name of the worker. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The version of the worker. - #[prost(string, tag = "2")] - pub version: ::prost::alloc::string::String, - /// The health status of the worker. - #[prost(enumeration = "WorkerHealthStatus", tag = "3")] - pub status: i32, -} -/// Response message containing the status of the proxy. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProxyStatus { - /// The version of the proxy. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this proxy. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, - /// The list of workers managed by this proxy. - #[prost(message, repeated, tag = "3")] - pub workers: ::prost::alloc::vec::Vec, -} -/// Response message containing the status of the worker. 
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct WorkerStatus { - /// The version of the worker. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this worker. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, -} -/// Enumeration of supported proof types. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ProofType { - /// Proof for a single transaction. - Transaction = 0, - /// Proof covering a batch of transactions. - Batch = 1, - /// Proof for entire block validity. - Block = 2, -} -impl ProofType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Transaction => "TRANSACTION", - Self::Batch => "BATCH", - Self::Block => "BLOCK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TRANSACTION" => Some(Self::Transaction), - "BATCH" => Some(Self::Batch), - "BLOCK" => Some(Self::Block), - _ => None, - } - } -} -/// Health status of a worker. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum WorkerHealthStatus { - /// The worker's health status is unknown. - /// This value is used when the proxy is not able to determine the health status of the worker. - /// It is only a temporary state and the proxy will eventually determine the health status of the worker. - Unknown = 0, - /// The worker is healthy. - /// This value is used when the worker is able to successfully process requests. - Healthy = 1, - /// The worker is unhealthy. 
- /// This value is used when the worker is not receiving requests or is not able to successfully process requests. - Unhealthy = 2, -} -impl WorkerHealthStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unknown => "UNKNOWN", - Self::Healthy => "HEALTHY", - Self::Unhealthy => "UNHEALTHY", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "HEALTHY" => Some(Self::Healthy), - "UNHEALTHY" => Some(Self::Unhealthy), - _ => None, - } - } -} -/// Generated client implementations. -pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Generates a proof for the requested payload. - pub async fn prove( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/remote_prover.Api/Prove"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("remote_prover.Api", "Prove")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ApiServer. - #[async_trait] - pub trait Api: std::marker::Send + std::marker::Sync + 'static { - /// Generates a proof for the requested payload. 
- async fn prove( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct ApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ApiServer - where - T: Api, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.Api/Prove" => { - #[allow(non_camel_case_types)] - struct ProveSvc(pub Arc); - impl tonic::server::UnaryService - for ProveSvc { - type Response = super::Proof; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::prove(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = ProveSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - 
(tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.Api"; - impl tonic::server::NamedService for ApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod proxy_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ProxyStatusApiClient { - inner: tonic::client::Grpc, - } - impl ProxyStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ProxyStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ProxyStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ProxyStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the proxy. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.ProxyStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.ProxyStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod proxy_status_api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ProxyStatusApiServer. - #[async_trait] - pub trait ProxyStatusApi: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status of the proxy. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct ProxyStatusApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ProxyStatusApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ProxyStatusApiServer - where - T: ProxyStatusApi, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.ProxyStatusApi/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> - for StatusSvc { - type Response = super::ProxyStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers 
- .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ProxyStatusApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.ProxyStatusApi"; - impl tonic::server::NamedService for ProxyStatusApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod worker_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct WorkerStatusApiClient { - inner: tonic::client::Grpc, - } - impl WorkerStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl WorkerStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> WorkerStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - WorkerStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the worker. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.WorkerStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.WorkerStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod worker_status_api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with WorkerStatusApiServer. - #[async_trait] - pub trait WorkerStatusApi: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status of the worker. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct WorkerStatusApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl WorkerStatusApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for WorkerStatusApiServer - where - T: WorkerStatusApi, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.WorkerStatusApi/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> - for StatusSvc { - type Response = super::WorkerStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - 
headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for WorkerStatusApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.WorkerStatusApi"; - impl tonic::server::NamedService for WorkerStatusApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/bin/remote-prover/src/server/tests.rs b/bin/remote-prover/src/server/tests.rs index 8172c344b..46bea96e7 100644 --- a/bin/remote-prover/src/server/tests.rs +++ b/bin/remote-prover/src/server/tests.rs @@ -3,6 +3,7 @@ use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; +use assert_matches::assert_matches; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::asset::{Asset, FungibleAsset}; use miden_protocol::batch::{ProposedBatch, ProvenBatch}; @@ -229,9 +230,9 @@ async fn capacity_is_respected() { result.sort_unstable(); assert_eq!(expected, result); - // We also expect that the error is a resource exhaustion error. 
- let err = first.err().or(second.err()).or(third.err()).unwrap(); - assert_eq!(err.code(), tonic::Code::ResourceExhausted); + assert_matches!(first.err().or(second.err()).or(third.err()), Some(err) => { + assert_eq!(err.code(), tonic::Code::ResourceExhausted); + }); server.abort(); } diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index 2e9767f88..5c308ae58 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -38,3 +38,8 @@ miden-node-proto-build = { features = ["internal"], workspace = true miden-node-rocksdb-cxx-linkage-fix = { workspace = true } miette = { version = "7.6" } tonic-prost-build = { workspace = true } + +[package.metadata.cargo-machete] +ignored = [ + "tonic-prost", # used in generated OUT_DIR code +] diff --git a/crates/proto/build.rs b/crates/proto/build.rs index 4f64f4e9d..9c42bcb08 100644 --- a/crates/proto/build.rs +++ b/crates/proto/build.rs @@ -1,5 +1,4 @@ use std::env; -use std::fmt::Write; use std::path::{Path, PathBuf}; use fs_err as fs; @@ -15,29 +14,17 @@ use miden_node_proto_build::{ use miette::{Context, IntoDiagnostic}; use tonic_prost_build::FileDescriptorSet; -/// Generates Rust protobuf bindings using miden-node-proto-build. -/// -/// This is done only if `BUILD_PROTO` environment variable is set to `1` to avoid running the -/// script on crates.io where repo-level .proto files are not available. +/// Generates Rust protobuf bindings using `miden-node-proto-build`. fn main() -> miette::Result<()> { println!("cargo::rerun-if-changed=../../proto/proto"); - println!("cargo::rerun-if-env-changed=BUILD_PROTO"); miden_node_rocksdb_cxx_linkage_fix::configure(); - // Skip this build script in BUILD_PROTO environment variable is not set to `1`. 
- if env::var("BUILD_PROTO").unwrap_or("0".to_string()) == "0" { - return Ok(()); - } - - let crate_root: PathBuf = - env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR should be set").into(); - let dst_dir = crate_root.join("src").join("generated"); + let dst_dir = + PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR should be set")).join("generated"); // Remove all existing files. - fs::remove_dir_all(&dst_dir) - .into_diagnostic() - .wrap_err("removing existing files")?; + let _ = fs::remove_dir_all(&dst_dir); fs::create_dir(&dst_dir) .into_diagnostic() .wrap_err("creating destination folder")?; @@ -72,12 +59,12 @@ fn generate_bindings(file_descriptors: FileDescriptorSet, dst_dir: &Path) -> mie } /// Generate `mod.rs` which includes all files in the folder as submodules. -fn generate_mod_rs(directory: impl AsRef) -> std::io::Result<()> { - let mod_filepath = directory.as_ref().join("mod.rs"); +fn generate_mod_rs(dst_dir: impl AsRef) -> std::io::Result<()> { + let mod_filepath = dst_dir.as_ref().join("mod.rs"); // Discover all submodules by iterating over the folder contents. let mut submodules = Vec::new(); - for entry in fs::read_dir(directory.as_ref())? { + for entry in fs::read_dir(dst_dir.as_ref())? { let entry = entry?; let path = entry.path(); if path.is_file() { @@ -93,17 +80,8 @@ fn generate_mod_rs(directory: impl AsRef) -> std::io::Result<()> { submodules.sort(); - // Lints we need to allow for the generated code. 
- let lints = ["pedantic", "large_enum_variant", "allow_attributes"]; - let lints = lints.into_iter().fold(String::new(), |mut s, lint| { - writeln!(s, " clippy::{lint},").unwrap(); - s - }); - let lints = - format!("#![expect(\n{lints} reason = \"generated by build.rs and tonic\"\n)]\n\n"); - let modules = submodules.iter().map(|f| format!("pub mod {f};\n")); - let contents = std::iter::once(lints).chain(modules).collect::(); + let contents = modules.into_iter().collect::(); fs::write(mod_filepath, contents) } diff --git a/crates/proto/src/generated/account.rs b/crates/proto/src/generated/account.rs deleted file mode 100644 index 6ff613562..000000000 --- a/crates/proto/src/generated/account.rs +++ /dev/null @@ -1,99 +0,0 @@ -// This file is @generated by prost-build. -/// Uniquely identifies a specific account. -/// -/// A Miden account ID is a 120-bit value derived from the commitments to account code and storage, -/// and a random user-provided seed. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -#[prost(skip_debug)] -pub struct AccountId { - /// 15 bytes (120 bits) encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::account::account_id::AccountId\]. - #[prost(bytes = "vec", tag = "1")] - pub id: ::prost::alloc::vec::Vec, -} -/// The state of an account at a specific block height. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountSummary { - /// The account ID. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The current account commitment or zero if the account does not exist. - #[prost(message, optional, tag = "2")] - pub account_commitment: ::core::option::Option, - /// Block number at which the summary was made. - #[prost(uint32, tag = "3")] - pub block_num: u32, -} -/// Represents the storage header of an account. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountStorageHeader { - /// Storage slots with their types and data. 
- #[prost(message, repeated, tag = "1")] - pub slots: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `AccountStorageHeader`. -pub mod account_storage_header { - /// A single storage slot in the account storage header. - #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] - pub struct StorageSlot { - /// The name of the storage slot. - #[prost(string, tag = "1")] - pub slot_name: ::prost::alloc::string::String, - /// The type of the storage slot. - #[prost(uint32, tag = "2")] - pub slot_type: u32, - /// The data (Word) for this storage slot. - /// For value slots (slot_type=0), this is the actual value stored in the slot. - /// For map slots (slot_type=1), this is the root of the storage map. - #[prost(message, optional, tag = "3")] - pub commitment: ::core::option::Option, - } -} -/// An account details. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountDetails { - /// Account summary. - #[prost(message, optional, tag = "1")] - pub summary: ::core::option::Option, - /// Account details encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::account::Account\]. - #[prost(bytes = "vec", optional, tag = "2")] - pub details: ::core::option::Option<::prost::alloc::vec::Vec>, -} -/// An account header. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountHeader { - /// The account ID. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Vault root hash. - #[prost(message, optional, tag = "2")] - pub vault_root: ::core::option::Option, - /// Storage root hash. - #[prost(message, optional, tag = "3")] - pub storage_commitment: ::core::option::Option, - /// Code root hash. - #[prost(message, optional, tag = "4")] - pub code_commitment: ::core::option::Option, - /// Account nonce. - #[prost(uint64, tag = "5")] - pub nonce: u64, -} -/// An account witness. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountWitness { - /// Account ID for which this proof is requested. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The account ID within the proof, which may be different from the above account ID. - /// This can happen when the requested account ID's prefix matches the prefix of an existing - /// account ID in the tree. Then the witness will prove inclusion of this witness ID in the tree. - #[prost(message, optional, tag = "2")] - pub witness_id: ::core::option::Option, - /// The state commitment whose inclusion the witness proves. - #[prost(message, optional, tag = "3")] - pub commitment: ::core::option::Option, - /// The merkle path of the state commitment in the account tree. - #[prost(message, optional, tag = "4")] - pub path: ::core::option::Option, -} diff --git a/crates/proto/src/generated/block_producer.rs b/crates/proto/src/generated/block_producer.rs deleted file mode 100644 index 9c95e6a75..000000000 --- a/crates/proto/src/generated/block_producer.rs +++ /dev/null @@ -1,657 +0,0 @@ -// This file is @generated by prost-build. -/// Request to subscribe to mempool events. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MempoolSubscriptionRequest { - /// The caller's current chain height. - /// - /// Request will be rejected if this does not match the mempool's current view. - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, -} -/// Event from the mempool. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MempoolEvent { - #[prost(oneof = "mempool_event::Event", tags = "1, 2, 3")] - pub event: ::core::option::Option, -} -/// Nested message and enum types in `MempoolEvent`. -pub mod mempool_event { - /// A block was committed. - /// - /// This event is sent when a block is committed to the chain. 
- #[derive(Clone, PartialEq, ::prost::Message)] - pub struct BlockCommitted { - #[prost(message, optional, tag = "1")] - pub block_header: ::core::option::Option, - #[prost(message, repeated, tag = "2")] - pub transactions: ::prost::alloc::vec::Vec< - super::super::transaction::TransactionId, - >, - } - /// A transaction was added to the mempool. - /// - /// This event is sent when a transaction is added to the mempool. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct TransactionAdded { - /// The ID of the transaction. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// Nullifiers consumed by the transaction. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Network notes created by the transaction. - #[prost(message, repeated, tag = "3")] - pub network_notes: ::prost::alloc::vec::Vec, - /// Changes to a network account, if any. This includes creation of new network accounts. - /// - /// The account delta is encoded using \[winter_utils::Serializable\] implementation - /// for \[miden_protocol::account::delta::AccountDelta\]. - #[prost(bytes = "vec", optional, tag = "4")] - pub network_account_delta: ::core::option::Option<::prost::alloc::vec::Vec>, - } - /// A set of transactions was reverted and dropped from the mempool. - /// - /// This event is sent when a set of transactions are reverted and dropped from the mempool. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct TransactionsReverted { - #[prost(message, repeated, tag = "1")] - pub reverted: ::prost::alloc::vec::Vec, - } - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Event { - #[prost(message, tag = "1")] - TransactionAdded(TransactionAdded), - #[prost(message, tag = "2")] - BlockCommitted(BlockCommitted), - #[prost(message, tag = "3")] - TransactionsReverted(TransactionsReverted), - } -} -/// Generated client implementations. 
-pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. 
- /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer.Api/Status", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("block_producer.Api", "Status")); - self.inner.unary(req, path, codec).await - } - /// Submits proven transaction to the Miden network. Returns the node's current block height. - pub async fn submit_proven_transaction( - &mut self, - request: impl tonic::IntoRequest< - super::super::transaction::ProvenTransaction, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer.Api/SubmitProvenTransaction", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("block_producer.Api", "SubmitProvenTransaction"), - ); - self.inner.unary(req, path, codec).await - } - /// Submits a proven batch to the Miden network. - /// - /// The batch may include transactions which were are: - /// - /// * already in the mempool i.e. 
previously successfully submitted - /// * will be submitted to the mempool in the future - /// * won't be submitted to the mempool at all - /// - /// All transactions in the batch but not in the mempool must build on the current mempool - /// state following normal transaction submission rules. - /// - /// Returns the node's current block height. - pub async fn submit_proven_batch( - &mut self, - request: impl tonic::IntoRequest< - super::super::transaction::ProvenTransactionBatch, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer.Api/SubmitProvenBatch", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("block_producer.Api", "SubmitProvenBatch")); - self.inner.unary(req, path, codec).await - } - /// Subscribe to mempool events. - /// - /// The request will be rejected if the caller and the mempool disagree on the current chain tip. - /// This prevents potential desync issues. The caller can resolve this by resync'ing its chain state. - /// - /// The event stream will contain all events after the chain tip. This includes all currently inflight - /// events that have not yet been committed to the chain. - /// - /// Currently only a single active subscription is supported. Subscription requests will cancel the active - /// subscription, if any. 
- pub async fn mempool_subscription( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response>, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer.Api/MempoolSubscription", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("block_producer.Api", "MempoolSubscription")); - self.inner.server_streaming(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ApiServer. - #[async_trait] - pub trait Api: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info. - async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Submits proven transaction to the Miden network. Returns the node's current block height. - async fn submit_proven_transaction( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Submits a proven batch to the Miden network. - /// - /// The batch may include transactions which were are: - /// - /// * already in the mempool i.e. previously successfully submitted - /// * will be submitted to the mempool in the future - /// * won't be submitted to the mempool at all - /// - /// All transactions in the batch but not in the mempool must build on the current mempool - /// state following normal transaction submission rules. - /// - /// Returns the node's current block height. 
- async fn submit_proven_batch( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Server streaming response type for the MempoolSubscription method. - type MempoolSubscriptionStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > - + std::marker::Send - + 'static; - /// Subscribe to mempool events. - /// - /// The request will be rejected if the caller and the mempool disagree on the current chain tip. - /// This prevents potential desync issues. The caller can resolve this by resync'ing its chain state. - /// - /// The event stream will contain all events after the chain tip. This includes all currently inflight - /// events that have not yet been committed to the chain. - /// - /// Currently only a single active subscription is supported. Subscription requests will cancel the active - /// subscription, if any. - async fn mempool_subscription( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - #[derive(Debug)] - pub struct ApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. 
- #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ApiServer - where - T: Api, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/block_producer.Api/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::super::rpc::BlockProducerStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = 
self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer.Api/SubmitProvenTransaction" => { - #[allow(non_camel_case_types)] - struct SubmitProvenTransactionSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::transaction::ProvenTransaction, - > for SubmitProvenTransactionSvc { - type Response = super::super::blockchain::BlockNumber; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::transaction::ProvenTransaction, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::submit_proven_transaction(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SubmitProvenTransactionSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer.Api/SubmitProvenBatch" => { - #[allow(non_camel_case_types)] - struct 
SubmitProvenBatchSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::transaction::ProvenTransactionBatch, - > for SubmitProvenBatchSvc { - type Response = super::super::blockchain::BlockNumber; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::transaction::ProvenTransactionBatch, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::submit_proven_batch(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SubmitProvenBatchSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer.Api/MempoolSubscription" => { - #[allow(non_camel_case_types)] - struct MempoolSubscriptionSvc(pub Arc); - impl< - T: Api, - > tonic::server::ServerStreamingService< - super::MempoolSubscriptionRequest, - > for MempoolSubscriptionSvc { - type Response = super::MempoolEvent; - type ResponseStream = T::MempoolSubscriptionStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::mempool_subscription(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; 
- let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = MempoolSubscriptionSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.server_streaming(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "block_producer.Api"; - impl tonic::server::NamedService for ApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/blockchain.rs b/crates/proto/src/generated/blockchain.rs deleted file mode 100644 index 135d763e1..000000000 --- a/crates/proto/src/generated/blockchain.rs +++ /dev/null @@ -1,115 +0,0 @@ -// This file is @generated by prost-build. -/// Represents a signed block. 
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SignedBlock { - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub body: ::core::option::Option, - #[prost(message, optional, tag = "3")] - pub signature: ::core::option::Option, -} -/// Represents a proposed block. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProposedBlock { - /// Block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::block::ProposedBlock\]. - #[prost(bytes = "vec", tag = "1")] - pub proposed_block: ::prost::alloc::vec::Vec, -} -/// Represents a block or nothing. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeBlock { - /// The requested block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::block::Block\]. - #[prost(bytes = "vec", optional, tag = "1")] - pub block: ::core::option::Option<::prost::alloc::vec::Vec>, -} -/// Represents a block number. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockNumber { - /// The block number of the target block. - #[prost(fixed32, tag = "1")] - pub block_num: u32, -} -/// Represents a block number or nothing. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeBlockNumber { - /// The block number of the target block. - #[prost(fixed32, optional, tag = "1")] - pub block_num: ::core::option::Option, -} -/// Represents a block header. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockHeader { - /// Specifies the version of the protocol. - #[prost(uint32, tag = "1")] - pub version: u32, - /// The commitment of the previous blocks header. - #[prost(message, optional, tag = "2")] - pub prev_block_commitment: ::core::option::Option, - /// A unique sequential number of the current block. 
- #[prost(fixed32, tag = "3")] - pub block_num: u32, - /// A commitment to an MMR of the entire chain where each block is a leaf. - #[prost(message, optional, tag = "4")] - pub chain_commitment: ::core::option::Option, - /// A commitment to account database. - #[prost(message, optional, tag = "5")] - pub account_root: ::core::option::Option, - /// A commitment to the nullifier database. - #[prost(message, optional, tag = "6")] - pub nullifier_root: ::core::option::Option, - /// A commitment to all notes created in the current block. - #[prost(message, optional, tag = "7")] - pub note_root: ::core::option::Option, - /// A commitment to a set of IDs of transactions which affected accounts in this block. - #[prost(message, optional, tag = "8")] - pub tx_commitment: ::core::option::Option, - /// The validator's ECDSA public key. - #[prost(message, optional, tag = "9")] - pub validator_key: ::core::option::Option, - /// A commitment to all transaction kernels supported by this block. - #[prost(message, optional, tag = "10")] - pub tx_kernel_commitment: ::core::option::Option, - /// Fee parameters for block processing. - #[prost(message, optional, tag = "11")] - pub fee_parameters: ::core::option::Option, - /// The time when the block was created. - #[prost(fixed32, tag = "12")] - pub timestamp: u32, -} -/// Validator ECDSA public key. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ValidatorPublicKey { - /// Signature encoded using \[winter_utils::Serializable\] implementation for - /// \[crypto::dsa::ecdsa_k256_keccak::PublicKey\]. - #[prost(bytes = "vec", tag = "1")] - pub validator_key: ::prost::alloc::vec::Vec, -} -/// Block ECDSA Signature. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockSignature { - /// Signature encoded using \[winter_utils::Serializable\] implementation for - /// \[crypto::dsa::ecdsa_k256_keccak::Signature\]. 
- #[prost(bytes = "vec", tag = "1")] - pub signature: ::prost::alloc::vec::Vec, -} -/// Definition of the fee parameters. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct FeeParameters { - /// The faucet account ID which is used for native fee assets. - #[prost(message, optional, tag = "1")] - pub native_asset_id: ::core::option::Option, - /// The base fee (in base units) capturing the cost for the verification of a transaction. - #[prost(fixed32, tag = "2")] - pub verification_base_fee: u32, -} -/// Represents a block body. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockBody { - /// Block body data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::block::BlockBody\]. - #[prost(bytes = "vec", tag = "1")] - pub block_body: ::prost::alloc::vec::Vec, -} diff --git a/crates/proto/src/generated/mod.rs b/crates/proto/src/generated/mod.rs index 4ec0ae408..63dc1dfa2 100644 --- a/crates/proto/src/generated/mod.rs +++ b/crates/proto/src/generated/mod.rs @@ -5,13 +5,4 @@ reason = "generated by build.rs and tonic" )] -pub mod account; -pub mod block_producer; -pub mod blockchain; -pub mod note; -pub mod primitives; -pub mod remote_prover; -pub mod rpc; -pub mod store; -pub mod transaction; -pub mod validator; +include!(concat!(env!("OUT_DIR"), "/generated/mod.rs")); diff --git a/crates/proto/src/generated/note.rs b/crates/proto/src/generated/note.rs deleted file mode 100644 index 8bff5858c..000000000 --- a/crates/proto/src/generated/note.rs +++ /dev/null @@ -1,163 +0,0 @@ -// This file is @generated by prost-build. -/// Represents a note's ID. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct NoteId { - /// A unique identifier of the note which is a 32-byte commitment to the underlying note data. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// List of note IDs. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NoteIdList { - /// List of note IDs to be queried from the database. - #[prost(message, repeated, tag = "1")] - pub ids: ::prost::alloc::vec::Vec, -} -/// Represents a note's metadata. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct NoteMetadata { - /// The account which sent the note. - #[prost(message, optional, tag = "1")] - pub sender: ::core::option::Option, - /// The type of the note. - #[prost(enumeration = "NoteType", tag = "2")] - pub note_type: i32, - /// A value which can be used by the recipient(s) to identify notes intended for them. - /// - /// See `miden_protocol::note::note_tag` for more info. - #[prost(fixed32, tag = "3")] - pub tag: u32, - /// Serialized note attachment - /// - /// See `miden_protocol::note::NoteAttachment` for more info. - #[prost(bytes = "vec", tag = "4")] - pub attachment: ::prost::alloc::vec::Vec, -} -/// Represents a note. -/// -/// The note is composed of the note metadata and its serialized details. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Note { - /// The note's metadata. - #[prost(message, optional, tag = "1")] - pub metadata: ::core::option::Option, - /// Serialized note details (empty for private notes). - #[prost(bytes = "vec", optional, tag = "2")] - pub details: ::core::option::Option<::prost::alloc::vec::Vec>, -} -/// Represents a network note. -/// -/// Network notes are a subtype of public notes, and as such, their details are always publicly -/// known. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct NetworkNote { - /// The note's metadata. - #[prost(message, optional, tag = "1")] - pub metadata: ::core::option::Option, - /// Serialized note details (i.e., assets and recipient). - #[prost(bytes = "vec", tag = "2")] - pub details: ::prost::alloc::vec::Vec, -} -/// Represents a committed note. -/// -/// A committed note is a note that has been included in a block. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CommittedNote { - /// Either private, public, or network note. - #[prost(message, optional, tag = "1")] - pub note: ::core::option::Option, - /// The data needed to prove that the note is present in the chain. - #[prost(message, optional, tag = "2")] - pub inclusion_proof: ::core::option::Option, -} -/// Represents the result of getting committed notes. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CommittedNoteList { - /// List of committed notes. - #[prost(message, repeated, tag = "1")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Represents a proof of note's inclusion in a block. -/// -/// Does not include proof of the block's inclusion in the chain. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NoteInclusionInBlockProof { - /// A unique identifier of the note which is a 32-byte commitment to the underlying note data. - #[prost(message, optional, tag = "1")] - pub note_id: ::core::option::Option, - /// The block number in which the note was created. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - /// The index of the note in the block. - #[prost(uint32, tag = "3")] - pub note_index_in_block: u32, - /// The note's inclusion proof in the block. - #[prost(message, optional, tag = "4")] - pub inclusion_path: ::core::option::Option, -} -/// Represents proof of a note inclusion in the block. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NoteSyncRecord { - /// A unique identifier of the note which is a 32-byte commitment to the underlying note data. - #[prost(message, optional, tag = "1")] - pub note_id: ::core::option::Option, - /// The index of the note in the block. - #[prost(uint32, tag = "2")] - pub note_index_in_block: u32, - /// The note's metadata. - #[prost(message, optional, tag = "3")] - pub metadata: ::core::option::Option, - /// The note's inclusion proof in the block. 
- #[prost(message, optional, tag = "4")] - pub inclusion_path: ::core::option::Option, -} -/// Represents a note root. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct NoteRoot { - /// The root of the note. - #[prost(message, optional, tag = "1")] - pub root: ::core::option::Option, -} -/// Represents a note script. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct NoteScript { - /// Entrypoint of the script. - #[prost(uint32, tag = "1")] - pub entrypoint: u32, - /// Mast of the script. - #[prost(bytes = "vec", tag = "2")] - pub mast: ::prost::alloc::vec::Vec, -} -/// The type of a note. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum NoteType { - /// Unspecified note type (default value, should not be used). - Unspecified = 0, - /// Public note - details are visible on-chain. - Public = 1, - /// Private note - details are not visible on-chain. - Private = 2, -} -impl NoteType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unspecified => "NOTE_TYPE_UNSPECIFIED", - Self::Public => "NOTE_TYPE_PUBLIC", - Self::Private => "NOTE_TYPE_PRIVATE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "NOTE_TYPE_UNSPECIFIED" => Some(Self::Unspecified), - "NOTE_TYPE_PUBLIC" => Some(Self::Public), - "NOTE_TYPE_PRIVATE" => Some(Self::Private), - _ => None, - } - } -} diff --git a/crates/proto/src/generated/primitives.rs b/crates/proto/src/generated/primitives.rs deleted file mode 100644 index ea7f5a1a1..000000000 --- a/crates/proto/src/generated/primitives.rs +++ /dev/null @@ -1,98 +0,0 @@ -// This file is @generated by prost-build. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Asset { - /// Asset represented as a word. - #[prost(message, optional, tag = "1")] - pub asset: ::core::option::Option, -} -/// Represents a single SMT leaf entry. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SmtLeafEntry { - /// The key of the entry. - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option, - /// The value of the entry. - #[prost(message, optional, tag = "2")] - pub value: ::core::option::Option, -} -/// Multiple leaf entries when hash collisions occur at the same leaf position. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SmtLeafEntryList { - /// The list of entries at this leaf. - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, -} -/// A leaf in an SMT, sitting at depth 64. A leaf can contain 0, 1 or multiple leaf entries. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SmtLeaf { - #[prost(oneof = "smt_leaf::Leaf", tags = "1, 2, 3")] - pub leaf: ::core::option::Option, -} -/// Nested message and enum types in `SmtLeaf`. -pub mod smt_leaf { - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Leaf { - /// An empty leaf index. - #[prost(uint64, tag = "1")] - EmptyLeafIndex(u64), - /// A single leaf entry. - #[prost(message, tag = "2")] - Single(super::SmtLeafEntry), - /// Multiple leaf entries. 
- #[prost(message, tag = "3")] - Multiple(super::SmtLeafEntryList), - } -} -/// The opening of a leaf in an SMT. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SmtOpening { - /// The Merkle path to the leaf. - #[prost(message, optional, tag = "1")] - pub path: ::core::option::Option, - /// The leaf itself. - #[prost(message, optional, tag = "2")] - pub leaf: ::core::option::Option, -} -/// A different representation of a Merkle path designed for memory efficiency. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SparseMerklePath { - /// A bitmask representing empty nodes. - /// - /// The set bit corresponds to the depth of an empty node. The least significant bit (bit 0) - /// describes depth 1 node (root's children). The `bit index + 1` is equal to node's depth. - #[prost(fixed64, tag = "1")] - pub empty_nodes_mask: u64, - /// The non-empty nodes, stored in depth-order, but not contiguous across depth. - #[prost(message, repeated, tag = "2")] - pub siblings: ::prost::alloc::vec::Vec, -} -/// Represents an MMR delta. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MmrDelta { - /// The number of leaf nodes in the MMR. - #[prost(uint64, tag = "1")] - pub forest: u64, - /// New and changed MMR peaks. - #[prost(message, repeated, tag = "2")] - pub data: ::prost::alloc::vec::Vec, -} -/// Represents a Merkle path. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MerklePath { - /// List of sibling node hashes, in order from the root to the leaf. - #[prost(message, repeated, tag = "1")] - pub siblings: ::prost::alloc::vec::Vec, -} -/// A hash digest, the result of a hash function. 
-#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -#[prost(skip_debug)] -pub struct Digest { - #[prost(fixed64, tag = "1")] - pub d0: u64, - #[prost(fixed64, tag = "2")] - pub d1: u64, - #[prost(fixed64, tag = "3")] - pub d2: u64, - #[prost(fixed64, tag = "4")] - pub d3: u64, -} diff --git a/crates/proto/src/generated/remote_prover.rs b/crates/proto/src/generated/remote_prover.rs deleted file mode 100644 index b504804c3..000000000 --- a/crates/proto/src/generated/remote_prover.rs +++ /dev/null @@ -1,1003 +0,0 @@ -// This file is @generated by prost-build. -/// Request message for proof generation containing payload and proof type metadata. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProofRequest { - /// Type of proof being requested, determines payload interpretation - #[prost(enumeration = "ProofType", tag = "1")] - pub proof_type: i32, - /// Serialized payload requiring proof generation. The encoding format is - /// type-specific: - /// - /// * TRANSACTION: TransactionInputs encoded. - /// * BATCH: ProposedBatch encoded. - /// * BLOCK: BlockProofRequest encoded. - #[prost(bytes = "vec", tag = "2")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Response message containing the generated proof. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Proof { - /// Serialized proof bytes. - /// - /// * TRANSACTION: Returns an encoded ProvenTransaction. - /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded BlockProof. - #[prost(bytes = "vec", tag = "1")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Status of an individual worker in the proxy. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProxyWorkerStatus { - /// The name of the worker. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The version of the worker. - #[prost(string, tag = "2")] - pub version: ::prost::alloc::string::String, - /// The health status of the worker. 
- #[prost(enumeration = "WorkerHealthStatus", tag = "3")] - pub status: i32, -} -/// Response message containing the status of the proxy. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProxyStatus { - /// The version of the proxy. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this proxy. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, - /// The list of workers managed by this proxy. - #[prost(message, repeated, tag = "3")] - pub workers: ::prost::alloc::vec::Vec, -} -/// Response message containing the status of the worker. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct WorkerStatus { - /// The version of the worker. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this worker. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, -} -/// Enumeration of supported proof types. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ProofType { - /// Proof for a single transaction. - Transaction = 0, - /// Proof covering a batch of transactions. - Batch = 1, - /// Proof for entire block validity. - Block = 2, -} -impl ProofType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Transaction => "TRANSACTION", - Self::Batch => "BATCH", - Self::Block => "BLOCK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TRANSACTION" => Some(Self::Transaction), - "BATCH" => Some(Self::Batch), - "BLOCK" => Some(Self::Block), - _ => None, - } - } -} -/// Health status of a worker. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum WorkerHealthStatus { - /// The worker's health status is unknown. - /// This value is used when the proxy is not able to determine the health status of the worker. - /// It is only a temporary state and the proxy will eventually determine the health status of the worker. - Unknown = 0, - /// The worker is healthy. - /// This value is used when the worker is able to successfully process requests. - Healthy = 1, - /// The worker is unhealthy. - /// This value is used when the worker is not receiving requests or is not able to successfully process requests. - Unhealthy = 2, -} -impl WorkerHealthStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unknown => "UNKNOWN", - Self::Healthy => "HEALTHY", - Self::Unhealthy => "UNHEALTHY", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "HEALTHY" => Some(Self::Healthy), - "UNHEALTHY" => Some(Self::Unhealthy), - _ => None, - } - } -} -/// Generated client implementations. 
-pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. 
- /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Generates a proof for the requested payload. - pub async fn prove( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/remote_prover.Api/Prove"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("remote_prover.Api", "Prove")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ApiServer. - #[async_trait] - pub trait Api: std::marker::Send + std::marker::Sync + 'static { - /// Generates a proof for the requested payload. 
- async fn prove( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct ApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ApiServer - where - T: Api, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.Api/Prove" => { - #[allow(non_camel_case_types)] - struct ProveSvc(pub Arc); - impl tonic::server::UnaryService - for ProveSvc { - type Response = super::Proof; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::prove(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = ProveSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - 
(tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.Api"; - impl tonic::server::NamedService for ApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod proxy_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ProxyStatusApiClient { - inner: tonic::client::Grpc, - } - impl ProxyStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ProxyStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ProxyStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ProxyStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the proxy. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.ProxyStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.ProxyStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod proxy_status_api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ProxyStatusApiServer. - #[async_trait] - pub trait ProxyStatusApi: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status of the proxy. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct ProxyStatusApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ProxyStatusApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ProxyStatusApiServer - where - T: ProxyStatusApi, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.ProxyStatusApi/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> - for StatusSvc { - type Response = super::ProxyStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers 
- .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ProxyStatusApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.ProxyStatusApi"; - impl tonic::server::NamedService for ProxyStatusApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod worker_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct WorkerStatusApiClient { - inner: tonic::client::Grpc, - } - impl WorkerStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl WorkerStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> WorkerStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - WorkerStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the worker. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.WorkerStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.WorkerStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod worker_status_api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with WorkerStatusApiServer. - #[async_trait] - pub trait WorkerStatusApi: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status of the worker. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct WorkerStatusApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl WorkerStatusApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for WorkerStatusApiServer - where - T: WorkerStatusApi, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.WorkerStatusApi/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> - for StatusSvc { - type Response = super::WorkerStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - 
headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for WorkerStatusApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.WorkerStatusApi"; - impl tonic::server::NamedService for WorkerStatusApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs deleted file mode 100644 index 5cedf1208..000000000 --- a/crates/proto/src/generated/rpc.rs +++ /dev/null @@ -1,2074 +0,0 @@ -// This file is @generated by prost-build. -/// Represents the status of the node. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct RpcStatus { - /// The rpc component's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The genesis commitment. - #[prost(message, optional, tag = "2")] - pub genesis_commitment: ::core::option::Option, - /// The store status. - #[prost(message, optional, tag = "3")] - pub store: ::core::option::Option, - /// The block producer status. - #[prost(message, optional, tag = "4")] - pub block_producer: ::core::option::Option, -} -/// Represents the status of the block producer. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockProducerStatus { - /// The block producer's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The block producer's status. 
- #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, - /// The block producer's current view of the chain tip height. - /// - /// This is the height of the latest block that the block producer considers - /// to be part of the canonical chain. - #[prost(fixed32, tag = "4")] - pub chain_tip: u32, - /// Statistics about the mempool. - #[prost(message, optional, tag = "3")] - pub mempool_stats: ::core::option::Option, -} -/// Statistics about the mempool. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MempoolStats { - /// Number of transactions currently in the mempool waiting to be batched. - #[prost(uint64, tag = "1")] - pub unbatched_transactions: u64, - /// Number of batches currently being proven. - #[prost(uint64, tag = "2")] - pub proposed_batches: u64, - /// Number of proven batches waiting for block inclusion. - #[prost(uint64, tag = "3")] - pub proven_batches: u64, -} -/// Represents the status of the store. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StoreStatus { - /// The store's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The store's status. - #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "3")] - pub chain_tip: u32, -} -/// Returns the block header corresponding to the requested block number, as well as the merkle -/// path and current forest which validate the block's inclusion in the chain. -/// -/// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockHeaderByNumberRequest { - /// The target block height, defaults to latest if not provided. - #[prost(uint32, optional, tag = "1")] - pub block_num: ::core::option::Option, - /// Whether or not to return authentication data for the block header. 
- #[prost(bool, optional, tag = "2")] - pub include_mmr_proof: ::core::option::Option, -} -/// Represents the result of getting a block header by block number. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockHeaderByNumberResponse { - /// The requested block header. - #[prost(message, optional, tag = "1")] - pub block_header: ::core::option::Option, - /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. - #[prost(message, optional, tag = "2")] - pub mmr_path: ::core::option::Option, - /// Current chain length. - #[prost(fixed32, optional, tag = "3")] - pub chain_length: ::core::option::Option, -} -/// Represents a note script or nothing. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeNoteScript { - /// The script for a note by its root. - #[prost(message, optional, tag = "1")] - pub script: ::core::option::Option, -} -/// Defines the request for account details. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountRequest { - /// ID of the account for which we want to get data - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Optional block height at which to return the proof. - /// - /// Defaults to current chain tip if unspecified. - #[prost(message, optional, tag = "2")] - pub block_num: ::core::option::Option, - /// Request for additional account details; valid only for public accounts. - #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, -} -/// Nested message and enum types in `AccountRequest`. -pub mod account_request { - /// Request the details for a public account. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountDetailRequest { - /// Last known code commitment to the requester. The response will include account code - /// only if its commitment is different from this value. - /// - /// If the field is ommiteed, the response will not include the account code. 
- #[prost(message, optional, tag = "1")] - pub code_commitment: ::core::option::Option, - /// Last known asset vault commitment to the requester. The response will include asset vault data - /// only if its commitment is different from this value. If the value is not present in the - /// request, the response will not contain one either. - /// If the number of to-be-returned asset entries exceed a threshold, they have to be requested - /// separately, which is signaled in the response message with dedicated flag. - #[prost(message, optional, tag = "2")] - pub asset_vault_commitment: ::core::option::Option< - super::super::primitives::Digest, - >, - /// Additional request per storage map. - #[prost(message, repeated, tag = "3")] - pub storage_maps: ::prost::alloc::vec::Vec< - account_detail_request::StorageMapDetailRequest, - >, - } - /// Nested message and enum types in `AccountDetailRequest`. - pub mod account_detail_request { - /// Represents a storage slot index and the associated map keys. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StorageMapDetailRequest { - /// Storage slot name. - #[prost(string, tag = "1")] - pub slot_name: ::prost::alloc::string::String, - #[prost(oneof = "storage_map_detail_request::SlotData", tags = "2, 3")] - pub slot_data: ::core::option::Option, - } - /// Nested message and enum types in `StorageMapDetailRequest`. - pub mod storage_map_detail_request { - /// Indirection required for use in `oneof {..}` block. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapKeys { - /// A list of map keys associated with this storage slot. - #[prost(message, repeated, tag = "1")] - pub map_keys: ::prost::alloc::vec::Vec< - super::super::super::super::primitives::Digest, - >, - } - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum SlotData { - /// Request to return all storage map data. If the number exceeds a threshold of 1000 entries, - /// the response will not contain them but must be requested separately. 
- #[prost(bool, tag = "2")] - AllEntries(bool), - /// A list of map keys associated with the given storage slot identified by `slot_name`. - #[prost(message, tag = "3")] - MapKeys(MapKeys), - } - } - } -} -/// Represents the result of getting account proof. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountResponse { - /// The block number at which the account witness was created and the account details were observed. - #[prost(message, optional, tag = "1")] - pub block_num: ::core::option::Option, - /// Account ID, current state commitment, and SMT path. - #[prost(message, optional, tag = "2")] - pub witness: ::core::option::Option, - /// Additional details for public accounts. - #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, -} -/// Nested message and enum types in `AccountResponse`. -pub mod account_response { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountDetails { - /// Account header. - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// Account storage data - #[prost(message, optional, tag = "2")] - pub storage_details: ::core::option::Option, - /// Account code; empty if code commitments matched or none was requested. - #[prost(bytes = "vec", optional, tag = "3")] - pub code: ::core::option::Option<::prost::alloc::vec::Vec>, - /// Account asset vault data; empty if vault commitments matched or the requester - /// omitted it in the request. - #[prost(message, optional, tag = "4")] - pub vault_details: ::core::option::Option, - } -} -/// Account vault details for AccountResponse -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountVaultDetails { - /// A flag that is set to true if the account contains too many assets. 
This indicates - /// to the user that `SyncAccountVault` endpoint should be used to retrieve the - /// account's assets - #[prost(bool, tag = "1")] - pub too_many_assets: bool, - /// When too_many_assets == false, this will contain the list of assets in the - /// account's vault - #[prost(message, repeated, tag = "2")] - pub assets: ::prost::alloc::vec::Vec, -} -/// Account storage details for AccountResponse -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountStorageDetails { - /// Account storage header (storage slot info for up to 256 slots) - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// Additional data for the requested storage maps - #[prost(message, repeated, tag = "2")] - pub map_details: ::prost::alloc::vec::Vec< - account_storage_details::AccountStorageMapDetails, - >, -} -/// Nested message and enum types in `AccountStorageDetails`. -pub mod account_storage_details { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountStorageMapDetails { - /// Storage slot name. - #[prost(string, tag = "1")] - pub slot_name: ::prost::alloc::string::String, - /// True when the number of entries exceeds the response limit. - /// When set, clients should use the `SyncAccountStorageMaps` endpoint. - #[prost(bool, tag = "2")] - pub too_many_entries: bool, - /// The map entries (with or without proofs). Empty when too_many_entries is true. - #[prost(oneof = "account_storage_map_details::Entries", tags = "3, 4")] - pub entries: ::core::option::Option, - } - /// Nested message and enum types in `AccountStorageMapDetails`. - pub mod account_storage_map_details { - /// Wrapper for repeated storage map entries including their proofs. - /// Used when specific keys are requested to enable client-side verification. 
- #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapEntriesWithProofs { - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec< - map_entries_with_proofs::StorageMapEntryWithProof, - >, - } - /// Nested message and enum types in `MapEntriesWithProofs`. - pub mod map_entries_with_proofs { - /// Definition of individual storage entries including a proof. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StorageMapEntryWithProof { - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - #[prost(message, optional, tag = "2")] - pub value: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - #[prost(message, optional, tag = "3")] - pub proof: ::core::option::Option< - super::super::super::super::primitives::SmtOpening, - >, - } - } - /// Wrapper for repeated storage map entries (without proofs). - /// Used when all entries are requested for small maps. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AllMapEntries { - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, - } - /// Nested message and enum types in `AllMapEntries`. - pub mod all_map_entries { - /// Definition of individual storage entries. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct StorageMapEntry { - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - #[prost(message, optional, tag = "2")] - pub value: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - } - } - /// The map entries (with or without proofs). Empty when too_many_entries is true. - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Entries { - /// All storage entries without proofs (for small maps or full requests). 
- #[prost(message, tag = "3")] - AllEntries(AllMapEntries), - /// Specific entries with their SMT proofs (for partial requests). - #[prost(message, tag = "4")] - EntriesWithProofs(MapEntriesWithProofs), - } - } -} -/// List of nullifiers to return proofs for. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NullifierList { - /// List of nullifiers to return proofs for. - #[prost(message, repeated, tag = "1")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Represents the result of checking nullifiers. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CheckNullifiersResponse { - /// Each requested nullifier has its corresponding nullifier proof at the same position. - #[prost(message, repeated, tag = "1")] - pub proofs: ::prost::alloc::vec::Vec, -} -/// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncNullifiersRequest { - /// Block number from which the nullifiers are requested (inclusive). - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Number of bits used for nullifier prefix. Currently the only supported value is 16. - #[prost(uint32, tag = "2")] - pub prefix_len: u32, - /// List of nullifiers to check. Each nullifier is specified by its prefix with length equal - /// to `prefix_len`. - #[prost(uint32, repeated, tag = "3")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing nullifiers. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncNullifiersResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of nullifiers matching the prefixes specified in the request. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `SyncNullifiersResponse`. 
-pub mod sync_nullifiers_response { - /// Represents a single nullifier update. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct NullifierUpdate { - /// Nullifier ID. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// Block number. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - } -} -/// Account vault synchronization request. -/// -/// Allows requesters to sync asset values for specific public accounts within a block range. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncAccountVaultRequest { - /// Block range from which to start synchronizing. - /// - /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - /// otherwise an error will be returned. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Account for which we want to sync asset vault. - #[prost(message, optional, tag = "2")] - pub account_id: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncAccountVaultResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of asset updates for the account. - /// - /// Multiple updates can be returned for a single asset, and the one with a higher `block_num` - /// is expected to be retained by the caller. - #[prost(message, repeated, tag = "2")] - pub updates: ::prost::alloc::vec::Vec, -} -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountVaultUpdate { - /// Vault key associated with the asset. - #[prost(message, optional, tag = "1")] - pub vault_key: ::core::option::Option, - /// Asset value related to the vault key. - /// If not present, the asset was removed from the vault. - #[prost(message, optional, tag = "2")] - pub asset: ::core::option::Option, - /// Block number at which the above asset was updated in the account vault. 
- #[prost(fixed32, tag = "3")] - pub block_num: u32, -} -/// Note synchronization request. -/// -/// Specifies note tags that requester is interested in. The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncNotesRequest { - /// Block range from which to start synchronizing. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "2")] - pub note_tags: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing notes request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncNotesResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// Block header of the block with the first note matching the specified criteria. - #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. - /// - /// An MMR proof can be constructed for the leaf of index `block_header.block_num` of - /// an MMR of forest `chain_tip` with this path. - #[prost(message, optional, tag = "3")] - pub mmr_path: ::core::option::Option, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. - #[prost(message, repeated, tag = "4")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Chain MMR synchronization request. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncChainMmrRequest { - /// Block range from which to synchronize the chain MMR. - /// - /// The response will contain MMR delta starting after `block_range.block_from` up to - /// `block_range.block_to` or the chain tip (whichever is lower). 
Set `block_from` to the last - /// block already present in the caller's MMR so the delta begins at the next block. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, -} -/// Represents the result of syncing chain MMR. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncChainMmrResponse { - /// For which block range the MMR delta is returned. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Data needed to update the partial MMR from `request.block_range.block_from + 1` to - /// `response.block_range.block_to` or the chain tip. - #[prost(message, optional, tag = "2")] - pub mmr_delta: ::core::option::Option, -} -/// Storage map synchronization request. -/// -/// Allows requesters to sync storage map values for specific public accounts within a block range, -/// with support for cursor-based pagination to handle large storage maps. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncAccountStorageMapsRequest { - /// Block range from which to start synchronizing. - /// - /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - /// otherwise an error will be returned. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Account for which we want to sync storage maps. - #[prost(message, optional, tag = "3")] - pub account_id: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncAccountStorageMapsResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// The list of storage map updates. - /// - /// Multiple updates can be returned for a single slot index and key combination, and the one - /// with a higher `block_num` is expected to be retained by the caller. 
- #[prost(message, repeated, tag = "2")] - pub updates: ::prost::alloc::vec::Vec, -} -/// Represents a single storage map update. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StorageMapUpdate { - /// Block number in which the slot was updated. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Storage slot name. - #[prost(string, tag = "2")] - pub slot_name: ::prost::alloc::string::String, - /// The storage map key. - #[prost(message, optional, tag = "3")] - pub key: ::core::option::Option, - /// The storage map value. - #[prost(message, optional, tag = "4")] - pub value: ::core::option::Option, -} -/// Represents a block range. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockRange { - /// Block number from which to start (inclusive). - #[prost(fixed32, tag = "1")] - pub block_from: u32, - /// Block number up to which to check (inclusive). If not specified, checks up to the latest block. - #[prost(fixed32, optional, tag = "2")] - pub block_to: ::core::option::Option, -} -/// Represents pagination information for chunked responses. -/// -/// Pagination is done using block numbers as the axis, allowing requesters to request -/// data in chunks by specifying block ranges and continuing from where the previous -/// response left off. -/// -/// To request the next chunk, the requester should use `block_num + 1` from the previous response -/// as the `block_from` for the next request. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct PaginationInfo { - /// Current chain tip - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// The block number of the last check included in this response. - /// - /// For chunked responses, this may be less than `request.block_range.block_to`. - /// If it is less than request.block_range.block_to, the user is expected to make a subsequent request - /// starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). 
- #[prost(fixed32, tag = "2")] - pub block_num: u32, -} -/// Transactions synchronization request. -/// -/// Allows requesters to sync transactions for specific accounts within a block range. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncTransactionsRequest { - /// Block range from which to start synchronizing. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Accounts to sync transactions for. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing transactions request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncTransactionsResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of transaction records. - #[prost(message, repeated, tag = "2")] - pub transactions: ::prost::alloc::vec::Vec, -} -/// Represents a transaction record. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionRecord { - /// Block number in which the transaction was included. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// A transaction header. - #[prost(message, optional, tag = "2")] - pub header: ::core::option::Option, -} -/// Represents the query parameter limits for RPC endpoints. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RpcLimits { - /// Maps RPC endpoint names to their parameter limits. - /// Key: endpoint name (e.g., "CheckNullifiers") - /// Value: map of parameter names to their limit values - #[prost(map = "string, message", tag = "1")] - pub endpoints: ::std::collections::HashMap< - ::prost::alloc::string::String, - EndpointLimits, - >, -} -/// Represents the parameter limits for a single endpoint. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EndpointLimits { - /// Maps parameter names to their limit values. 
- /// Key: parameter name (e.g., "nullifier", "account_id") - /// Value: limit value - #[prost(map = "string, uint32", tag = "1")] - pub parameters: ::std::collections::HashMap<::prost::alloc::string::String, u32>, -} -/// Generated client implementations. -pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// RPC API for the RPC component - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. 
- #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info of the node. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/Status"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "Status")); - self.inner.unary(req, path, codec).await - } - /// Returns the query parameter limits configured for RPC methods. - /// - /// These define the maximum number of each parameter a method will accept. - /// Exceeding the limit will result in the request being rejected and you should instead send - /// multiple smaller requests. 
- pub async fn get_limits( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetLimits"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetLimits")); - self.inner.unary(req, path, codec).await - } - /// Returns a Sparse Merkle Tree opening proof for each requested nullifier - /// - /// Each proof demonstrates either: - /// - /// * **Inclusion**: Nullifier exists in the tree (note was consumed) - /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) - /// - /// The `leaf` field indicates the status: - /// - /// * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) - /// * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. - /// - /// Verify proofs against the nullifier tree root in the latest block header. - pub async fn check_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/CheckNullifiers"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "CheckNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest details of the specified account. 
- pub async fn get_account( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetAccount"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetAccount")); - self.inner.unary(req, path, codec).await - } - /// Returns raw block data for the specified block number. - pub async fn get_block_by_number( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetBlockByNumber"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetBlockByNumber")); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. 
- pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc.Api", "GetBlockHeaderByNumber")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of notes matching the provided note IDs. - pub async fn get_notes_by_id( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetNotesById"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetNotesById")); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. - pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc.Api", "GetNoteScriptByRoot")); - self.inner.unary(req, path, codec).await - } - /// Submits proven transaction to the Miden network. 
Returns the node's current block height. - pub async fn submit_proven_transaction( - &mut self, - request: impl tonic::IntoRequest< - super::super::transaction::ProvenTransaction, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/SubmitProvenTransaction", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc.Api", "SubmitProvenTransaction")); - self.inner.unary(req, path, codec).await - } - /// Submits a proven batch of transactions to the Miden network. - /// - /// The batch may include transactions which were are: - /// - /// * already in the mempool i.e. previously successfully submitted - /// * will be submitted to the mempool in the future - /// * won't be submitted to the mempool at all - /// - /// All transactions in the batch but not in the mempool must build on the current mempool - /// state following normal transaction submission rules. - /// - /// Returns the node's current block height. - pub async fn submit_proven_batch( - &mut self, - request: impl tonic::IntoRequest< - super::super::transaction::ProvenTransactionBatch, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/SubmitProvenBatch", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SubmitProvenBatch")); - self.inner.unary(req, path, codec).await - } - /// Returns transactions records for specific accounts within a block range. 
- pub async fn sync_transactions( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncTransactions"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncTransactions")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the client to sync up to the tip of chain for the notes - /// they are interested in. - /// - /// Client specifies the `note_tags` they are interested in, and the block height from which to - /// search for new for matching notes for. The request will then return the next block containing - /// any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block - /// until reaching the tip of the chain. - pub async fn sync_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNotes"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNotes")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. 
- pub async fn sync_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNullifiers"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns account vault updates for specified account within a block range. - pub async fn sync_account_vault( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncAccountVault"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncAccountVault")); - self.inner.unary(req, path, codec).await - } - /// Returns storage map updates for specified account and storage slots within a block range. 
- pub async fn sync_account_storage_maps( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/SyncAccountStorageMaps", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc.Api", "SyncAccountStorageMaps")); - self.inner.unary(req, path, codec).await - } - /// Returns MMR delta needed to synchronize the chain MMR within the requested block range. - pub async fn sync_chain_mmr( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncChainMmr"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncChainMmr")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ApiServer. - #[async_trait] - pub trait Api: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info of the node. - async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - /// Returns the query parameter limits configured for RPC methods. - /// - /// These define the maximum number of each parameter a method will accept. 
- /// Exceeding the limit will result in the request being rejected and you should instead send - /// multiple smaller requests. - async fn get_limits( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - /// Returns a Sparse Merkle Tree opening proof for each requested nullifier - /// - /// Each proof demonstrates either: - /// - /// * **Inclusion**: Nullifier exists in the tree (note was consumed) - /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) - /// - /// The `leaf` field indicates the status: - /// - /// * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) - /// * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. - /// - /// Verify proofs against the nullifier tree root in the latest block header. - async fn check_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest details of the specified account. - async fn get_account( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns raw block data for the specified block number. - async fn get_block_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of notes matching the provided note IDs. - async fn get_notes_by_id( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. 
- async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Submits proven transaction to the Miden network. Returns the node's current block height. - async fn submit_proven_transaction( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Submits a proven batch of transactions to the Miden network. - /// - /// The batch may include transactions which were are: - /// - /// * already in the mempool i.e. previously successfully submitted - /// * will be submitted to the mempool in the future - /// * won't be submitted to the mempool at all - /// - /// All transactions in the batch but not in the mempool must build on the current mempool - /// state following normal transaction submission rules. - /// - /// Returns the node's current block height. - async fn submit_proven_batch( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns transactions records for specific accounts within a block range. - async fn sync_transactions( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the client to sync up to the tip of chain for the notes - /// they are interested in. - /// - /// Client specifies the `note_tags` they are interested in, and the block height from which to - /// search for new for matching notes for. The request will then return the next block containing - /// any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block - /// until reaching the tip of the chain. 
- async fn sync_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. - async fn sync_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns account vault updates for specified account within a block range. - async fn sync_account_vault( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_account_storage_maps( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns MMR delta needed to synchronize the chain MMR within the requested block range. - async fn sync_chain_mmr( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// RPC API for the RPC component - #[derive(Debug)] - pub struct ApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. 
- #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ApiServer - where - T: Api, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/rpc.Api/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::RpcStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let 
inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetLimits" => { - #[allow(non_camel_case_types)] - struct GetLimitsSvc(pub Arc); - impl tonic::server::UnaryService<()> for GetLimitsSvc { - type Response = super::RpcLimits; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_limits(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetLimitsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/CheckNullifiers" => { - #[allow(non_camel_case_types)] - struct CheckNullifiersSvc(pub Arc); - impl tonic::server::UnaryService - for CheckNullifiersSvc { - type Response = super::CheckNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { 
- let inner = Arc::clone(&self.0); - let fut = async move { - ::check_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = CheckNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetAccount" => { - #[allow(non_camel_case_types)] - struct GetAccountSvc(pub Arc); - impl tonic::server::UnaryService - for GetAccountSvc { - type Response = super::AccountResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - 
let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetBlockByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockByNumberSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for GetBlockByNumberSvc { - type Response = super::super::blockchain::MaybeBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::BlockNumber, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_by_number(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for GetBlockHeaderByNumberSvc { - type Response = super::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; 
- let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetNotesById" => { - #[allow(non_camel_case_types)] - struct GetNotesByIdSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for GetNotesByIdSvc { - type Response = super::super::note::CommittedNoteList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_notes_by_id(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNotesByIdSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct 
GetNoteScriptByRootSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SubmitProvenTransaction" => { - #[allow(non_camel_case_types)] - struct SubmitProvenTransactionSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::transaction::ProvenTransaction, - > for SubmitProvenTransactionSvc { - type Response = super::super::blockchain::BlockNumber; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::transaction::ProvenTransaction, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::submit_proven_transaction(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let 
max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SubmitProvenTransactionSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SubmitProvenBatch" => { - #[allow(non_camel_case_types)] - struct SubmitProvenBatchSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::transaction::ProvenTransactionBatch, - > for SubmitProvenBatchSvc { - type Response = super::super::blockchain::BlockNumber; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::transaction::ProvenTransactionBatch, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::submit_proven_batch(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SubmitProvenBatchSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - 
"/rpc.Api/SyncTransactions" => { - #[allow(non_camel_case_types)] - struct SyncTransactionsSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncTransactionsSvc { - type Response = super::SyncTransactionsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_transactions(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncTransactionsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncNotes" => { - #[allow(non_camel_case_types)] - struct SyncNotesSvc(pub Arc); - impl tonic::server::UnaryService - for SyncNotesSvc { - type Response = super::SyncNotesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_notes(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = 
self.inner.clone(); - let fut = async move { - let method = SyncNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncNullifiers" => { - #[allow(non_camel_case_types)] - struct SyncNullifiersSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncNullifiersSvc { - type Response = super::SyncNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncAccountVault" => { - #[allow(non_camel_case_types)] - struct SyncAccountVaultSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncAccountVaultSvc { - type Response = super::SyncAccountVaultResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - 
fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_account_vault(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncAccountVaultSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncAccountStorageMaps" => { - #[allow(non_camel_case_types)] - struct SyncAccountStorageMapsSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncAccountStorageMapsSvc { - type Response = super::SyncAccountStorageMapsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_account_storage_maps(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncAccountStorageMapsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( 
- accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncChainMmr" => { - #[allow(non_camel_case_types)] - struct SyncChainMmrSvc(pub Arc); - impl tonic::server::UnaryService - for SyncChainMmrSvc { - type Response = super::SyncChainMmrResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_chain_mmr(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncChainMmrSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: 
self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "rpc.Api"; - impl tonic::server::NamedService for ApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs deleted file mode 100644 index 49081b933..000000000 --- a/crates/proto/src/generated/store.rs +++ /dev/null @@ -1,3183 +0,0 @@ -// This file is @generated by prost-build. -/// Applies a block to the state. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ApplyBlockRequest { - /// Ordered batches encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::batch::OrderedBatches\]. - #[prost(bytes = "vec", tag = "1")] - pub ordered_batches: ::prost::alloc::vec::Vec, - /// Block signed by the Validator. - #[prost(message, optional, tag = "2")] - pub block: ::core::option::Option, -} -/// Returns data required to prove the next block. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockInputsRequest { - /// IDs of all accounts updated in the proposed block for which to retrieve account witnesses. - #[prost(message, repeated, tag = "1")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Nullifiers of all notes consumed by the block for which to retrieve witnesses. - /// - /// Due to note erasure it will generally not be possible to know the exact set of nullifiers - /// a block will create, unless we pre-execute note erasure. So in practice, this set of - /// nullifiers will be the set of nullifiers of all proven batches in the block, which is a - /// superset of the nullifiers the block may create. 
- /// - /// However, if it is known that a certain note will be erased, it would not be necessary to - /// provide a nullifier witness for it. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. - #[prost(message, repeated, tag = "3")] - pub unauthenticated_notes: ::prost::alloc::vec::Vec, - /// Array of block numbers referenced by all batches in the block. - #[prost(fixed32, repeated, tag = "4")] - pub reference_blocks: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting block inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockInputs { - /// The latest block header. - #[prost(message, optional, tag = "1")] - pub latest_block_header: ::core::option::Option, - /// Proof of each requested unauthenticated note's inclusion in a block, **if it existed in - /// the store**. - #[prost(message, repeated, tag = "2")] - pub unauthenticated_note_proofs: ::prost::alloc::vec::Vec< - super::note::NoteInclusionInBlockProof, - >, - /// The serialized chain MMR which includes proofs for all blocks referenced by the - /// above note inclusion proofs as well as proofs for inclusion of the requested blocks - /// referenced by the batches in the block. - #[prost(bytes = "vec", tag = "3")] - pub partial_block_chain: ::prost::alloc::vec::Vec, - /// The state commitments of the requested accounts and their authentication paths. - #[prost(message, repeated, tag = "4")] - pub account_witnesses: ::prost::alloc::vec::Vec, - /// The requested nullifiers and their authentication paths. - #[prost(message, repeated, tag = "5")] - pub nullifier_witnesses: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `BlockInputs`. -pub mod block_inputs { - /// A nullifier returned as a response to the `GetBlockInputs`. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct NullifierWitness { - /// The nullifier. 
- #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// The SMT proof to verify the nullifier's inclusion in the nullifier tree. - #[prost(message, optional, tag = "2")] - pub opening: ::core::option::Option, - } -} -/// Returns the inputs for a transaction batch. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchInputsRequest { - /// List of unauthenticated note commitments to be queried from the database. - #[prost(message, repeated, tag = "1")] - pub note_commitments: ::prost::alloc::vec::Vec, - /// Set of block numbers referenced by transactions. - #[prost(fixed32, repeated, tag = "2")] - pub reference_blocks: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting batch inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchInputs { - /// The block header that the transaction batch should reference. - #[prost(message, optional, tag = "1")] - pub batch_reference_block_header: ::core::option::Option< - super::blockchain::BlockHeader, - >, - /// Proof of each *found* unauthenticated note's inclusion in a block. - #[prost(message, repeated, tag = "2")] - pub note_proofs: ::prost::alloc::vec::Vec, - /// The serialized chain MMR which includes proofs for all blocks referenced by the - /// above note inclusion proofs as well as proofs for inclusion of the blocks referenced - /// by the transactions in the batch. - #[prost(bytes = "vec", tag = "3")] - pub partial_block_chain: ::prost::alloc::vec::Vec, -} -/// Returns data required to validate a new transaction. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionInputsRequest { - /// ID of the account against which a transaction is executed. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Set of nullifiers consumed by this transaction. 
- #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Set of unauthenticated note commitments to check for existence on-chain. - /// - /// These are notes which were not on-chain at the state the transaction was proven, - /// but could by now be present. - #[prost(message, repeated, tag = "3")] - pub unauthenticated_notes: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting transaction inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionInputs { - /// Account state proof. - #[prost(message, optional, tag = "1")] - pub account_state: ::core::option::Option< - transaction_inputs::AccountTransactionInputRecord, - >, - /// List of nullifiers that have been consumed. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec< - transaction_inputs::NullifierTransactionInputRecord, - >, - /// List of unauthenticated notes that were not found in the database. - #[prost(message, repeated, tag = "3")] - pub found_unauthenticated_notes: ::prost::alloc::vec::Vec, - /// The node's current block height. - #[prost(fixed32, tag = "4")] - pub block_height: u32, - /// Whether the account ID prefix is unique. Only relevant for account creation requests. - /// - /// TODO: Replace this with an error. When a general error message exists. - #[prost(bool, optional, tag = "5")] - pub new_account_id_prefix_is_unique: ::core::option::Option, -} -/// Nested message and enum types in `TransactionInputs`. -pub mod transaction_inputs { - /// An account returned as a response to the `GetTransactionInputs`. - #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] - pub struct AccountTransactionInputRecord { - /// The account ID. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The latest account commitment, zero commitment if the account doesn't exist. 
- #[prost(message, optional, tag = "2")] - pub account_commitment: ::core::option::Option, - } - /// A nullifier returned as a response to the `GetTransactionInputs`. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct NullifierTransactionInputRecord { - /// The nullifier ID. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// The block at which the nullifier has been consumed, zero if not consumed. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - } -} -/// Represents the result of getting network account details by ID. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeAccountDetails { - /// Account details. - #[prost(message, optional, tag = "1")] - pub details: ::core::option::Option, -} -/// Returns a paginated list of unconsumed network notes for an account. -/// -/// Notes created or consumed after the specified block are excluded from the result. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct UnconsumedNetworkNotesRequest { - /// This should be null on the first call, and set to the response token until the response token - /// is null, at which point all data has been fetched. - /// - /// Note that this token is only valid if used with the same parameters. - #[prost(uint64, optional, tag = "1")] - pub page_token: ::core::option::Option, - /// Number of notes to retrieve per page. - #[prost(uint64, tag = "2")] - pub page_size: u64, - /// The full account ID to filter notes by. - #[prost(message, optional, tag = "3")] - pub account_id: ::core::option::Option, - /// The block number to filter the returned notes by. - /// - /// Notes that are created or consumed after this block are excluded from the result. - #[prost(fixed32, tag = "4")] - pub block_num: u32, -} -/// Represents the result of getting the unconsumed network notes. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UnconsumedNetworkNotes { - /// An opaque pagination token. 
- /// - /// Use this in your next request to get the next - /// set of data. - /// - /// Will be null once there is no more data remaining. - #[prost(uint64, optional, tag = "1")] - pub next_token: ::core::option::Option, - /// The list of unconsumed network notes. - #[prost(message, repeated, tag = "2")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting the network account ids. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NetworkAccountIdList { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// The list of network account ids. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, -} -/// Current blockchain data based on the requested block number. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CurrentBlockchainData { - /// Commitments that represent the current state according to the MMR. - #[prost(message, repeated, tag = "1")] - pub current_peaks: ::prost::alloc::vec::Vec, - /// Current block header. - #[prost(message, optional, tag = "2")] - pub current_block_header: ::core::option::Option, -} -/// Request for vault asset witnesses for a specific account. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct VaultAssetWitnessesRequest { - /// The account ID for which to retrieve vault asset witnesses. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Set of asset vault keys to retrieve witnesses for. - #[prost(message, repeated, tag = "2")] - pub vault_keys: ::prost::alloc::vec::Vec, - /// The witnesses returned correspond to the account state at the specified block number. - /// - /// Optional block number. If not provided, uses the latest state. - /// - /// The specified block number should be relatively near the chain tip else an error will be - /// returned. 
- #[prost(fixed32, optional, tag = "3")] - pub block_num: ::core::option::Option, -} -/// Response containing vault asset witnesses. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct VaultAssetWitnessesResponse { - /// Block number at which the witnesses were generated. - /// - /// The witnesses returned corresponds to the account state at the specified block number. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// List of asset witnesses. - #[prost(message, repeated, tag = "2")] - pub asset_witnesses: ::prost::alloc::vec::Vec< - vault_asset_witnesses_response::VaultAssetWitness, - >, -} -/// Nested message and enum types in `VaultAssetWitnessesResponse`. -pub mod vault_asset_witnesses_response { - /// A vault asset witness containing the asset and its proof. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct VaultAssetWitness { - /// The SMT opening proof for the asset's inclusion in the vault. - #[prost(message, optional, tag = "1")] - pub proof: ::core::option::Option, - } -} -/// Request for a storage map witness for a specific account and storage slot. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StorageMapWitnessRequest { - /// The account ID for which to retrieve the storage map witness. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The raw, user-provided storage map key for which to retrieve the witness. - #[prost(message, optional, tag = "2")] - pub map_key: ::core::option::Option, - /// Optional block number. If not provided, uses the latest state. - /// - /// The witness returned corresponds to the account state at the specified block number. - /// - /// The specified block number should be relatively near the chain tip else an error will be - /// returned. - #[prost(fixed32, optional, tag = "3")] - pub block_num: ::core::option::Option, - /// The storage slot name for the map. 
- #[prost(string, tag = "4")] - pub slot_name: ::prost::alloc::string::String, -} -/// Response containing a storage map witness. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct StorageMapWitnessResponse { - /// The storage map witness. - #[prost(message, optional, tag = "1")] - pub witness: ::core::option::Option, - /// Block number at which the witness was generated. - #[prost(fixed32, tag = "2")] - pub block_num: u32, -} -/// Nested message and enum types in `StorageMapWitnessResponse`. -pub mod storage_map_witness_response { - /// Storage map witness data. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StorageWitness { - /// The raw, user-provided storage map key. - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option, - /// The SMT opening proof for the key-value pair. - #[prost(message, optional, tag = "3")] - pub proof: ::core::option::Option, - } -} -/// Generated client implementations. -pub mod rpc_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the RPC component - #[derive(Debug, Clone)] - pub struct RpcClient { - inner: tonic::client::Grpc, - } - impl RpcClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl RpcClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> RpcClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - RpcClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info. 
- pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/Status"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "Status")); - self.inner.unary(req, path, codec).await - } - /// Returns a Sparse Merkle Tree opening proof for each requested nullifier - /// - /// Each proof demonstrates either: - /// - /// * **Inclusion**: Nullifier exists in the tree (note was consumed) - /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) - /// - /// The `leaf` field indicates the status: - /// - /// * `empty_leaf_index`: Non-inclusion proof - /// * `single` or `multiple`: Inclusion proof if the nullifier key is present - /// - /// Verify proofs against the nullifier tree root in the latest block header. - pub async fn check_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/CheckNullifiers", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "CheckNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest details the specified account. 
- pub async fn get_account( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/GetAccount"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetAccount")); - self.inner.unary(req, path, codec).await - } - /// Returns raw block data for the specified block number. - pub async fn get_block_by_number( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/GetBlockByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "GetBlockByNumber")); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. 
- pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "GetBlockHeaderByNumber")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of committed notes matching the provided note IDs. - pub async fn get_notes_by_id( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/GetNotesById"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetNotesById")); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. 
- pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "GetNoteScriptByRoot")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. - pub async fn sync_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncNullifiers"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - /// - /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. 
- pub async fn sync_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncNotes"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNotes")); - self.inner.unary(req, path, codec).await - } - /// Returns chain MMR updates within a block range. - pub async fn sync_chain_mmr( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncChainMmr"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncChainMmr")); - self.inner.unary(req, path, codec).await - } - /// Returns account vault updates for specified account within a block range. - pub async fn sync_account_vault( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/SyncAccountVault", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "SyncAccountVault")); - self.inner.unary(req, path, codec).await - } - /// Returns storage map updates for specified account and storage slots within a block range. 
- pub async fn sync_account_storage_maps( - &mut self, - request: impl tonic::IntoRequest< - super::super::rpc::SyncAccountStorageMapsRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/SyncAccountStorageMaps", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "SyncAccountStorageMaps")); - self.inner.unary(req, path, codec).await - } - /// Returns transactions records for specific accounts within a block range. - pub async fn sync_transactions( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/SyncTransactions", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "SyncTransactions")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod rpc_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with RpcServer. - #[async_trait] - pub trait Rpc: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a Sparse Merkle Tree opening proof for each requested nullifier - /// - /// Each proof demonstrates either: - /// - /// * **Inclusion**: Nullifier exists in the tree (note was consumed) - /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) - /// - /// The `leaf` field indicates the status: - /// - /// * `empty_leaf_index`: Non-inclusion proof - /// * `single` or `multiple`: Inclusion proof if the nullifier key is present - /// - /// Verify proofs against the nullifier tree root in the latest block header. - async fn check_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest details the specified account. - async fn get_account( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns raw block data for the specified block number. - async fn get_block_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of committed notes matching the provided note IDs. - async fn get_notes_by_id( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. - async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. 
- /// - /// Note that only 16-bit prefixes are supported at this time. - async fn sync_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - /// - /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - async fn sync_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns chain MMR updates within a block range. - async fn sync_chain_mmr( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns account vault updates for specified account within a block range. - async fn sync_account_vault( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_account_storage_maps( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns transactions records for specific accounts within a block range. 
- async fn sync_transactions( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the RPC component - #[derive(Debug)] - pub struct RpcServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl RpcServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for RpcServer - where - T: Rpc, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/store.Rpc/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::super::rpc::StoreStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/CheckNullifiers" => { - #[allow(non_camel_case_types)] - struct CheckNullifiersSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for CheckNullifiersSvc { - type 
Response = super::super::rpc::CheckNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::check_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = CheckNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetAccount" => { - #[allow(non_camel_case_types)] - struct GetAccountSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetAccountSvc { - type Response = super::super::rpc::AccountResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = 
tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetBlockByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockByNumberSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetBlockByNumberSvc { - type Response = super::super::blockchain::MaybeBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::BlockNumber, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_by_number(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::rpc::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::rpc::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - 
>; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetNotesById" => { - #[allow(non_camel_case_types)] - struct GetNotesByIdSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetNotesByIdSvc { - type Response = super::super::note::CommittedNoteList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_notes_by_id(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNotesByIdSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) 
- .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::super::rpc::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncNullifiers" => { - #[allow(non_camel_case_types)] - struct SyncNullifiersSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::rpc::SyncNullifiersRequest, - > for SyncNullifiersSvc { - type Response = super::super::rpc::SyncNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::SyncNullifiersRequest, 
- >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncNotes" => { - #[allow(non_camel_case_types)] - struct SyncNotesSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncNotesSvc { - type Response = super::super::rpc::SyncNotesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_notes(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - 
max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncChainMmr" => { - #[allow(non_camel_case_types)] - struct SyncChainMmrSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncChainMmrSvc { - type Response = super::super::rpc::SyncChainMmrResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::SyncChainMmrRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_chain_mmr(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncChainMmrSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncAccountVault" => { - #[allow(non_camel_case_types)] - struct SyncAccountVaultSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::rpc::SyncAccountVaultRequest, - > for SyncAccountVaultSvc { - type Response = super::super::rpc::SyncAccountVaultResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::SyncAccountVaultRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - 
::sync_account_vault(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncAccountVaultSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncAccountStorageMaps" => { - #[allow(non_camel_case_types)] - struct SyncAccountStorageMapsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::rpc::SyncAccountStorageMapsRequest, - > for SyncAccountStorageMapsSvc { - type Response = super::super::rpc::SyncAccountStorageMapsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::SyncAccountStorageMapsRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_account_storage_maps(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncAccountStorageMapsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - 
accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncTransactions" => { - #[allow(non_camel_case_types)] - struct SyncTransactionsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::rpc::SyncTransactionsRequest, - > for SyncTransactionsSvc { - type Response = super::super::rpc::SyncTransactionsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::SyncTransactionsRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_transactions(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncTransactionsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl 
Clone for RpcServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "store.Rpc"; - impl tonic::server::NamedService for RpcServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod block_producer_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the BlockProducer component - #[derive(Debug, Clone)] - pub struct BlockProducerClient { - inner: tonic::client::Grpc, - } - impl BlockProducerClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl BlockProducerClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> BlockProducerClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - BlockProducerClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Applies changes of a new block to the DB and in-memory data structures. - pub async fn apply_block( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.BlockProducer/ApplyBlock", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.BlockProducer", "ApplyBlock")); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.BlockProducer/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("store.BlockProducer", "GetBlockHeaderByNumber"), - ); - self.inner.unary(req, path, codec).await - } - /// Returns data required to prove the next block. 
- pub async fn get_block_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.BlockProducer/GetBlockInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.BlockProducer", "GetBlockInputs")); - self.inner.unary(req, path, codec).await - } - /// Returns the inputs for a transaction batch. - pub async fn get_batch_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.BlockProducer/GetBatchInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.BlockProducer", "GetBatchInputs")); - self.inner.unary(req, path, codec).await - } - /// Returns data required to validate a new transaction. - pub async fn get_transaction_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.BlockProducer/GetTransactionInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.BlockProducer", "GetTransactionInputs")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. 
-pub mod block_producer_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with BlockProducerServer. - #[async_trait] - pub trait BlockProducer: std::marker::Send + std::marker::Sync + 'static { - /// Applies changes of a new block to the DB and in-memory data structures. - async fn apply_block( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns data required to prove the next block. - async fn get_block_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns the inputs for a transaction batch. - async fn get_batch_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns data required to validate a new transaction. 
- async fn get_transaction_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the BlockProducer component - #[derive(Debug)] - pub struct BlockProducerServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl BlockProducerServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for BlockProducerServer - where - T: BlockProducer, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/store.BlockProducer/ApplyBlock" => { - #[allow(non_camel_case_types)] - struct ApplyBlockSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for ApplyBlockSvc { - type Response = (); - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::apply_block(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = ApplyBlockSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.BlockProducer/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - 
T: BlockProducer, - > tonic::server::UnaryService< - super::super::rpc::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::rpc::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.BlockProducer/GetBlockInputs" => { - #[allow(non_camel_case_types)] - struct GetBlockInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetBlockInputsSvc { - type Response = super::BlockInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_inputs(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = 
self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.BlockProducer/GetBatchInputs" => { - #[allow(non_camel_case_types)] - struct GetBatchInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetBatchInputsSvc { - type Response = super::BatchInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_batch_inputs(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBatchInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.BlockProducer/GetTransactionInputs" => { - #[allow(non_camel_case_types)] - struct GetTransactionInputsSvc(pub Arc); - impl< - T: BlockProducer, - > 
tonic::server::UnaryService - for GetTransactionInputsSvc { - type Response = super::TransactionInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_transaction_inputs( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetTransactionInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for BlockProducerServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "store.BlockProducer"; 
- impl tonic::server::NamedService for BlockProducerServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod ntx_builder_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the network transaction builder component - #[derive(Debug, Clone)] - pub struct NtxBuilderClient { - inner: tonic::client::Grpc, - } - impl NtxBuilderClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl NtxBuilderClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> NtxBuilderClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - NtxBuilderClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. 
- #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetBlockHeaderByNumber")); - self.inner.unary(req, path, codec).await - } - /// Returns a paginated list of unconsumed network notes. 
- pub async fn get_unconsumed_network_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetUnconsumedNetworkNotes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("store.NtxBuilder", "GetUnconsumedNetworkNotes"), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - /// header for executing network transactions. If the block number is not provided, the latest - /// header and peaks will be retrieved. - pub async fn get_current_blockchain_data( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetCurrentBlockchainData", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetCurrentBlockchainData")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state of a network account with the specified account ID. 
- pub async fn get_network_account_details_by_id( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetNetworkAccountDetailsById", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("store.NtxBuilder", "GetNetworkAccountDetailsById"), - ); - self.inner.unary(req, path, codec).await - } - /// Returns a list of all network account ids. - pub async fn get_network_account_ids( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetNetworkAccountIds", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetNetworkAccountIds")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest details of the specified account. 
- pub async fn get_account( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetAccount", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetAccount")); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. - pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetNoteScriptByRoot")); - self.inner.unary(req, path, codec).await - } - /// Returns vault asset witnesses for the specified account. 
- pub async fn get_vault_asset_witnesses( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetVaultAssetWitnesses", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetVaultAssetWitnesses")); - self.inner.unary(req, path, codec).await - } - /// Returns a storage map witness for the specified account and storage map entry. - pub async fn get_storage_map_witness( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetStorageMapWitness", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetStorageMapWitness")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod ntx_builder_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with NtxBuilderServer. - #[async_trait] - pub trait NtxBuilder: std::marker::Send + std::marker::Sync + 'static { - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. 
- async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a paginated list of unconsumed network notes. - async fn get_unconsumed_network_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - /// header for executing network transactions. If the block number is not provided, the latest - /// header and peaks will be retrieved. - async fn get_current_blockchain_data( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state of a network account with the specified account ID. - async fn get_network_account_details_by_id( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of all network account ids. - async fn get_network_account_ids( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest details of the specified account. - async fn get_account( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. - async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns vault asset witnesses for the specified account. - async fn get_vault_asset_witnesses( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a storage map witness for the specified account and storage map entry. 
- async fn get_storage_map_witness( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the network transaction builder component - #[derive(Debug)] - pub struct NtxBuilderServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl NtxBuilderServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for NtxBuilderServer - where - T: NtxBuilder, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/store.NtxBuilder/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::super::rpc::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::rpc::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let 
res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetUnconsumedNetworkNotes" => { - #[allow(non_camel_case_types)] - struct GetUnconsumedNetworkNotesSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetUnconsumedNetworkNotesSvc { - type Response = super::UnconsumedNetworkNotes; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_unconsumed_network_notes( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetUnconsumedNetworkNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetCurrentBlockchainData" => { - #[allow(non_camel_case_types)] - struct GetCurrentBlockchainDataSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::super::blockchain::MaybeBlockNumber, - > for GetCurrentBlockchainDataSvc { - type Response = super::CurrentBlockchainData; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::MaybeBlockNumber, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - 
::get_current_blockchain_data( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetCurrentBlockchainDataSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetNetworkAccountDetailsById" => { - #[allow(non_camel_case_types)] - struct GetNetworkAccountDetailsByIdSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNetworkAccountDetailsByIdSvc { - type Response = super::MaybeAccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_network_account_details_by_id( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNetworkAccountDetailsByIdSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - 
send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetNetworkAccountIds" => { - #[allow(non_camel_case_types)] - struct GetNetworkAccountIdsSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNetworkAccountIdsSvc { - type Response = super::NetworkAccountIdList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_network_account_ids(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNetworkAccountIdsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetAccount" => { - #[allow(non_camel_case_types)] - struct GetAccountSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetAccountSvc { - type Response = super::super::rpc::AccountResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account(&inner, request).await - }; - Box::pin(fut) - } 
- } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::super::rpc::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; 
- Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetVaultAssetWitnesses" => { - #[allow(non_camel_case_types)] - struct GetVaultAssetWitnessesSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetVaultAssetWitnessesSvc { - type Response = super::VaultAssetWitnessesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_vault_asset_witnesses( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetVaultAssetWitnessesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetStorageMapWitness" => { - #[allow(non_camel_case_types)] - struct GetStorageMapWitnessSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetStorageMapWitnessSvc { - type Response = super::StorageMapWitnessResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_storage_map_witness(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let 
send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetStorageMapWitnessSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for NtxBuilderServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "store.NtxBuilder"; - impl tonic::server::NamedService for NtxBuilderServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/transaction.rs b/crates/proto/src/generated/transaction.rs deleted file mode 100644 index a9dc784d6..000000000 --- a/crates/proto/src/generated/transaction.rs +++ /dev/null @@ -1,59 +0,0 @@ -// This file is @generated by prost-build. -/// Submits proven transaction to the Miden network. 
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProvenTransaction { - /// Transaction encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::transaction::proven_tx::ProvenTransaction\]. - #[prost(bytes = "vec", tag = "1")] - pub transaction: ::prost::alloc::vec::Vec, - /// Transaction inputs encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::transaction::TransactionInputs\]. - #[prost(bytes = "vec", optional, tag = "2")] - pub transaction_inputs: ::core::option::Option<::prost::alloc::vec::Vec>, -} -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProvenTransactionBatch { - /// Encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::transaction::proven_tx::ProvenTransaction\]. - #[prost(bytes = "vec", tag = "1")] - pub encoded: ::prost::alloc::vec::Vec, -} -/// Represents a transaction ID. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct TransactionId { - /// The transaction ID. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// Represents a transaction summary. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct TransactionSummary { - /// A unique 32-byte identifier of a transaction. - #[prost(message, optional, tag = "1")] - pub transaction_id: ::core::option::Option, - /// The block number in which the transaction was executed. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - /// The ID of the account affected by the transaction. - #[prost(message, optional, tag = "3")] - pub account_id: ::core::option::Option, -} -/// Represents a transaction header. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionHeader { - /// ID of the account against which the transaction was executed. 
- #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// State commitment of the account before the transaction was executed. - #[prost(message, optional, tag = "2")] - pub initial_state_commitment: ::core::option::Option, - /// State commitment of the account after the transaction was executed. - #[prost(message, optional, tag = "3")] - pub final_state_commitment: ::core::option::Option, - /// Nullifiers of the input notes of the transaction. - #[prost(message, repeated, tag = "4")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Output notes of the transaction. - #[prost(message, repeated, tag = "5")] - pub output_notes: ::prost::alloc::vec::Vec, -} diff --git a/crates/proto/src/generated/validator.rs b/crates/proto/src/generated/validator.rs deleted file mode 100644 index 39869d9fc..000000000 --- a/crates/proto/src/generated/validator.rs +++ /dev/null @@ -1,457 +0,0 @@ -// This file is @generated by prost-build. -/// Represents the status of the validator. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ValidatorStatus { - /// The validator's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The validator's status. - #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, -} -/// Generated client implementations. -pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Validator API for the Validator component. - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info of the validator. 
- pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/validator.Api/Status"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("validator.Api", "Status")); - self.inner.unary(req, path, codec).await - } - /// Submits a transaction to the validator. - pub async fn submit_proven_transaction( - &mut self, - request: impl tonic::IntoRequest< - super::super::transaction::ProvenTransaction, - >, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/validator.Api/SubmitProvenTransaction", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("validator.Api", "SubmitProvenTransaction")); - self.inner.unary(req, path, codec).await - } - /// Validates a proposed block and returns the block header and body. - pub async fn sign_block( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/validator.Api/SignBlock"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("validator.Api", "SignBlock")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. 
-pub mod api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ApiServer. - #[async_trait] - pub trait Api: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info of the validator. - async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - /// Submits a transaction to the validator. - async fn submit_proven_transaction( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Validates a proposed block and returns the block header and body. - async fn sign_block( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Validator API for the Validator component. - #[derive(Debug)] - pub struct ApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. 
- #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ApiServer - where - T: Api, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/validator.Api/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::ValidatorStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - 
accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/validator.Api/SubmitProvenTransaction" => { - #[allow(non_camel_case_types)] - struct SubmitProvenTransactionSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::transaction::ProvenTransaction, - > for SubmitProvenTransactionSvc { - type Response = (); - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::transaction::ProvenTransaction, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::submit_proven_transaction(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SubmitProvenTransactionSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/validator.Api/SignBlock" => { - #[allow(non_camel_case_types)] - struct SignBlockSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::blockchain::ProposedBlock, - > for SignBlockSvc { - type Response = super::super::blockchain::BlockSignature; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: 
tonic::Request< - super::super::blockchain::ProposedBlock, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sign_block(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SignBlockSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "validator.Api"; - impl tonic::server::NamedService for ApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index 
e21d19f18..811df0995 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -45,3 +45,9 @@ fs-err = { workspace = true } miden-node-proto-build = { workspace = true } miette = { features = ["fancy"], version = "7.5" } tonic-prost-build = { workspace = true } + +[package.metadata.cargo-machete] +ignored = [ + "prost", + "tonic-prost", # used in generated OUT_DIR code +] diff --git a/crates/remote-prover-client/build.rs b/crates/remote-prover-client/build.rs index ffd9b2e71..226f51332 100644 --- a/crates/remote-prover-client/build.rs +++ b/crates/remote-prover-client/build.rs @@ -1,37 +1,35 @@ -use std::fs; use std::io::Write; +use std::path::{Path, PathBuf}; +use fs_err as fs; use miden_node_proto_build::remote_prover_api_descriptor; -use miette::IntoDiagnostic; +use miette::{Context, IntoDiagnostic}; use tonic_prost_build::FileDescriptorSet; -/// Defines whether the build script should generate files in `/src`. -/// -/// The docs.rs build pipeline has a read-only filesystem, so we have to avoid writing to `src`, -/// otherwise the docs will fail to build there. Note that writing to `OUT_DIR` is fine. -const BUILD_GENERATED_FILES_IN_SRC: bool = option_env!("BUILD_PROTO").is_some(); - -const GENERATED_OUT_DIR: &str = "src/remote_prover/generated"; - /// Generates Rust protobuf bindings. fn main() -> miette::Result<()> { - println!("cargo::rerun-if-env-changed=BUILD_PROTO"); - if !BUILD_GENERATED_FILES_IN_SRC { - return Ok(()); - } + let dst_dir = + PathBuf::from(std::env::var("OUT_DIR").expect("OUT_DIR is always set for build.rs")) + .join("generated"); + + // Remove all existing files. 
+ let _ = fs::remove_dir_all(&dst_dir); + fs::create_dir(&dst_dir) + .into_diagnostic() + .wrap_err("creating destination folder")?; let remote_prover_descriptor = remote_prover_api_descriptor(); // Build std version - let std_path = format!("{GENERATED_OUT_DIR}/std"); - build_tonic_from_descriptor(remote_prover_descriptor.clone(), std_path, true)?; + let std_path = dst_dir.join("std"); + build_tonic_from_descriptor(remote_prover_descriptor.clone(), &std_path, true)?; // Build nostd version - let nostd_path = format!("{GENERATED_OUT_DIR}/nostd"); - build_tonic_from_descriptor(remote_prover_descriptor, nostd_path.clone(), false)?; + let nostd_path = dst_dir.join("nostd"); + build_tonic_from_descriptor(remote_prover_descriptor, &nostd_path, false)?; // Convert nostd version to use core/alloc instead of std - let nostd_file_path = format!("{nostd_path}/remote_prover.rs"); + let nostd_file_path = nostd_path.join("remote_prover.rs"); convert_to_nostd(&nostd_file_path)?; Ok(()) @@ -43,11 +41,12 @@ fn main() -> miette::Result<()> { /// Builds tonic code from a `FileDescriptorSet` with specified configuration fn build_tonic_from_descriptor( descriptor: FileDescriptorSet, - out_dir: String, + dst_dir: &Path, build_transport: bool, ) -> miette::Result<()> { + fs::create_dir_all(dst_dir).into_diagnostic()?; tonic_prost_build::configure() - .out_dir(out_dir) + .out_dir(dst_dir) .build_server(false) .build_transport(build_transport) .compile_fds_with_config(descriptor, tonic_prost_build::Config::new()) @@ -55,7 +54,7 @@ fn build_tonic_from_descriptor( } /// Replaces std references with core and alloc for nostd compatibility -fn convert_to_nostd(file_path: &str) -> miette::Result<()> { +fn convert_to_nostd(file_path: &Path) -> miette::Result<()> { let file_content = fs_err::read_to_string(file_path).into_diagnostic()?; let updated_content = file_content .replace("std::result", "core::result") diff --git a/crates/remote-prover-client/src/remote_prover/generated/nostd/mod.rs 
b/crates/remote-prover-client/src/remote_prover/generated/nostd/mod.rs index 50f334d6a..16cf30145 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/nostd/mod.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/nostd/mod.rs @@ -1,2 +1,4 @@ #[rustfmt::skip] -pub mod remote_prover; +pub mod remote_prover { + include!(concat!(env!("OUT_DIR"), "/generated/nostd/remote_prover.rs")); +} diff --git a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs deleted file mode 100644 index 1074dd5b8..000000000 --- a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs +++ /dev/null @@ -1,442 +0,0 @@ -// This file is @generated by prost-build. -/// Request message for proof generation containing payload and proof type metadata. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProofRequest { - /// Type of proof being requested, determines payload interpretation - #[prost(enumeration = "ProofType", tag = "1")] - pub proof_type: i32, - /// Serialized payload requiring proof generation. The encoding format is - /// type-specific: - /// - /// * TRANSACTION: TransactionInputs encoded. - /// * BATCH: ProposedBatch encoded. - /// * BLOCK: BlockProofRequest encoded. - #[prost(bytes = "vec", tag = "2")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Response message containing the generated proof. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Proof { - /// Serialized proof bytes. - /// - /// * TRANSACTION: Returns an encoded ProvenTransaction. - /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded BlockProof. - #[prost(bytes = "vec", tag = "1")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Status of an individual worker in the proxy. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProxyWorkerStatus { - /// The name of the worker. 
- #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The version of the worker. - #[prost(string, tag = "2")] - pub version: ::prost::alloc::string::String, - /// The health status of the worker. - #[prost(enumeration = "WorkerHealthStatus", tag = "3")] - pub status: i32, -} -/// Response message containing the status of the proxy. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProxyStatus { - /// The version of the proxy. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this proxy. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, - /// The list of workers managed by this proxy. - #[prost(message, repeated, tag = "3")] - pub workers: ::prost::alloc::vec::Vec, -} -/// Response message containing the status of the worker. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct WorkerStatus { - /// The version of the worker. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this worker. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, -} -/// Enumeration of supported proof types. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ProofType { - /// Proof for a single transaction. - Transaction = 0, - /// Proof covering a batch of transactions. - Batch = 1, - /// Proof for entire block validity. - Block = 2, -} -impl ProofType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
- pub fn as_str_name(&self) -> &'static str { - match self { - Self::Transaction => "TRANSACTION", - Self::Batch => "BATCH", - Self::Block => "BLOCK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TRANSACTION" => Some(Self::Transaction), - "BATCH" => Some(Self::Batch), - "BLOCK" => Some(Self::Block), - _ => None, - } - } -} -/// Health status of a worker. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum WorkerHealthStatus { - /// The worker's health status is unknown. - /// This value is used when the proxy is not able to determine the health status of the worker. - /// It is only a temporary state and the proxy will eventually determine the health status of the worker. - Unknown = 0, - /// The worker is healthy. - /// This value is used when the worker is able to successfully process requests. - Healthy = 1, - /// The worker is unhealthy. - /// This value is used when the worker is not receiving requests or is not able to successfully process requests. - Unhealthy = 2, -} -impl WorkerHealthStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unknown => "UNKNOWN", - Self::Healthy => "HEALTHY", - Self::Unhealthy => "UNHEALTHY", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "HEALTHY" => Some(Self::Healthy), - "UNHEALTHY" => Some(Self::Unhealthy), - _ => None, - } - } -} -/// Generated client implementations. 
-pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + core::marker::Send + 'static, - ::Error: Into + core::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + core::marker::Send + core::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Generates a proof for the requested payload. - pub async fn prove( - &mut self, - request: impl tonic::IntoRequest, - ) -> core::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - alloc::format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/remote_prover.Api/Prove"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("remote_prover.Api", "Prove")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod proxy_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ProxyStatusApiClient { - inner: tonic::client::Grpc, - } - impl ProxyStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + core::marker::Send + 'static, - ::Error: Into + core::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ProxyStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + core::marker::Send + core::marker::Sync, - { - ProxyStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress 
requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the proxy. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> core::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - alloc::format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.ProxyStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.ProxyStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. 
-pub mod worker_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct WorkerStatusApiClient { - inner: tonic::client::Grpc, - } - impl WorkerStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + core::marker::Send + 'static, - ::Error: Into + core::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> WorkerStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + core::marker::Send + core::marker::Sync, - { - WorkerStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the worker. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> core::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - alloc::format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.WorkerStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.WorkerStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} diff --git a/crates/remote-prover-client/src/remote_prover/generated/std/mod.rs b/crates/remote-prover-client/src/remote_prover/generated/std/mod.rs index 50f334d6a..0f91ccd1d 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/std/mod.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/std/mod.rs @@ -1,2 +1,4 @@ #[rustfmt::skip] -pub mod remote_prover; +pub mod remote_prover { + include!(concat!(env!("OUT_DIR"), "/generated/std/remote_prover.rs")); +} diff --git a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs deleted file mode 100644 index 7be124daa..000000000 --- a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs +++ /dev/null @@ -1,475 +0,0 @@ -// This file is @generated by prost-build. -/// Request message for proof generation containing payload and proof type metadata. 
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProofRequest { - /// Type of proof being requested, determines payload interpretation - #[prost(enumeration = "ProofType", tag = "1")] - pub proof_type: i32, - /// Serialized payload requiring proof generation. The encoding format is - /// type-specific: - /// - /// * TRANSACTION: TransactionInputs encoded. - /// * BATCH: ProposedBatch encoded. - /// * BLOCK: BlockProofRequest encoded. - #[prost(bytes = "vec", tag = "2")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Response message containing the generated proof. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Proof { - /// Serialized proof bytes. - /// - /// * TRANSACTION: Returns an encoded ProvenTransaction. - /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded BlockProof. - #[prost(bytes = "vec", tag = "1")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Status of an individual worker in the proxy. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProxyWorkerStatus { - /// The name of the worker. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The version of the worker. - #[prost(string, tag = "2")] - pub version: ::prost::alloc::string::String, - /// The health status of the worker. - #[prost(enumeration = "WorkerHealthStatus", tag = "3")] - pub status: i32, -} -/// Response message containing the status of the proxy. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProxyStatus { - /// The version of the proxy. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this proxy. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, - /// The list of workers managed by this proxy. - #[prost(message, repeated, tag = "3")] - pub workers: ::prost::alloc::vec::Vec, -} -/// Response message containing the status of the worker. 
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct WorkerStatus { - /// The version of the worker. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this worker. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, -} -/// Enumeration of supported proof types. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ProofType { - /// Proof for a single transaction. - Transaction = 0, - /// Proof covering a batch of transactions. - Batch = 1, - /// Proof for entire block validity. - Block = 2, -} -impl ProofType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Transaction => "TRANSACTION", - Self::Batch => "BATCH", - Self::Block => "BLOCK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TRANSACTION" => Some(Self::Transaction), - "BATCH" => Some(Self::Batch), - "BLOCK" => Some(Self::Block), - _ => None, - } - } -} -/// Health status of a worker. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum WorkerHealthStatus { - /// The worker's health status is unknown. - /// This value is used when the proxy is not able to determine the health status of the worker. - /// It is only a temporary state and the proxy will eventually determine the health status of the worker. - Unknown = 0, - /// The worker is healthy. - /// This value is used when the worker is able to successfully process requests. - Healthy = 1, - /// The worker is unhealthy. 
- /// This value is used when the worker is not receiving requests or is not able to successfully process requests. - Unhealthy = 2, -} -impl WorkerHealthStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unknown => "UNKNOWN", - Self::Healthy => "HEALTHY", - Self::Unhealthy => "UNHEALTHY", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "HEALTHY" => Some(Self::Healthy), - "UNHEALTHY" => Some(Self::Unhealthy), - _ => None, - } - } -} -/// Generated client implementations. -pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Generates a proof for the requested payload. - pub async fn prove( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/remote_prover.Api/Prove"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("remote_prover.Api", "Prove")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod proxy_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ProxyStatusApiClient { - inner: tonic::client::Grpc, - } - impl ProxyStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ProxyStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ProxyStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ProxyStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the proxy. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.ProxyStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.ProxyStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod worker_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct WorkerStatusApiClient { - inner: tonic::client::Grpc, - } - impl WorkerStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl WorkerStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> WorkerStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - WorkerStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the worker. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.WorkerStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.WorkerStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} diff --git a/proto/build.rs b/proto/build.rs index 3d4047e24..73db17cab 100644 --- a/proto/build.rs +++ b/proto/build.rs @@ -31,52 +31,53 @@ fn main() -> miette::Result<()> { println!("cargo::rerun-if-changed=./proto"); println!("cargo::rerun-if-env-changed=BUILD_PROTO"); - let out = - env::var("OUT_DIR").expect("env::OUT_DIR is always set in build.rs when used with cargo"); + let out_dir = PathBuf::from( + env::var("OUT_DIR").expect("env::OUT_DIR is always set in build.rs when used with cargo"), + ); let crate_root: PathBuf = env!("CARGO_MANIFEST_DIR").into(); - let proto_dir = crate_root.join("proto"); - let includes = &[proto_dir]; + let proto_src_dir = crate_root.join("proto"); + let includes = &[proto_src_dir]; let rpc_file_descriptor = protox::compile([RPC_PROTO], includes)?; - let rpc_path = PathBuf::from(&out).join(RPC_DESCRIPTOR); + let rpc_path = out_dir.join(RPC_DESCRIPTOR); fs::write(&rpc_path, rpc_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing rpc file descriptor")?; let remote_prover_file_descriptor = protox::compile([REMOTE_PROVER_PROTO], includes)?; - let remote_prover_path = 
PathBuf::from(&out).join(REMOTE_PROVER_DESCRIPTOR); + let remote_prover_path = out_dir.join(REMOTE_PROVER_DESCRIPTOR); fs::write(&remote_prover_path, remote_prover_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing remote prover file descriptor")?; let store_rpc_file_descriptor = protox::compile([STORE_RPC_PROTO], includes)?; - let store_rpc_path = PathBuf::from(&out).join(STORE_RPC_DESCRIPTOR); + let store_rpc_path = out_dir.join(STORE_RPC_DESCRIPTOR); fs::write(&store_rpc_path, store_rpc_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing store rpc file descriptor")?; let store_ntx_builder_file_descriptor = protox::compile([STORE_NTX_BUILDER_PROTO], includes)?; - let store_ntx_builder_path = PathBuf::from(&out).join(STORE_NTX_BUILDER_DESCRIPTOR); + let store_ntx_builder_path = out_dir.join(STORE_NTX_BUILDER_DESCRIPTOR); fs::write(&store_ntx_builder_path, store_ntx_builder_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing store ntx builder file descriptor")?; let store_block_producer_file_descriptor = protox::compile([STORE_BLOCK_PRODUCER_PROTO], includes)?; - let store_block_producer_path = PathBuf::from(&out).join(STORE_BLOCK_PRODUCER_DESCRIPTOR); + let store_block_producer_path = out_dir.join(STORE_BLOCK_PRODUCER_DESCRIPTOR); fs::write(&store_block_producer_path, store_block_producer_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing store block producer file descriptor")?; let block_producer_file_descriptor = protox::compile([BLOCK_PRODUCER_PROTO], includes)?; - let block_producer_path = PathBuf::from(&out).join(BLOCK_PRODUCER_DESCRIPTOR); + let block_producer_path = out_dir.join(BLOCK_PRODUCER_DESCRIPTOR); fs::write(&block_producer_path, block_producer_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing block producer file descriptor")?; let validator_file_descriptor = protox::compile([VALIDATOR_PROTO], includes)?; - let validator_path = 
PathBuf::from(&out).join(VALIDATOR_DESCRIPTOR); + let validator_path = out_dir.join(VALIDATOR_DESCRIPTOR); fs::write(&validator_path, validator_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing validator file descriptor")?; diff --git a/scripts/check-features.sh b/scripts/check-features.sh index 0b128a185..f51e5c71f 100755 --- a/scripts/check-features.sh +++ b/scripts/check-features.sh @@ -7,9 +7,8 @@ set -euo pipefail echo "Checking all feature combinations with cargo-hack..." -# Set environment variables to treat warnings as errors and build protos +# Set environment variables to treat warnings as errors export RUSTFLAGS="-D warnings" -export BUILD_PROTO=1 # Run cargo-hack with comprehensive feature checking cargo hack check \ From aa3981c3ca06f1446f23abbf6ccedb1cbf5a44e8 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 27 Feb 2026 09:31:07 +0200 Subject: [PATCH 59/77] chore: use `build-rs` in build scripts (#1716) --- Cargo.lock | 27 ++++++++++++++++---------- Cargo.toml | 1 + bin/remote-prover/Cargo.toml | 1 + bin/remote-prover/build.rs | 5 ++--- crates/ntx-builder/Cargo.toml | 3 +++ crates/ntx-builder/build.rs | 4 ++-- crates/proto/Cargo.toml | 1 + crates/proto/build.rs | 8 ++------ crates/remote-prover-client/Cargo.toml | 1 + crates/remote-prover-client/build.rs | 6 ++---- crates/store/Cargo.toml | 1 + crates/store/build.rs | 10 ++++------ crates/validator/Cargo.toml | 3 +++ crates/validator/build.rs | 7 ++++--- proto/Cargo.toml | 7 ++++--- proto/build.rs | 13 +++---------- 16 files changed, 51 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7357d1a92..402c1541f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -702,7 +702,7 @@ dependencies = [ "bitflags", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.13.0", "proc-macro2", "quote", "regex", @@ -755,6 +755,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "build-rs" +version = "0.3.3" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffc87f52297187fb5d25bde3d368f0480f88ac1d8f3cf4c80ac5575435511114" +dependencies = [ + "unicode-ident", +] + [[package]] name = "bumpalo" version = "3.19.1" @@ -2266,15 +2275,6 @@ version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.13.0" @@ -2972,6 +2972,7 @@ name = "miden-node-ntx-builder" version = "0.14.0" dependencies = [ "anyhow", + "build-rs", "diesel", "diesel_migrations", "futures", @@ -3003,6 +3004,7 @@ version = "0.14.0" dependencies = [ "anyhow", "assert_matches", + "build-rs", "fs-err", "hex", "http 1.4.0", @@ -3026,6 +3028,7 @@ dependencies = [ name = "miden-node-proto-build" version = "0.14.0" dependencies = [ + "build-rs", "fs-err", "miette", "protox", @@ -3074,6 +3077,7 @@ version = "0.14.0" dependencies = [ "anyhow", "assert_matches", + "build-rs", "criterion", "deadpool", "deadpool-diesel", @@ -3178,6 +3182,7 @@ dependencies = [ "anyhow", "aws-config", "aws-sdk-kms", + "build-rs", "diesel", "diesel_migrations", "k256", @@ -3277,6 +3282,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", + "build-rs", "clap", "fs-err", "http 1.4.0", @@ -3310,6 +3316,7 @@ dependencies = [ name = "miden-remote-prover-client" version = "0.14.0" dependencies = [ + "build-rs", "fs-err", "getrandom 0.4.1", "miden-node-proto-build", diff --git a/Cargo.toml b/Cargo.toml index 3bcb715ec..3e41e9c1d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ miden-crypto = { default-features = false, version = "0.19" } anyhow = { version = "1.0" } assert_matches = { version = "1.5" } async-trait = { version = "0.1" 
} +build-rs = { version = "0.3" } clap = { features = ["derive"], version = "4.5" } deadpool = { default-features = false, version = "0.12" } deadpool-diesel = { version = "0.6" } diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 60ae9f969..3b0009cbc 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -47,6 +47,7 @@ miden-testing = { workspace = true } miden-tx = { features = ["testing"], workspace = true } [build-dependencies] +build-rs = { workspace = true } fs-err = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/bin/remote-prover/build.rs b/bin/remote-prover/build.rs index 6183263eb..0d9f0f89d 100644 --- a/bin/remote-prover/build.rs +++ b/bin/remote-prover/build.rs @@ -1,4 +1,4 @@ -use std::path::{Path, PathBuf}; +use std::path::Path; use fs_err as fs; use miden_node_proto_build::remote_prover_api_descriptor; @@ -9,8 +9,7 @@ use tonic_prost_build::FileDescriptorSet; fn main() -> miette::Result<()> { miden_node_rocksdb_cxx_linkage_fix::configure(); - let dst_dir = - PathBuf::from(std::env::var("OUT_DIR").expect("OUT_DIR should be set")).join("generated"); + let dst_dir = build_rs::input::out_dir().join("generated"); // Remove all existing files. 
let _ = fs::remove_dir_all(&dst_dir); diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 0c30970a0..9ef143cae 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -34,6 +34,9 @@ tonic = { workspace = true } tracing = { workspace = true } url = { workspace = true } +[build-dependencies] +build-rs = { workspace = true } + [dev-dependencies] miden-node-test-macro = { path = "../test-macro" } miden-node-utils = { features = ["testing"], workspace = true } diff --git a/crates/ntx-builder/build.rs b/crates/ntx-builder/build.rs index 881be3168..78883c50c 100644 --- a/crates/ntx-builder/build.rs +++ b/crates/ntx-builder/build.rs @@ -3,9 +3,9 @@ // . fn main() { - println!("cargo:rerun-if-changed=./src/db/migrations"); + build_rs::output::rerun_if_changed("src/db/migrations"); // If we do one re-write, the default rules are disabled, // hence we need to trigger explicitly on `Cargo.toml`. // - println!("cargo:rerun-if-changed=Cargo.toml"); + build_rs::output::rerun_if_changed("Cargo.toml"); } diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index 5c308ae58..b0a7461d3 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -33,6 +33,7 @@ assert_matches = { workspace = true } proptest = { version = "1.7" } [build-dependencies] +build-rs = { workspace = true } fs-err = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/crates/proto/build.rs b/crates/proto/build.rs index 9c42bcb08..07117f466 100644 --- a/crates/proto/build.rs +++ b/crates/proto/build.rs @@ -1,5 +1,4 @@ -use std::env; -use std::path::{Path, PathBuf}; +use std::path::Path; use fs_err as fs; use miden_node_proto_build::{ @@ -16,12 +15,9 @@ use tonic_prost_build::FileDescriptorSet; /// Generates Rust protobuf bindings using `miden-node-proto-build`. 
fn main() -> miette::Result<()> { - println!("cargo::rerun-if-changed=../../proto/proto"); - miden_node_rocksdb_cxx_linkage_fix::configure(); - let dst_dir = - PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR should be set")).join("generated"); + let dst_dir = build_rs::input::out_dir().join("generated"); // Remove all existing files. let _ = fs::remove_dir_all(&dst_dir); diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index 811df0995..3edb6ea54 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -41,6 +41,7 @@ tokio = { default-features = false, features = ["sync"], optional = tru tonic-prost = { workspace = true } [build-dependencies] +build-rs = { workspace = true } fs-err = { workspace = true } miden-node-proto-build = { workspace = true } miette = { features = ["fancy"], version = "7.5" } diff --git a/crates/remote-prover-client/build.rs b/crates/remote-prover-client/build.rs index 226f51332..aab668186 100644 --- a/crates/remote-prover-client/build.rs +++ b/crates/remote-prover-client/build.rs @@ -1,5 +1,5 @@ use std::io::Write; -use std::path::{Path, PathBuf}; +use std::path::Path; use fs_err as fs; use miden_node_proto_build::remote_prover_api_descriptor; @@ -8,9 +8,7 @@ use tonic_prost_build::FileDescriptorSet; /// Generates Rust protobuf bindings. fn main() -> miette::Result<()> { - let dst_dir = - PathBuf::from(std::env::var("OUT_DIR").expect("OUT_DIR is always set for build.rs")) - .join("generated"); + let dst_dir = build_rs::input::out_dir().join("generated"); // Remove all existing files. 
let _ = fs::remove_dir_all(&dst_dir); diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 59dae55e1..f9be6de10 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -50,6 +50,7 @@ tracing = { workspace = true } url = { workspace = true } [build-dependencies] +build-rs = { workspace = true } fs-err = { workspace = true } miden-agglayer = { branch = "next", features = ["testing"], git = "https://github.com/0xMiden/miden-base" } miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/crates/store/build.rs b/crates/store/build.rs index cd6fca23f..f7ba0825c 100644 --- a/crates/store/build.rs +++ b/crates/store/build.rs @@ -9,11 +9,11 @@ use miden_protocol::account::{Account, AccountCode, AccountFile}; use miden_protocol::{Felt, Word}; fn main() { - println!("cargo:rerun-if-changed=./src/db/migrations"); + build_rs::output::rerun_if_changed("src/db/migrations"); // If we do one re-write, the default rules are disabled, // hence we need to trigger explicitly on `Cargo.toml`. // - println!("cargo:rerun-if-changed=Cargo.toml"); + build_rs::output::rerun_if_changed("Cargo.toml"); // Generate sample agglayer account files for genesis config samples. 
generate_agglayer_sample_accounts(); @@ -28,11 +28,9 @@ fn main() { /// - `02-with-account-files/agglayer_faucet_usdc.mac` - agglayer faucet for wrapped USDC fn generate_agglayer_sample_accounts() { // Use CARGO_MANIFEST_DIR to get the absolute path to the crate root - let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set"); + let manifest_dir = build_rs::input::cargo_manifest_dir(); let samples_dir: PathBuf = - [&manifest_dir, "src", "genesis", "config", "samples", "02-with-account-files"] - .iter() - .collect(); + manifest_dir.join("src/genesis/config/samples/02-with-account-files"); // Create the directory if it doesn't exist fs_err::create_dir_all(&samples_dir).expect("Failed to create samples directory"); diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index 1b4db7e04..9acd553c6 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -37,4 +37,7 @@ tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } +[build-dependencies] +build-rs = { workspace = true } + [dev-dependencies] diff --git a/crates/validator/build.rs b/crates/validator/build.rs index b9f947e17..59c416faf 100644 --- a/crates/validator/build.rs +++ b/crates/validator/build.rs @@ -1,9 +1,10 @@ // This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in -// `validator/src/db/migrations.rs` to include the latest version of the migrations into the binary, see . +// `validator/src/db/migrations.rs` to include the latest version of the migrations into the binary, +// see . fn main() { - println!("cargo:rerun-if-changed=./src/db/migrations"); + build_rs::output::rerun_if_changed("./src/db/migrations"); // If we do one re-write, the default rules are disabled, // hence we need to trigger explicitly on `Cargo.toml`. 
// - println!("cargo:rerun-if-changed=Cargo.toml"); + build_rs::output::rerun_if_changed("Cargo.toml"); } diff --git a/proto/Cargo.toml b/proto/Cargo.toml index ba7d15f26..ee79d7adc 100644 --- a/proto/Cargo.toml +++ b/proto/Cargo.toml @@ -24,6 +24,7 @@ protox = { workspace = true } tonic-prost-build = { workspace = true } [build-dependencies] -fs-err = { workspace = true } -miette = { version = "7.6" } -protox = { workspace = true } +build-rs = { workspace = true } +fs-err = { workspace = true } +miette = { version = "7.6" } +protox = { workspace = true } diff --git a/proto/build.rs b/proto/build.rs index 73db17cab..7246ab495 100644 --- a/proto/build.rs +++ b/proto/build.rs @@ -1,6 +1,3 @@ -use std::env; -use std::path::PathBuf; - use fs_err as fs; use miette::{Context, IntoDiagnostic}; use protox::prost::Message; @@ -28,14 +25,10 @@ const VALIDATOR_DESCRIPTOR: &str = "validator_file_descriptor.bin"; /// This is done only if `BUILD_PROTO` environment variable is set to `1` to avoid running the /// script on crates.io where repo-level .proto files are not available. 
fn main() -> miette::Result<()> { - println!("cargo::rerun-if-changed=./proto"); - println!("cargo::rerun-if-env-changed=BUILD_PROTO"); - - let out_dir = PathBuf::from( - env::var("OUT_DIR").expect("env::OUT_DIR is always set in build.rs when used with cargo"), - ); + build_rs::output::rerun_if_changed("./proto"); - let crate_root: PathBuf = env!("CARGO_MANIFEST_DIR").into(); + let out_dir = build_rs::input::out_dir(); + let crate_root = build_rs::input::cargo_manifest_dir(); let proto_src_dir = crate_root.join("proto"); let includes = &[proto_src_dir]; From 2f4dc6f4e9cf55aba546d530168e26a7039752e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Muhammet=20Eren=20Karaku=C5=9F?= Date: Fri, 27 Feb 2026 13:21:35 +0300 Subject: [PATCH 60/77] refactor(rpc): clean up api.rs with section separators and comments (#1713) --- crates/rpc/src/server/api.rs | 200 +++++++++++++++++++---------------- 1 file changed, 109 insertions(+), 91 deletions(-) diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index f2a88cc05..6d5ca862e 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -152,8 +152,13 @@ impl RpcService { } } +// API IMPLEMENTATION +// ================================================================================================ + #[tonic::async_trait] impl api_server::Api for RpcService { + // -- Nullifier endpoints ----------------------------------------------------------------- + async fn check_nullifiers( &self, request: Request, @@ -183,6 +188,8 @@ impl api_server::Api for RpcService { self.store.clone().sync_nullifiers(request).await } + // -- Block endpoints --------------------------------------------------------------------- + async fn get_block_header_by_number( &self, request: Request, @@ -192,6 +199,17 @@ impl api_server::Api for RpcService { self.store.clone().get_block_header_by_number(request).await } + async fn get_block_by_number( + &self, + request: Request, + ) -> Result, Status> { + let request = 
request.into_inner(); + + debug!(target: COMPONENT, ?request); + + self.store.clone().get_block_by_number(request).await + } + async fn sync_chain_mmr( &self, request: Request, @@ -201,14 +219,7 @@ impl api_server::Api for RpcService { self.store.clone().sync_chain_mmr(request).await } - async fn sync_account_storage_maps( - &self, - request: Request, - ) -> Result, Status> { - debug!(target: COMPONENT, request = ?request.get_ref()); - - self.store.clone().sync_account_storage_maps(request).await - } + // -- Note endpoints ---------------------------------------------------------------------- async fn sync_notes( &self, @@ -242,6 +253,26 @@ impl api_server::Api for RpcService { self.store.clone().get_notes_by_id(request).await } + async fn get_note_script_by_root( + &self, + request: Request, + ) -> Result, Status> { + debug!(target: COMPONENT, request = ?request); + + self.store.clone().get_note_script_by_root(request).await + } + + // -- Account endpoints ------------------------------------------------------------------- + + async fn sync_account_storage_maps( + &self, + request: Request, + ) -> Result, Status> { + debug!(target: COMPONENT, request = ?request.get_ref()); + + self.store.clone().sync_account_storage_maps(request).await + } + async fn sync_account_vault( &self, request: tonic::Request, @@ -252,6 +283,41 @@ impl api_server::Api for RpcService { self.store.clone().sync_account_vault(request).await } + /// Validates storage map key limits before forwarding the account request to the store. 
+ async fn get_account( + &self, + request: Request, + ) -> Result, Status> { + use proto::rpc::account_request::account_detail_request::storage_map_detail_request::{ + SlotData::AllEntries as ProtoMapAllEntries, SlotData::MapKeys as ProtoMapKeys, + }; + + let request = request.into_inner(); + + debug!(target: COMPONENT, ?request); + + // Validate total storage map key limit before forwarding to store + if let Some(details) = &request.details { + let total_keys: usize = details + .storage_maps + .iter() + .filter_map(|m| m.slot_data.as_ref()) + .filter_map(|d| match d { + ProtoMapKeys(keys) => Some(keys.map_keys.len()), + ProtoMapAllEntries(_) => None, + }) + .sum(); + check::(total_keys)?; + } + + self.store.clone().get_account(request).await + } + + // -- Transaction submission -------------------------------------------------------------- + + /// Deserializes and rebuilds the transaction with MAST decorators stripped from output note + /// scripts, verifies the transaction proof, optionally re-executes via the validator if + /// transaction inputs are provided, then forwards the transaction to the block producer. 
async fn submit_proven_transaction( &self, request: Request, @@ -285,18 +351,7 @@ impl api_server::Api for RpcService { .account_update_details(tx.account_update().details().clone()) .add_input_notes(tx.input_notes().iter().cloned()); - let stripped_outputs = tx.output_notes().iter().map(|note| match note { - OutputNote::Full(note) => { - let mut mast = note.script().mast().clone(); - Arc::make_mut(&mut mast).strip_decorators(); - let script = NoteScript::from_parts(mast, note.script().entrypoint()); - let recipient = - NoteRecipient::new(note.serial_num(), script, note.storage().clone()); - let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); - OutputNote::Full(new_note) - }, - other => other.clone(), - }); + let stripped_outputs = strip_output_note_decorators(tx.output_notes().iter()); builder = builder.add_output_notes(stripped_outputs); let rebuilt_tx = builder.build().map_err(|e| Status::invalid_argument(e.to_string()))?; let mut request = request; @@ -330,6 +385,8 @@ impl api_server::Api for RpcService { block_producer.clone().submit_proven_transaction(request).await } + /// Deserializes the batch, strips MAST decorators from full output note scripts, rebuilds + /// the batch, then forwards it to the block producer. 
async fn submit_proven_batch( &self, request: tonic::Request, @@ -344,23 +401,8 @@ impl api_server::Api for RpcService { .map_err(|err| Status::invalid_argument(err.as_report_context("invalid batch")))?; // Build a new batch with output notes' decorators removed - let stripped_outputs: Vec = batch - .output_notes() - .iter() - .map(|note| match note { - OutputNote::Full(note) => { - let mut mast = note.script().mast().clone(); - Arc::make_mut(&mut mast).strip_decorators(); - let script = NoteScript::from_parts(mast, note.script().entrypoint()); - let recipient = - NoteRecipient::new(note.serial_num(), script, note.storage().clone()); - let new_note = - Note::new(note.assets().clone(), note.metadata().clone(), recipient); - OutputNote::Full(new_note) - }, - other => other.clone(), - }) - .collect(); + let stripped_outputs: Vec = + strip_output_note_decorators(batch.output_notes().iter()).collect(); let rebuilt_batch = ProvenBatch::new( batch.id(), @@ -388,44 +430,17 @@ impl api_server::Api for RpcService { block_producer.clone().submit_proven_batch(request).await } - async fn get_block_by_number( - &self, - request: Request, - ) -> Result, Status> { - let request = request.into_inner(); - - debug!(target: COMPONENT, ?request); + // -- Status & utility endpoints ---------------------------------------------------------- - self.store.clone().get_block_by_number(request).await - } - - async fn get_account( + async fn sync_transactions( &self, - request: Request, - ) -> Result, Status> { - use proto::rpc::account_request::account_detail_request::storage_map_detail_request::{ - SlotData::AllEntries as ProtoMapAllEntries, SlotData::MapKeys as ProtoMapKeys, - }; - - let request = request.into_inner(); - - debug!(target: COMPONENT, ?request); + request: Request, + ) -> Result, Status> { + debug!(target: COMPONENT, request = ?request); - // Validate total storage map key limit before forwarding to store - if let Some(details) = &request.details { - let total_keys: usize = 
details - .storage_maps - .iter() - .filter_map(|m| m.slot_data.as_ref()) - .filter_map(|d| match d { - ProtoMapKeys(keys) => Some(keys.map_keys.len()), - ProtoMapAllEntries(_) => None, - }) - .sum(); - check::(total_keys)?; - } + check::(request.get_ref().account_ids.len())?; - self.store.clone().get_account(request).await + self.store.clone().sync_transactions(request).await } async fn status( @@ -464,26 +479,6 @@ impl api_server::Api for RpcService { })) } - async fn get_note_script_by_root( - &self, - request: Request, - ) -> Result, Status> { - debug!(target: COMPONENT, request = ?request); - - self.store.clone().get_note_script_by_root(request).await - } - - async fn sync_transactions( - &self, - request: Request, - ) -> Result, Status> { - debug!(target: COMPONENT, request = ?request); - - check::(request.get_ref().account_ids.len())?; - - self.store.clone().sync_transactions(request).await - } - async fn get_limits( &self, request: Request<()>, @@ -494,6 +489,29 @@ impl api_server::Api for RpcService { } } +// HELPERS +// ================================================================================================ + +/// Strips decorators from full output notes' scripts. +/// +/// This removes MAST decorators from note scripts before forwarding to the block producer, +/// as decorators are not needed for transaction processing. 
+fn strip_output_note_decorators<'a>( + notes: impl Iterator + 'a, +) -> impl Iterator + 'a { + notes.map(|note| match note { + OutputNote::Full(note) => { + let mut mast = note.script().mast().clone(); + Arc::make_mut(&mut mast).strip_decorators(); + let script = NoteScript::from_parts(mast, note.script().entrypoint()); + let recipient = NoteRecipient::new(note.serial_num(), script, note.storage().clone()); + let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); + OutputNote::Full(new_note) + }, + other => other.clone(), + }) +} + // LIMIT HELPERS // ================================================================================================ From 7dbe5d1e9939a46bef1d004c4d8fd8d688d339a8 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare <43513081+bobbinth@users.noreply.github.com> Date: Sat, 28 Feb 2026 01:32:57 -0800 Subject: [PATCH 61/77] refactor: rename `NoteRoot` message into `NoteScriptRoot` (#1722) --- CHANGELOG.md | 1 + crates/ntx-builder/src/store.rs | 2 +- crates/rpc/src/server/api.rs | 14 ++++++-------- crates/store/src/server/ntx_builder.rs | 5 +++-- crates/store/src/server/rpc_api.rs | 5 +++-- proto/proto/internal/store.proto | 4 ++-- proto/proto/rpc.proto | 2 +- proto/proto/types/note.proto | 8 ++++---- 8 files changed, 21 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9016482a6..fe3acc00c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ - Improved tracing span fields ([#1650](https://github.com/0xMiden/miden-node/pull/1650)) - Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/miden-node/pull/1662)). 
- [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/miden-node/pull/1688)). +- [BREAKING] Renamed `NoteRoot` protobuf message used in `GetNoteScriptByRoot` gRPC endpoints into `NoteScriptRoot` ([#1722](https://github.com/0xMiden/miden-node/pull/1722)). ## v0.13.7 (2026-02-25) diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index ac5f4c863..b04a9d75f 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -365,7 +365,7 @@ impl StoreClient { &self, root: Word, ) -> Result, StoreError> { - let request = proto::note::NoteRoot { root: Some(root.into()) }; + let request = proto::note::NoteScriptRoot { root: Some(root.into()) }; let script = self.inner.clone().get_note_script_by_root(request).await?.into_inner().script; diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 6d5ca862e..a0ec88859 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -255,7 +255,7 @@ impl api_server::Api for RpcService { async fn get_note_script_by_root( &self, - request: Request, + request: Request, ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); @@ -534,13 +534,11 @@ fn endpoint_limits(params: &[(&str, usize)]) -> proto::rpc::EndpointLimits { /// Cached RPC query parameter limits. 
static RPC_LIMITS: LazyLock = LazyLock::new(|| { - use { - QueryParamAccountIdLimit as AccountId, - QueryParamNoteIdLimit as NoteId, - QueryParamNoteTagLimit as NoteTag, - QueryParamNullifierLimit as Nullifier, - QueryParamStorageMapKeyTotalLimit as StorageMapKeyTotal, - }; + use QueryParamAccountIdLimit as AccountId; + use QueryParamNoteIdLimit as NoteId; + use QueryParamNoteTagLimit as NoteTag; + use QueryParamNullifierLimit as Nullifier; + use QueryParamStorageMapKeyTotalLimit as StorageMapKeyTotal; proto::rpc::RpcLimits { endpoints: std::collections::HashMap::from([ diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 6a61b4daf..495bda834 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -181,11 +181,12 @@ impl ntx_builder_server::NtxBuilder for StoreApi { async fn get_note_script_by_root( &self, - request: Request, + request: Request, ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); - let root = read_root::(request.into_inner().root, "NoteRoot")?; + let root = + read_root::(request.into_inner().root, "NoteScriptRoot")?; let note_script = self .state diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 829d543f3..bb3098fff 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -360,11 +360,12 @@ impl rpc_server::Rpc for StoreApi { async fn get_note_script_by_root( &self, - request: Request, + request: Request, ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); - let root = read_root::(request.into_inner().root, "NoteRoot")?; + let root = + read_root::(request.into_inner().root, "NoteScriptRoot")?; let note_script = self .state diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 1012476d1..3720991a0 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -45,7 +45,7 @@ service Rpc { rpc 
GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {} + rpc GetNoteScriptByRoot(note.NoteScriptRoot) returns (rpc.MaybeNoteScript) {} // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. // @@ -269,7 +269,7 @@ service NtxBuilder { rpc GetAccount(rpc.AccountRequest) returns (rpc.AccountResponse) {} // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {} + rpc GetNoteScriptByRoot(note.NoteScriptRoot) returns (rpc.MaybeNoteScript) {} // Returns vault asset witnesses for the specified account. rpc GetVaultAssetWitnesses(VaultAssetWitnessesRequest) returns (VaultAssetWitnessesResponse) {} diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index 59f587f67..1a218539e 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -51,7 +51,7 @@ service Api { rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (MaybeNoteScript) {} + rpc GetNoteScriptByRoot(note.NoteScriptRoot) returns (MaybeNoteScript) {} // TRANSACTION SUBMISSION ENDPOINTS // -------------------------------------------------------------------------------------------- diff --git a/proto/proto/types/note.proto b/proto/proto/types/note.proto index ebaa64ed6..0824a0307 100644 --- a/proto/proto/types/note.proto +++ b/proto/proto/types/note.proto @@ -120,9 +120,9 @@ message NoteSyncRecord { primitives.SparseMerklePath inclusion_path = 4; } -// Represents a note root. -message NoteRoot { - // The root of the note. +// Represents a commitment to a note script. +message NoteScriptRoot { + // Root of the note script. primitives.Digest root = 1; } @@ -130,6 +130,6 @@ message NoteRoot { message NoteScript { // Entrypoint of the script. 
uint32 entrypoint = 1; - // Mast of the script. + // MAST of the script. bytes mast = 2; } From 35327afe18f220f63e2550586de221abb0d67e76 Mon Sep 17 00:00:00 2001 From: Marti Date: Mon, 2 Mar 2026 14:03:55 +0100 Subject: [PATCH 62/77] chore: update protocol to rev 3154a3 (#1724) --- Cargo.lock | 72 +++++++++++++----- bin/network-monitor/src/deploy/wallet.rs | 8 +- bin/network-monitor/src/remote_prover.rs | 3 +- bin/remote-prover/src/server/tests.rs | 9 ++- bin/stress-test/src/seeding/mod.rs | 10 ++- .../src/db/models/queries/tests.rs | 9 ++- crates/store/Cargo.toml | 1 + crates/store/build.rs | 67 ++++++++++++++-- .../src/db/models/queries/accounts/tests.rs | 35 ++++++--- crates/store/src/db/tests.rs | 49 +++++++++--- crates/store/src/genesis/config/mod.rs | 12 +-- .../agglayer_faucet_eth.mac | Bin 17931 -> 11393 bytes .../agglayer_faucet_usdc.mac | Bin 17931 -> 11393 bytes .../samples/02-with-account-files/bridge.mac | Bin 17955 -> 19129 bytes crates/store/src/genesis/config/tests.rs | 19 +++-- crates/store/src/inner_forest/tests.rs | 9 ++- 16 files changed, 227 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 402c1541f..5119d2ae6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1537,6 +1537,15 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "static_assertions", +] + [[package]] name = "fixedbitset" version = "0.5.7" @@ -2074,7 +2083,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.2", + "socket2 0.5.10", "system-configuration", "tokio", "tower-service", @@ -2620,7 +2629,7 @@ checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = 
"miden-agglayer" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" +source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "fs-err", "miden-assembly", @@ -2630,7 +2639,9 @@ dependencies = [ "miden-protocol", "miden-standards", "miden-utils-sync", + "primitive-types", "regex", + "thiserror 2.0.18", "walkdir", ] @@ -2689,7 +2700,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" +source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "miden-protocol", "thiserror 2.0.18", @@ -3224,7 +3235,7 @@ dependencies = [ [[package]] name = "miden-protocol" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" +source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "bech32", "fs-err", @@ -3254,7 +3265,7 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" +source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "proc-macro2", "quote", @@ -3336,7 +3347,7 @@ dependencies = [ [[package]] name = "miden-standards" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" +source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "fs-err", "miden-assembly", @@ -3353,7 +3364,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.14.0" -source = 
"git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" +source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3376,7 +3387,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" +source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "miden-processor", "miden-protocol", @@ -3389,7 +3400,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#0904e2c610ab49bf6ee13cf349c05c8621f23cd2" +source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "miden-protocol", "miden-tx", @@ -4014,6 +4025,16 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "primitive-types" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "721a1da530b5a2633218dc9f75713394c983c352be88d2d7c9ee85e2c4c21794" +dependencies = [ + "fixed-hash", + "uint", +] + [[package]] name = "proc-macro-crate" version = "3.4.0" @@ -4194,7 +4215,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls 0.23.36", - "socket2 0.6.2", + "socket2 0.5.10", "thiserror 2.0.18", "tokio", "tracing", @@ -4232,7 +4253,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.2", + "socket2 0.5.10", "tracing", "windows-sys 0.60.2", ] @@ -4541,7 +4562,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -5035,6 +5056,12 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" +[[package]] +name = 
"static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + [[package]] name = "string_cache" version = "0.8.9" @@ -5812,6 +5839,18 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + [[package]] name = "unarray" version = "0.1.4" @@ -6259,15 +6298,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-sys" version = "0.60.2" diff --git a/bin/network-monitor/src/deploy/wallet.rs b/bin/network-monitor/src/deploy/wallet.rs index de687ab6d..ba074a60f 100644 --- a/bin/network-monitor/src/deploy/wallet.rs +++ b/bin/network-monitor/src/deploy/wallet.rs @@ -4,10 +4,10 @@ use std::path::Path; use anyhow::Result; use miden_node_utils::crypto::get_rpo_random_coin; -use miden_protocol::account::auth::AuthSecretKey; +use miden_protocol::account::auth::{AuthScheme, AuthSecretKey}; use miden_protocol::account::{Account, AccountFile, AccountStorageMode, AccountType}; use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey; -use miden_standards::AuthScheme; +use miden_standards::AuthMethod; use miden_standards::account::wallets::create_basic_wallet; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha20Rng; @@ -22,7 +22,9 @@ use crate::COMPONENT; pub fn create_wallet_account() -> Result<(Account, 
SecretKey)> { let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = SecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; + let auth = AuthMethod::SingleSig { + approver: (secret_key.public_key().into(), AuthScheme::Falcon512Rpo), + }; let init_seed: [u8; 32] = rng.random(); let wallet_account = create_basic_wallet( diff --git a/bin/network-monitor/src/remote_prover.rs b/bin/network-monitor/src/remote_prover.rs index 791315d3b..b103a60c4 100644 --- a/bin/network-monitor/src/remote_prover.rs +++ b/bin/network-monitor/src/remote_prover.rs @@ -8,6 +8,7 @@ use std::time::Duration; use anyhow::Context; use miden_node_proto::clients::{Builder as ClientBuilder, RemoteProverClient}; use miden_node_proto::generated as proto; +use miden_protocol::account::auth::AuthScheme; use miden_protocol::asset::{Asset, FungibleAsset}; use miden_protocol::note::NoteType; use miden_protocol::testing::account_id::{ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER}; @@ -277,7 +278,7 @@ pub async fn generate_mock_transaction() -> anyhow::Result { // Create an account with basic authentication let account = mock_chain_builder - .add_existing_wallet(Auth::BasicAuth) + .add_existing_wallet(Auth::BasicAuth { auth_scheme: AuthScheme::Falcon512Rpo }) .context("Failed to add wallet to mock chain")?; // Create a fungible asset diff --git a/bin/remote-prover/src/server/tests.rs b/bin/remote-prover/src/server/tests.rs index 46bea96e7..d51b5c1da 100644 --- a/bin/remote-prover/src/server/tests.rs +++ b/bin/remote-prover/src/server/tests.rs @@ -5,6 +5,7 @@ use std::time::Duration; use assert_matches::assert_matches; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::account::auth::AuthScheme; use miden_protocol::asset::{Asset, FungibleAsset}; use miden_protocol::batch::{ProposedBatch, ProvenBatch}; use miden_protocol::note::NoteType; @@ -61,7 +62,9 @@ impl ProofRequest { async fn 
mock_tx() -> ExecutedTransaction { // Create a mock transaction to send to the server let mut mock_chain_builder = MockChainBuilder::new(); - let account = mock_chain_builder.add_existing_wallet(Auth::BasicAuth).unwrap(); + let account = mock_chain_builder + .add_existing_wallet(Auth::BasicAuth { auth_scheme: AuthScheme::Falcon512Rpo }) + .unwrap(); let fungible_asset_1: Asset = FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) @@ -91,7 +94,9 @@ impl ProofRequest { async fn mock_batch() -> ProposedBatch { // Create a mock transaction to send to the server let mut mock_chain_builder = MockChainBuilder::new(); - let account = mock_chain_builder.add_existing_wallet(Auth::BasicAuth).unwrap(); + let account = mock_chain_builder + .add_existing_wallet(Auth::BasicAuth { auth_scheme: AuthScheme::Falcon512Rpo }) + .unwrap(); let fungible_asset_1: Asset = FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index 70cbf04fd..777ac993e 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -10,6 +10,7 @@ use miden_node_proto::domain::batch::BatchInputs; use miden_node_proto::generated::store::rpc_client::RpcClient; use miden_node_store::{DataDirectory, GenesisState, Store}; use miden_node_utils::tracing::grpc::OtelInterceptor; +use miden_protocol::account::auth::AuthScheme; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, @@ -46,7 +47,7 @@ use miden_protocol::transaction::{ }; use miden_protocol::utils::Serializable; use miden_protocol::{Felt, ONE, Word}; -use miden_standards::account::auth::AuthFalcon512Rpo; +use miden_standards::account::auth::AuthSingleSig; use miden_standards::account::faucets::BasicFungibleFaucet; use miden_standards::account::wallets::BasicWallet; use miden_standards::note::P2idNote; @@ -325,7 +326,7 @@ fn create_account(public_key: 
PublicKey, index: u64, storage_mode: AccountStorag AccountBuilder::new(init_seed.try_into().unwrap()) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(storage_mode) - .with_auth_component(AuthFalcon512Rpo::new(public_key.into())) + .with_auth_component(AuthSingleSig::new(public_key.into(), AuthScheme::Falcon512Rpo)) .with_component(BasicWallet) .build() .unwrap() @@ -343,7 +344,10 @@ fn create_faucet() -> Account { .account_type(AccountType::FungibleFaucet) .storage_mode(AccountStorageMode::Private) .with_component(BasicFungibleFaucet::new(token_symbol, 2, Felt::new(u64::MAX)).unwrap()) - .with_auth_component(AuthFalcon512Rpo::new(key_pair.public_key().into())) + .with_auth_component(AuthSingleSig::new( + key_pair.public_key().into(), + AuthScheme::Falcon512Rpo, + )) .build() .unwrap() } diff --git a/crates/ntx-builder/src/db/models/queries/tests.rs b/crates/ntx-builder/src/db/models/queries/tests.rs index 7bd9b2cfe..2b558a49d 100644 --- a/crates/ntx-builder/src/db/models/queries/tests.rs +++ b/crates/ntx-builder/src/db/models/queries/tests.rs @@ -540,9 +540,9 @@ fn note_script_insert_is_idempotent() { /// /// Uses `AccountBuilder` with minimal components needed for serialization. 
fn mock_account(_account_id: NetworkAccountId) -> miden_protocol::account::Account { - use miden_protocol::account::auth::PublicKeyCommitment; + use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; use miden_protocol::account::{AccountBuilder, AccountComponent}; - use miden_standards::account::auth::AuthFalcon512Rpo; + use miden_standards::account::auth::AuthSingleSig; let component_code = miden_standards::code_builder::CodeBuilder::default() .compile_component_code("test::interface", "pub proc test_proc push.1.2 add end") @@ -559,7 +559,10 @@ fn mock_account(_account_id: NetworkAccountId) -> miden_protocol::account::Accou .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Network) .with_component(component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(Word::default()))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(Word::default()), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap() } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index f9be6de10..17dcf9619 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -55,6 +55,7 @@ fs-err = { workspace = true } miden-agglayer = { branch = "next", features = ["testing"], git = "https://github.com/0xMiden/miden-base" } miden-node-rocksdb-cxx-linkage-fix = { workspace = true } miden-protocol = { features = ["std"], workspace = true } +miden-standards = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/crates/store/build.rs b/crates/store/build.rs index f7ba0825c..c03975760 100644 --- a/crates/store/build.rs +++ b/crates/store/build.rs @@ -4,9 +4,18 @@ use std::path::PathBuf; use std::sync::Arc; -use miden_agglayer::{create_existing_agglayer_faucet, create_existing_bridge_account}; -use miden_protocol::account::{Account, AccountCode, AccountFile}; +use miden_agglayer::{ + EthAddressFormat, + create_existing_agglayer_faucet, + 
create_existing_bridge_account, +}; +use miden_protocol::account::auth::AuthScheme; +use miden_protocol::account::{Account, AccountCode, AccountFile, AccountStorageMode, AccountType}; +use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey; +use miden_protocol::crypto::rand::RpoRandomCoin; use miden_protocol::{Felt, Word}; +use miden_standards::AuthMethod; +use miden_standards::account::wallets::create_basic_wallet; fn main() { build_rs::output::rerun_if_changed("src/db/migrations"); @@ -35,25 +44,65 @@ fn generate_agglayer_sample_accounts() { // Create the directory if it doesn't exist fs_err::create_dir_all(&samples_dir).expect("Failed to create samples directory"); - // Use deterministic seeds for reproducible builds - // WARNING: DO NOT USE THIS IN PRODUCTION + // Use deterministic seeds for reproducible builds. + // WARNING: DO NOT USE THESE IN PRODUCTION let bridge_seed: Word = Word::new([Felt::new(1u64); 4]); let eth_faucet_seed: Word = Word::new([Felt::new(2u64); 4]); let usdc_faucet_seed: Word = Word::new([Felt::new(3u64); 4]); + // Create bridge admin and GER manager as proper wallet accounts. 
+ // WARNING: DO NOT USE THESE IN PRODUCTION + let bridge_admin_key = + SecretKey::with_rng(&mut RpoRandomCoin::new(Word::new([Felt::new(4u64); 4]))); + let ger_manager_key = + SecretKey::with_rng(&mut RpoRandomCoin::new(Word::new([Felt::new(5u64); 4]))); + + let bridge_admin = create_basic_wallet( + [4u8; 32], + AuthMethod::SingleSig { + approver: (bridge_admin_key.public_key().into(), AuthScheme::Falcon512Rpo), + }, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ) + .expect("bridge admin account should be valid"); + + let ger_manager = create_basic_wallet( + [5u8; 32], + AuthMethod::SingleSig { + approver: (ger_manager_key.public_key().into(), AuthScheme::Falcon512Rpo), + }, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ) + .expect("GER manager account should be valid"); + + let bridge_admin_id = bridge_admin.id(); + let ger_manager_id = ger_manager.id(); + // Create the bridge account first (faucets need to reference it) // Use "existing" variant so accounts have nonce > 0 (required for genesis) - let bridge_account = create_existing_bridge_account(bridge_seed); + let bridge_account = + create_existing_bridge_account(bridge_seed, bridge_admin_id, ger_manager_id); let bridge_account_id = bridge_account.id(); + // Placeholder Ethereum addresses for sample faucets. 
+ // WARNING: DO NOT USE THESE ADDRESSES IN PRODUCTION + let eth_origin_address = EthAddressFormat::new([1u8; 20]); + let usdc_origin_address = EthAddressFormat::new([2u8; 20]); + // Create AggLayer faucets using "existing" variant - // ETH: 18 decimals, max supply of 1 billion tokens + // ETH: 8 decimals (protocol max is 12), max supply of 1 billion tokens let eth_faucet = create_existing_agglayer_faucet( eth_faucet_seed, "ETH", - 18, + 8, Felt::new(1_000_000_000), + Felt::new(0), bridge_account_id, + ð_origin_address, + 0u32, + 10u8, ); // USDC: 6 decimals, max supply of 10 billion tokens @@ -62,7 +111,11 @@ fn generate_agglayer_sample_accounts() { "USDC", 6, Felt::new(10_000_000_000), + Felt::new(0), bridge_account_id, + &usdc_origin_address, + 0u32, + 10u8, ); // Strip source location decorators from account code to ensure deterministic output. diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index e9f529855..0065a1e2c 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -13,7 +13,7 @@ use diesel::{ }; use diesel_migrations::MigrationHarness; use miden_node_utils::fee::test_fee_params; -use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ @@ -36,7 +36,7 @@ use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{EMPTY_WORD, Felt, Word}; -use miden_standards::account::auth::AuthFalcon512Rpo; +use miden_standards::account::auth::AuthSingleSig; use miden_standards::code_builder::CodeBuilder; use super::*; @@ -156,7 +156,10 @@ fn 
create_test_account_with_storage() -> (Account, AccountId) { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -423,7 +426,10 @@ fn test_upsert_accounts_updates_is_latest_flag() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component_2) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -520,7 +526,10 @@ fn test_upsert_accounts_with_multiple_storage_slots() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -547,11 +556,12 @@ fn test_upsert_accounts_with_multiple_storage_slots() { "Storage commitment mismatch" ); - // Note: Auth component adds 1 storage slot, so 3 component slots + 1 auth = 4 total + // Note: AuthSingleSig adds 2 storage slots (pub key + scheme id), so 3 component slots + 2 auth + // = 5 total assert_eq!( queried_storage.slots().len(), - 4, - "Expected 4 storage slots (3 component + 1 auth)" + 5, + "Expected 5 storage slots (3 component + 2 auth)" ); // The storage commitment matching proves that all values are correctly preserved. 
@@ -586,7 +596,10 @@ fn test_upsert_accounts_with_empty_storage() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -613,8 +626,8 @@ fn test_upsert_accounts_with_empty_storage() { "Storage commitment mismatch for empty storage" ); - // Note: Auth component adds 1 storage slot, so even "empty" accounts have 1 slot - assert_eq!(queried_storage.slots().len(), 1, "Expected 1 storage slot (auth component)"); + // Note: AuthSingleSig adds 2 storage slots (pub key + scheme id) + assert_eq!(queried_storage.slots().len(), 2, "Expected 2 storage slots (auth component)"); // Verify the storage header blob exists in database let storage_header_exists: Option = SelectDsl::select( diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 7bc633f95..679186079 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -4,7 +4,7 @@ use std::sync::{Arc, Mutex}; use diesel::{Connection, SqliteConnection}; use miden_node_proto::domain::account::AccountSummary; use miden_node_utils::fee::{test_fee, test_fee_params}; -use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ @@ -62,7 +62,7 @@ use miden_protocol::transaction::{ }; use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word}; -use miden_standards::account::auth::AuthFalcon512Rpo; +use miden_standards::account::auth::AuthSingleSig; use miden_standards::code_builder::CodeBuilder; use 
miden_standards::note::{NetworkAccountTarget, NoteExecutionHint, P2idNote}; use pretty_assertions::assert_eq; @@ -1148,7 +1148,10 @@ fn create_account_with_code(code_str: &str, seed: [u8; 32]) -> Account { .account_type(AccountType::RegularAccountUpdatableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap() } @@ -1246,7 +1249,10 @@ fn mock_account_code_and_storage( .storage_mode(storage_mode) .with_assets(assets) .with_component(account_component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap() } @@ -1410,7 +1416,10 @@ async fn genesis_with_account_assets() { .storage_mode(AccountStorageMode::Public) .with_component(account_component) .with_assets([fungible_asset.into()]) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -1462,7 +1471,10 @@ async fn genesis_with_account_storage_map() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -1512,7 +1524,10 @@ async fn genesis_with_account_assets_and_storage() { .storage_mode(AccountStorageMode::Public) .with_component(account_component) .with_assets([fungible_asset.into()]) - 
.with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -1546,7 +1561,10 @@ async fn genesis_with_multiple_accounts() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component1) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -1568,7 +1586,10 @@ async fn genesis_with_multiple_accounts() { .storage_mode(AccountStorageMode::Public) .with_component(account_component2) .with_assets([fungible_asset.into()]) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -1594,7 +1615,10 @@ async fn genesis_with_multiple_accounts() { .account_type(AccountType::RegularAccountUpdatableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component3) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -2058,7 +2082,10 @@ fn db_roundtrip_account_storage_with_maps() { .account_type(AccountType::RegularAccountUpdatableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); diff --git a/crates/store/src/genesis/config/mod.rs 
b/crates/store/src/genesis/config/mod.rs index ae071c175..f2cfe40b8 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -7,7 +7,7 @@ use std::str::FromStr; use indexmap::IndexMap; use miden_node_utils::crypto::get_rpo_random_coin; use miden_node_utils::signer::BlockSigner; -use miden_protocol::account::auth::AuthSecretKey; +use miden_protocol::account::auth::{AuthScheme, AuthSecretKey}; use miden_protocol::account::{ Account, AccountBuilder, @@ -26,8 +26,8 @@ use miden_protocol::block::FeeParameters; use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey as RpoSecretKey; use miden_protocol::errors::TokenSymbolError; use miden_protocol::{Felt, FieldElement, ONE}; -use miden_standards::AuthScheme; -use miden_standards::account::auth::AuthFalcon512Rpo; +use miden_standards::AuthMethod; +use miden_standards::account::auth::AuthSingleSig; use miden_standards::account::faucets::{BasicFungibleFaucet, TokenMetadata}; use miden_standards::account::wallets::create_basic_wallet; use rand::distr::weighted::Weight; @@ -221,7 +221,9 @@ impl GenesisConfig { let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; + let auth = AuthMethod::SingleSig { + approver: (secret_key.public_key().into(), AuthScheme::Falcon512Rpo), + }; let init_seed: [u8; 32] = rng.random(); let account_type = if has_updatable_code { @@ -429,7 +431,7 @@ impl FungibleFaucetConfig { } = self; let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthFalcon512Rpo::new(secret_key.public_key().into()); + let auth = AuthSingleSig::new(secret_key.public_key().into(), AuthScheme::Falcon512Rpo); let init_seed: [u8; 32] = rng.random(); let max_supply = Felt::try_from(max_supply) diff --git 
a/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac index 6bd49fefd83ede7e1dec802e6d17b06c13c1587d..b76116b5740b069ba4065aaaef0fccbc60a84fe2 100644 GIT binary patch delta 7484 zcmc&(2|Sct`+lBb%$OOnuUUs-hG8rf%9<=K%8)SLYzZY5rBKFFLS%W`2o;efAw@-n zSBnaj5EU&*vXr9i-+7pN`?g=-|NXxI_nqJGY3@05o$I>q`=j^Ou-SdLr`Ik61JCXLL0$oYzT5p>ef>9Ych!bq(4e0h;U^n4 z|8*`c4wn@4gY-N3_s?X}Jup5wL0rBU1M><1_GIwngXb!Ea&@_|SXh~h$_0^;b+DYM z6iW{S`az>p=RXBmRS1k1CNWsi^BoYRfXv|3+2}VeY8E$3i8BJnBLA=ye^?4XEcx#i zEKg~hROAwyeMTUyd_o^m3lGcV(t;q?EJuOD0>)g5%&FR}c?*Ips6PYPoNj>7reG8m z0glu2*Emk5e}>KDEI0%4RJ_*D#J^OYAHD+mnQ~s`|E`SKa#PrN!XH`q6Q2CLj<6LK z|6~sSUFA7pZG_3KjKCO?m2@g=W_ETKedY{wHQ*FMulo0u!&3WU(SBIeAC~HO3;BqZ z!;H=%pQOYPEsQ9O3L2}5^Q*c9tNObMiLy==pW{ghvbSJ0w#VSDXnfYJBXqTI8!Xgr zu}oeEUrzrrwu!YeM`^VG8@FG^+pD**IxFR$fY2m@M6Il;u4J@5#9(hC_$) zQi)V5mo%3Ih3C)gh`(PA?7!Kw|H57r|IaAT3(wUk2m6N1?PGv#^Vx%huB zQ-53e{-fERkNM}2*?*Mg|FZJDFdID-{wK=w!gKlmBr$kN z%CP*)edh|zy-$o2VR2q^RJKpL1fytC-kOdTKmO9#M5{4um|rKERyrNBIBju5y`SETC4G06LdcA|4OSQ| z3~t1hPR6)END!TZc9>w1vve|63=m?Gdhk;Mgcxa1*b)#_jRl=!PlhjEZ(e3D=Qv_p zD3h?jK}pDTEA0braP&-R+WuP#l7Ir8%aG(7PJJ1b(<8$-?LQ@F;bnTKn%VUyAG%jI z$)2nc2ucJP&=^T{6#`xFdSr}ABOx_HeyI$xSu5a}5AiiSxqhHz_4#(N96(cGa9>+l zznjlbOUSv7YiB|3t@N!<-*kI3@4llRa(R(BSdLs!QKiZND0;lDba*S`5P06vx7W98 zG8W^PmsTy1#B5>M6jr8Bwhev~L8evKV&y?hIK)uQWSvMf!2cs2{B`~Qq&kDnK1w60eoydyV?CYH$4xH(g$1nPg*Ff3ZJukzHr+-g@0r zcl71a2?cX-slSx&solkLuUsgxoW9b>eb%|k{N=V}opH(Lck3AmOR)%#J_TceMKttn zu<9TO1SCSAj5&-&a`h=#4KU?I(s|kb&$hbb44X!UHXdS7v3KMNoXm7qA_&mrf zv}8vMR3)7mEO#@b@Q}50i_<0WCIF$hGJkDv+YmQW4zcs->vs74teuQ}MzrG_8J~M% zYwEoBrF?glpLC+_uI-d7_udy1y)FkS8Wb7CBwdhLW1J4n*NJH5`bFfD3+bUh`F_~pUV!H_n zuHMHeHtA(e;^yx-!iPcw&J@gOydElYyO4BoVEKxZ_Cm6|#}vp4{X`_Isdh0480C|$ zSQ4tEYOWU&ysl&QNGn-2ZAN#tPi8Vu^;7cwaF^7kB_Mt1<<0!+(~PVUddv0rkS%LW zgGWW=t_EBY$6+pugazsjndl5)5q?Dq60f7m9^&}$MxiH_PB3kBc+7KVZT%GkGV) 
z#Jw`a`MDV1k7cHouFyYHr&H>*HOU-IBKq1)S07b))qyx&5`B_ax9U*-u8UBVKqgjz zZhqF**HT8|E+>vP$NQs7&gV{NhLgO$O^JPu2wi{4+V0B2WM}sFTD2&!H8QQGf?WwTg%GMX8MB0dR$J_9kN_bhSeuMp11v$L z9{i+FK!&t!u)@n*%}+CK`TW7puu{okRQIr3#O^PV zw%Sucr&^!Osy`gnoU{LS$Td^3V|EyFcIe}ZVATT8Tjf)e&p$CrSa7w;K zE4QP94Y!=j>ge-tPq3`zMs9w2$k-?LzH=3y6GxaxzWK2@y}ikRwq^ZJlcHz+%hfHS z%`?5GszdK^t7ov7y+>&#bQed_8&;6zq zGLjj>Q>ioLaHLMG%$&V`_4QQ`cW@%^C2iT67NkTt(bq50fixSnb@Cb697NMi38_)d7ZiP4TA#V zFTD2YPSTG*sSP?ja`y**fcou0?dc*(l^UKHNw2v6b6 z;I)0brfUX~l~gG#dML{vzEm>i0RcHewZZNITr$XODjB;MxG971(#RNpBBDXF!G?mL zWRVCOSv8OdhJkC$(9%1*{q4jFYm|c8U3IA7qfLC(=s*v1j6JN81ScI)~!8BD8+*t2wPUzKsWW zcLi%z>0#jx)u^RvXE%#Cq-xjQ3kQ6%0B83*)9i63Klg~F0uw79W3wGeX!Xerl?;C=i@H zVD)K_L|RuH3mtrDQxg2-*^I?oJFJu38<*Jk@2O^ijj1U_Q5nW2}o=)fBhM3Oh$k*Nj3j_+@y`$~wk-mFs+7J~6*LNr+fNnXg zZoca4n8urP1*&S$xyv8RUX=01?qq}+C=T5XP68m5Q1^8qUYu-1dd|kyZ52D-R(pnF z7%;1FmM_R`Y$knLtCfgkYE#%LfR_)z^B49b_aZ>$sJZWy_ieP%n9KbucDn;}*=?2{*^a%w22&Us7JQN@QU zisQNOmt^lL=6<#~`i4H(eBsJ~u;lC3%I7b6YH?RE_n{cK>X`R=20SAorWHQ7X~q)M z=Y}~m_^TWCNx+BJ3o_ za9@M>f&BCXeNqCB=Eq*&n-KBk>qzvO2QAF(J>Sk>;X$hk*yj=tA9$8aU+@Sv4D?HB zNo1C1m@npE8sE}we^}~lx4?l@AcN@3T=JK=FLka+X7>^Z+z3h#N=1=aQ<}0@<3irtM;q6g5vS^t?+>{}W$PxA^6)TFn zAFxbgqKiA<@}B5&;fI^@73z31?~ICwK5-7n1j69m3o=$(x5#l3min(!v2k(C!cjMKQp+hOO8MIOwaQ*cg>%v^1okE;?%;+9KH-7XbZn~ zYPae>L6H^tx>K+BCkih=sdRm#NBdMUHmxSB|9pJR8R!80gEbuQ}9TC(bW3PZw>WID;8KcAp&MgYQ z0{lb=ok-1cc{chVcW@{#EC`Fk(l8lThSgv#cnPcz8^f#MHLxAL9^L@|27AN4@HRLQ z-UEli5%3{60ZxRIVFXTvPs8WoVz?BpfN#Jxa0C1peh$Bf2jNk85+-0^F$_$>z}gsi zB?flJz#bUb9|MPA;7AM{hk->?FmM3|F2}%kFmNLV?!*9hxRA5U<>kr0$QSB9?N_A_ J264_c_%9#m1kL~e literal 17931 zcmd6NcRZHg|Nnj2d!%G%kF1+Q_KK`Rk#QTD4UsJ~GdnYsgzP9I4J(=1vO>sC*(xE* z?_8I=KJV}6)AyIp@2}r^JPyzMoagm?yjW@Y2L$NSH}kj`EiB!GR;#nr;W+QQk|1w85IaPhL`RU7j?2~c8h z<7#1T;cD?OowmEu@P`A~k`NULL8leZ>A_$b7gDAI|EWRd0a+MiQIL;;d>mvkkYNE8 zkX1oe2U!DTO^~%fJ_qtOki9_m134Px1d!7}&Ib7*$WK9j266?+RUr3)JPq;;$g?1S z2U+SeE0`+DN>G@eySuwECS?B&1^F=RpcJY3 zT}a3-#0L@zvxTkt_ZnFGR|0yc&Pk*3xG_KelP$;x)BdD`xc5if0)G-B#AU%n3I68) 
z(;>P)$qTU@U}3;uhYw1XkCBCjMGOv#kMB=(0lGg)3;ZtpU)m1E!2rkipRLY}!ezwe z#K$PW%?%O}Bg87v)Z&-ZMG?PW#CkEeL%Epr2@b^i>bm&H$%{(P)O!)|@v&+23NG{e z`{m<`iV|U93-I{lU>8xNVq;xwCmo$AJbaSmUl2u!Nm85;s&C8KIKqd<5W!C%iI5ND zgoS7tI;Q`@x{0j-1BMSn^s6o1e))j(EDBpJKu$#RKMMXa5J45)euMrO?+@Ysc7h1a z$U@5UE2D5(NmYXB^b*~=)TtoW$g>ypyQ9uN{4bk`^uOezP?U5`%y45+Vq1hBX#n0| zErNU!`1Q#G!-Iu{81CPQ>o?;3jW~Xx|0NdGKKlE=4+cezxx=1qTE{mUf#J>z3KhRy z{sf~Y)`=V3+;a;f>UwP4Y1z3Jqv4hQE!BYS17?l|GjnXQh#Q3>CgCID3&2>W8RNDZ zZChbvMf@nu$nzU8ZF3E>Q`~-w7<5~V`^U822XvSd*w_SqPfaxH|LcZ9tMf3J+* z{mN@QE94`}j)e1v)mwn5_c2(4HaoQ_^ zJSlz1bz-Aj=l9Abnh-ZTm|-eB0}H>Dcoyx#2B^!vJlRG z0s&0=;Y@UgRq%80RsvhYsQKM_{i(ge0<%lRQcxZC4J_xxaK#~HPsYPYC*1e=aXvQ< zv;I@|mibh|S@0k-68=zuk2$*M&HshN28+W9jR$1@Apd*!qS@ntzx2O#8Go0d_lc$oE2NV*WkY@FmS`dHR#_2 zG8}d+m7I@Ajfjt%67d6mNBW0DU{N@Px$fbGDnIO$jagLavt<%M5vQGfF059Z<($>C-Mix5G< zeFWTSCR~OA?849HQ3No(ErZ{-Q5b6hjc*9J?Q<;#0p)e{x-i(MJ-p|YA;@!C(~=Pg zP%}x*K>%lQ&&L2O38U`qgtsXgi z5J6#xX*$vo;K?I+1Oa6Ik$OLy5Wszc2R`wkFe<&VpAlfi z>bmDZ<>b^}JKvzUgHL>blyb4d90vO|N2#MX0n;(Wm-~=GgTffa4M7B5KwwY ze;WaLlS4ldK=djFK1rf5UElj>VX#kg&+8iY#(k2ukPsq3%t5~n1EBR&k~t6o+H^Vn z2w3@i;TQrYXeagt%hsse4Y*{V_O35(#v=kYy~Y~|U_QY100GpxK1jb{wxg5n;Sv<4 z;c)(*xv9W!#7N0zsT9)o`!x5UY4{%U97D1=vgKmT3~KR^lGlUMn+Qn2WkyCTW;AeW zFWdzcl0~=*g*hJEj7-mcnrn`mszRRIa=<~l422oyr}RRK^tI+)5D=4C-iLtssg*rf z8eh25ASHF#Y&d1d=19^@vX-yshyW2{SVF1*Hs@nHvhf|P`08-+5o^o%ii@0%VZ;TlW zb|FK0pZ47Qu4EueOspRa5kSz9w%3{m7$^32-3f_XvTzj&lX&tQf-vj)3VYL}{K|i| zrDv@^pF))8zNubB0E5(-ZUoe7`0cgfTeY#hC7E?xHoJ~VkGn6Dir3;TY|m*d`uR|hm@S&)K-AOKFwWCqF+It zdxlfZh5&q?;k`X5DkypzDXDyZ+!_JOZzgvUu%YF>x9wv(Uvu4ti}q=5xx^wGQ79Yl zXo3L>BRQD7*JtZQJl;qN)!04cJ{a>ApCktG(tyfRubql5j?V6}8(q3{aRL5ou!tw6itujh>*W^ZIbfKF#_0F6`~(9^}QQh?1c; z`Cj|FUmo3Ci9e_|A`xI5JuONRj^JP!GO)nAqR`CPe_<}*Ru(&=kew7=1p^f3VWhAI z0zO|Q4Msp(AVmrS@&kzWUVOP+diDm?iHI@^xN4u#(b^0m^BdDP>%M3A-s^EcSO6F% z9gjx(5%YONQ2{A2*J6K!fJ(PzH3TGZ_~^i3pZ0uE=L?YM%!s2b5pc-PWN#IDNqcE; zU>q)@oq(%QnBe<~do!>6vC1AhSS=tDuG*hDJ`9Ekg6TjDxwQx2-%iJ%7^(S3J@!8T z2e#_pYeZ0lXbtbUnw 
zc5rdVtpXn`|MQj{F2(%8r@ik6w*C-@01LRj|6hXmlhR(?D5w2d`W-hR`1S;Q3`YGc zDj_-szCX2rmqbZngHnzIacq@sM@F-hA~{UjzsNt|v@#>5V>YPoQPhgUT*({T;B z%9UUZ`}dB;q?V54r#?HZM9^&-pBn$oOa6KN#`>45YjU^Rwzf$aS%~(d- zI8$rLufXy}5I%?j=Cx~TSoiZ6scF@18b0h|CcE&(h|c7(&ti>rYB;m4oFL2r$zRdA zAeAg+kYZ4uYn#}y&XL$H-hxky5#ptSsZZl<(aI19M9 zs9lCECr5i8HikOX^AG|5;I=swTa7nD^@z>UEYJGI)%gtV-Xz<5x`7_^^0$LX<8C7B z0&KVCXOe>~5$6C};D|Xtx>~m__PcQWL2nt$Fk7@!h?^3K15W4_l_zdZ3xTs6JZ6?P z3FP&!a~jXY0<9@xu5b zW7CwEIfh5L2_NdMIhRrb|Bx}G9ze7@KQV1OKct9a{Df7Lmm2-}7%x1qB75p&!l%45NN-HW|dbts~BD%Hy9y&`=tEBt!RFRov(*CsM2HT zVn;+hKVtQ2U)2&@>$TJ@iBqUS3m2yEuq(J%KB7Jq^KFvl$6Adu_C@cn?RN<1fxe@% z({`(#GU$Ip^m1k~>MK(_LFf3%Z3{u>dm;LkH$GkOrv>8$jA;~Uu;Rl}o@BgZZtVFa za?OV^xn8$KY5LrMUn)Xb6 z(@sdo-r*I6X8wFU?eb!Xk~=pKg>7_2-5XQtV3)$)@HVQh%gcRQq4YqE0AylrM-_#@ z!t)KE?%c_*m|U-+SSy%8Q}cdFpPRX+XTZ+@{Da3Jjb(?)kIk?Nk{=5X77g2u^au_t z9_PLy=ECvKL`&zM79;Qtjw3foC5ffIt0mz`<8&mwn#$vv8QxgK+>h#yo$WJlIU@n@ z6>OL8s7yg!tN86NIQaA0X(ui%@Q7aEPqi3gIp?}@2fy?RGvL5uwP9FVHgD6O+nFc- zO!y0t1B!{q>5fY}sNhw4?~HYBivykl%sckH)HGj*#HgrU+hHk;`6hE{CyDeDqcXkI z!!M7-!<@ir(jVr%wudVV)nXj-o4#{`RE2C-v<7;qeFNI%;|4cs(Ue7Mhl)Gs!SCx|G@F zR;1`EQ@^wq%3x-u zKYchOAq}5);W@|OO~1wULh!3vV(G2)2=Ar*+ulj9cLE)F8Y3I&t@V}suy#XvmGDMx zv)W1XQ?*9Dsoxw&dzd*$FFvK|*qS;t5Ie>T?816x-y*p~Yql68#5>JjOGJ&;M=Cp;JyE^6nb#iR6IOi6`JuhsCz;Wdxr3#7Si7{N?>`uh+AAtM={i=dhaGzR zL$3HSrv0&0Y3teAYk`lKu)LlvPAy(LYO7#fugUVmE1W?_4A*;DElg9dldHUr=y3an zHWU5pqYE*M;Sa>6Ib7qp)O{L&$LJ@}G#PQGt6)v(;9lLzBo zB%iZ3REV8WBzo8^~18+Wq0U4u==KpZHr7ovir9L`xlHcYl( zpQ4#bta~`lml64#Z428V?R*;?pGjbY)hm1!*V!RpecKFwlH}b-PeJxfnlE2GTH}dV znNJ3}v<`x6Hh);po>9Vov^d(a?vzvca0@%hnVIF$6!*J1^IsoJTwKkikO%R?ydy6= zVxM*z5(Pcaniy*;_pQFB6(Bf+TUHg}F|m==Z|j7$%fD)~KsGx=+xD|wsrO=N*u*?# zy$Htw+jJ=5JKVf1WnrL41@yKprX*hMs->+(GZ%lzB7UOS@==(u;pQUs0m^Q)%maBm z@Dc$Mn$B=h=t|96&q-I;*}_{iG>u!L?jMDvAGT(kf4ewJmZ}Wmfb~4YEe#~^?8W;3cmJBf_VB?%VEy>Q}jpdD4Cj{f2 zdNX`s=8J6W8NQw$t@e+1E0a__{NJUp25&q)1=a(29K_X$+qBe`ucEUG42wh03)7O- z9N5>ZI902*(PcDA_;^Nm2E`I&qA_!}DsFca=@QA6d)Kj^)syfg8 
zj?DEz-xkG@aiH|L6g;O(=zq9=VaAwElvboX zAIoEen6({Wycne+TN^N;2~e3jXSLcr^MlrkKIP2MABO4b@ForV!&B+TM6K<*N7ZPY zyJe@#biMSZ{5d_dzIj=pzM>HBScC=oL_C4xF6zBxZnxHjh>&4o`L~3_RZ$iH<4GWE8(cO`gZ8_FT34JeZ0~uQ`c~W#PBb?x-0ScS<A(-mc`G6V*O(g=+_G4zr*)KV)|(; z?VzEhIzfYObm`8Uq*blhp^rFiMh(VO-@v+~-kQo9#6NUc8Co?PM& zTo;0GZQysnajH{FP?g-X3kh}W%X+qIoZJ*FHfp7=8wQ2rl{fFoUV>k)Kz}rlu{@8| z;XZ9$O>Av@hPv1@+s4&WK07M=Jm=`yNsnqDN!bw)y9N3~HQVxADDk zM!^h~7-TOaY)cLL0|nkGQJ#9ws#`onOv-Z~ksV1o*ya9^QL~>%Nk8IRdIP-{Yhw>q zFYA3%t&F+k#B}cihA&QuHBk(c2eR7NUXo$%lrV~z4Z+TV%j!zny`ba#xml+0dV7s{9boWInM?~q`UbVQU=NvdC5PaY4!WRX>wwK!k z(bH)rV7$OQ@wc3hqAXiDQE%B2{Vc`j9M21Sk{o_Z?P4s@CkYlJ0>?ONxbN+QFqK%`R9j3 z#`v3hcs|l6xXginhXM86uqj1Ocpl3;Vb*&+#k7YuD&72O-xFTrMqiV0#itqhrN?e8 zh%1(gcb~v+;&Y4jWyg2=bxj%c1|3?na<7?Y*4;g^{>6*05K&Molvp&*pEFb0NIey?n)QXTt6~pZ?Q>KF)E)R*$psO@V ztN;h$^}Ulz#ITb=3?sTR*GobTw6e9W>+Mv3cKys4cqo=>4fNnRM7&t(nkSyM#&5XW z)r&=CK42!1PEpi9=tt}CltOm^xmg4>|2~DGm-1}nbt-ns&Q`WO_oC11YKNFvmva`^ z_O!p^H;cf=QF)|oy=3^ond|2sudDGY(`dXi?DVuh>N1n-DnUGn{}OD}{_vS$E892# zv*_(K)0U-6p+cD6FS_=odCU;~uHj1Bfb8f~K!KBg$SJiWBDsD_CuKEiO$e9_yPGcS z61^&FSeHx)x!ZNl;}+Jg%NxU>Jb53#GPWrhf>XP^4DxvbXnk7NC&y6Z?^p!I!JVE0 zID?=Hn{%QUZdeWZv*L}-hM={K+RqIqGUBBT%iWZD8waMU{#C zYCNxBu0ARJYPQ@k=$X9m=GORR6_~&Za2zu0tZ~!lg0A4cIXNGxbeq5zZ`kL%_e|03 z-ED5AKCy0lux`Nf_aNi>ub0Q&@Z)35GUUVFDi4s<4B4P>mZDpPtuH!9!ySTw2 zGB9PGKHYhwtz4=x#|5O|XUV26X*WM%t-SctX*_8M~^^w zf|=hnwsnpQY2D6xj&P0Wi4fufbRsHj7X|G4!)Luv=@YlF^|)pD(AJs?`_A|nbD9+% zkgSW0=8QC?vn?*-BLpO{9{lYdFGxUV0p_he_W+eJ6o}x)`$XWe+S>TC*Z< z3iZ3XUg8PH0nAgm7Q?AVsqOx%@SySL+P&TE2cwQWk6Fy)$LFr+H2NIi$J$*lz&vrM z(UHi=Cn>rX!;K>T%9&*-OL}qMw=_dA{FQ(=hy%XqH~#ow)yROQ!PWdmUx8`&3#O^~ zlUbHxV$tfJ=boX7*{=cr;C<_u@K3=uj#rFFiE-~rFogTK2+s0Jm}(iVb!dlPZ=zK5 z1sre|MU5XGChaPU7Tjs-jg$5hHoC03zWKVA;2c}6#qLx25iKRGf5RX0hVyU(G2~Q*G|ZzXv3BXUUcBzR zaG{hXv;flxN&Ymbvs%8JM`JU#C^~p$uk@K{Gx=jy z?m(6mvYKOWaNh?3-&g_l8La|)>hdrt?k&P+4w9y6%#zZ_aH=%b4SfhKNTkDhgF(N+ zymP+P1nV+xw)bmWo%skQH)5vU-#=E?Y~S$BL+MQ9wdi8~<6#I}Nxpbbk*Dv`E4JtE 
zibSOc4d~q#i}*xmrS*?r-vN^kIm;XSaZ4!~so9qncIXn8L>Ux*HT?92a4qNF#NzNI=_ zIpjyE2L!!5t543ub%!;bF=BUHKejDjCmggh8{n;yop{}GP%FNXGZEe--fzkaDAYPbv7}8U-7(C({zU=6I+wc4oSJ&o;qr$1gG4_#qPoJVBbFu#Y zZNH2Hc47a%U5(Yc?nWeZuhm`FbH3NlVup!&pj=N_-sr1hPjmo9Ay#j0q2%>>g06=T znf0E_8y_qm4K-?dHSPUGFi*6I=h?IDw^$xZ>mI2Lo>!N_$J64XW%C(f+;xetT@*I{biN(Dq z_?FDyzu(aNn|(5u`jFN;`Qv>bnaO|y+ocq~zr{LJLI1h#v79ku^A2f#*dlw@)w``vaE*APjOSB8 zyfE*H)nP74=N^goSJV7dO;47qS>hj!#J_1zB&sTCGt)8y8wJUKajEFtvV9E0t+K@$ z!4gf~?M*9h$~WUnC-g(7S?09Xwy<#|KAe?G_Iwb6udmXWmPbz*aQxJ*cRr2js|!v7 z-%qWKqyjB&Ab4VFyyFsAh9tkh-I_P2Ic8?~Tjo++Qj>N*45hdIxb*xU@DJvVxtj;y zURFF4|&S#)S}q0^Mla`~ZCv+VQhI-?M;j`~6P zat-VsIZETzGHSy2S=+2K-_FYpjq-&HQj6?*x9M0j?#Qwx!1EW}VWIN12w1$`1K;Qp z+R@4@>k1ed=_OcTeh!;i7fNOmS0-eFc?8=Xv*4f}+hKRkRvU@9WV&N0ICRpI?ctZ{ zZ8n8`)FEeaL9E@s&C4th2W3Z`k2x`@y+EUOQ7D1tZ+!F%j%>PAF6a+FK&*_lQDx`7ac-BOZz6Lv_-DO5`Y!(LmTqcH z_E&>PSAU+y+V#9S8h1AL+s1?Jv)bK*%Vl;`krn1a$ug|MPw~qrs*Hdh3f$SDa{URf z8c=+n3;iUTp7|op@Ay5=t+OpM=Fb(QPJg%b$t(cv2iqMPF_Tg!OsjFN;3^?bsm}FOrO{#tV!)6RzUNnC<#ab~bL+Qt|}kJ+)v{fb9wZy^v?mdXg%(r&AupU(c8y zQJ~XY9Byxr99$@p@*-E}9xcS`{q0`w35XTe3zU6d@wlNzZ{g)Bhag9#xYT009ch-y z*LCVnn=)MD>*at0^CHhy=Mj@K)tH?+qVN3Tj_|vS4voS0cV+Mu_35@{Zc&zF?fMFT zGSJJu{z&~tn3qs*yo*dE#IuAgv&q*PX1@T&n(-z$XARgOWM*hvEGU( zELE=-R2=T8C?*zomo>jfL-}7I9J*|!{5Z)_l@#-7x-6t&jYY|EMLMYsEv)lU#Ki6e z7zcuY%yL%KFe3mr$9__GY}f2OvjXalGUQUpo}zq^Eor!1?IqT~Z+403Ms@bIS-BPY znHg`oFvbP$W{Iu*N`@2qD}J%2;6URq1oXo4B3RqT^TNfHlv)mU@e1$5%+J$1(-n8H~lj)ml-Gll(x>v}rt8a9&uxvQtt_AjlhyJVuaezBJ zl(;?jT~F6d`b_R@uDb=k4>si0xVIWVua(`+`5<8&Eoq9iiz!*i&8#puEE%-RfXGZHDu!X(<_`>>`zM=Kk_t`c+2O9tf=HW*q`_3e9 zo}X*wC3f{NcX)8bLB_x|^u(4M38pOkC@1y{Li8g!OOf%Ll_;6IK>vuDivpP~VP)?N z%G(m}tge0{JNT{w*cAbK^;#P$u1`F6whKD~PCY(*(&mKO3K5FpN9Lig3v)RQ{mp;_ z^Ipcqhvpg7D|8>o_IiFafqa+UJ)?a#qOv=M)5j$>>XHw(UEJ6|C0-a!2%P+SOY?Kq zhoj2*H_7cdeGcdUEHm_6Ruv2b01Uy32gKCbb*`_`&V~V@b^HjrDtR3_(YS)R>t(O{>$h8<8LuWGglvACm zo|?VL;S{xAM%e+hU><>kL2}=<^vWgH<;k=ib&Y@|#mTYQ75sGS57quQ_{5!H9>MKG z7(v+kAGzU(JMa}#Jw)Kz 
zqElwGAb21McgeXoZldqjoatqtcMRyM7EFKkkM`cJbQ!ZBo^HZFs*#a*f92iR#83Xy zA8z_H4`JIy#zuiZbLhhWx8J6@yQ=xi`y^c+!E=s8wU1JA{a!z{x^FLN7ufDquUz31G@m5uaE`OQKOs+6s6X{( zg+k2{`cOu*k>Yw_6Kgle@VQnP?o~sp9{E+z`jo9xRm5UtR*k`8GUt7h`dMm#6rB8Z z`gI#8+?Nt9_eLkLcbv@|aVS;Z_LC^B8Unt-Jk$DYo;x9OxOBbVTM~M$Lbv)_o>4k0HE2V^_7+eSg{Ia;RmRrL+|94TCGqwk5dOP}2CJa7yiQ}A~nUhSLl52D*7iTgPx^RE{^ zMdOZf4qbm9cs-7o4&QPcZw$l>+Z|fPo>$9kGar~J=+&t_ z;g+0q=q!r2yARks0eD%uQKjZL8oA4-@T*Mo5Ay~T5+!;lQ&LX3vPj+MROFfhdT_k2 zqt3qds?OkEwpI{|dE8yC%Cy95Eei_YpixtPc;X@qLo8H z6i=CX0SBLv#1*PxPcOSuSY8RkXZ_%`8>P!eojk^egMJonvp&?79I}BfPWvAn%Nt#T zFA2f88iq_4x39X1az<`ToGCfNQkO)Y)iM}Y_?36?*>}A1yjjq{l7N_Qa8KYEXF8{K zh&ZM{z%$*n|} zhAYkTr-l7-+mHkC)~sK!3Ywa{s*vqaK5Udg=+78*1;SYC$A)gTHf z`>28Kz?KuUjdM}f z$uFuqUNbo0gWr>}#8s@`RzQ*;W3OE4iyEVX15G4qyi)E`Luqx24G(ng#j0uRFM|Gn z_3}Lq9dZfFD&DTw_EA5OZg|hlw(RAB)eq7yByaV%&C+tgmxQ4GxK0ve{#0pR zIY${4bokMjXR21x%iU|}d&ch{P{IC510tK8DCAn+pGzGZi7)Bi+8jCW#KsX`VAQF` zqpqvP-kyuq`sQ4=eguJuO*TDD# zJgNrm1&NtAf;-vst_04Sw)UK7%=)xeX0L}UlE{z;<-2?aJUCw6b%xKAnU1sGuZJ~P zELOd)U4OFGK{iht5}Gm>KvGUOg7q)#SOflws~lxVUPOa^zG_T*VkO7TulDWXCH1&G zYi;XafqybU&s020PU%9LO4ba9!tU#B4ae>;IkrZaDw?cTGo}mVXV(D-B-S-Y$KHH% z(bLR?jpT+FvLP)Z>CSH#Cu1M0hK2;+u;2X#NU(onYPp8nL&r&{P!R(qZ}S`@)X-Gl zbDAFM#hzxFBqHJ30CwSV+C5oaC4VntmyEzzLtm4O^RaZMrhPyI3AP+}QrRi=Dat4@=DH6xO zn5al$OHkMFk1Hxr(om)Ge+EfK zI1oDyB#Hw`;XraYkOmHP76&rMfgErkFB~Wk2fBp=CE-91aG=LH&@&vU83*dZf!^Uj z^El9F9OxSk^c@Exz=ddWA$DBoC@v(23#sBlXK^7jT<8ieg+TaiI^mAj|`(zx+7sZ#TXg^SV~oH3h~RHOBKg~w*?&8v{Oz#)x5@CgALadR^Mb&HLlX3V0M~Q-^8f$< diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac index fcd0e4f9b65b163ff36e1c3f0434fea54eb29eed..8e0c2650be4dcbad07a83758a16ffcd3b1b78ebd 100644 GIT binary patch delta 7479 zcmc&&2|Sct`+lBb%$OOnuUUs-hG8rf$}VduL&A8oRVb+_g)){BQkJKbwNgY9QdCs( zYEhvQqM`*!mQs}cI}cND-}dYKzu))&zVrJ%x#v9Bxvu-Z&pG!wPX`m>DA!~WHaBgH 
z|N0Gq1_oZfz5!mL8-ffBHhS&!*|5XFz-Mb<$cCU`|E+-@{(&2}dT2u!(4e0h;U^n4 z|8*`c4wn@4gY-N3_rEEkN%FlIm{$N0r+}vrJXgSzugitS!pdA!E{Ke*hUG-1Sb7|< z=v43?G(lE10^@~A3|8!X2Lve~GdOiN`iqO2%gs{ajKHzTKP<%`mckE9{<{UsQ`#mK zxx{9l5lFUA=tF9e5d~aY5X740C{S3yfYBcoRBhJ01woe7p8;%6H$Z4pFp7!*$LaZN z+#Jmu(GS=>&XO|_PsMBfO#Dmb`Qgh^-+oZetNh=U5eIGx8&CKn3xC3sf7cPVqT-*- z!N03KC#;Pyxs?$ZBVtRZvSwyyXVGWQKoW84AH`fvZ$c3syT1fC0I4zO-PiLS#pLaHN?r1)z}_~x2EygunyDJzOA)XyU8+n z9eOG2%h-CBZPVi6x20d0+m430{kM#4vd7zKX4rf!V={M(9P)P zg0;{nNpvcM8$2MaKOE$rbI?26jjP^u(g#9rVr8U$xXa==3o%etQ3f~4x&WA17C+j9 zHhsj7U_wGIJ}TZCPo=0KjWC7%QySHO>};GgAFCiWB0`oPFv{{BgLmcK6vLrId8tGy zl}nmSg2MA>cEsPW2KL|V*?(a#ivMSn=Y{8L6a;~C^50fQ(+NTB|C#c<@Lc>qm#M$4 zeE-pG&&T|8$m~A~zlWj8Jg{O)`ToPRJyYKR` zPwz`gr7uR)5(&yI+nQiaFvG(H68J}1Nm;(=^!AxI>z?=s2x)6`;Z=|hJdF*B;#uEs zCuBT26aqtM6}4b2xB%b)1(5-ogq#Fg#e{^UW%07;LzKV^3E_z(0TB^Aj!0tgl9XZj zmwV0>S@<3sC&Kb(swBl?o+P(FEE1PB*N<)m7FszW2zkV#$pt}qXz@fuaEKDGE@(Qa z)_0}vf9LQxEn(pWb6{yAk!t~K6uF6$VRPa6SS#;X88t2|N?{GIc(lB5uk2hE|EIN( zN)^yi7>T3d6;FFSkaHPvD3Up} zz*$MibTjP(ZgBKuS;pR*3X*^Voy)M4>#lv-RnsHGH=I7D%l>AZ|VzXAzQ9t5qc1ry~>58-MU^#%Mz~H`WTfdVp zKugH2j%)k5{F_;uUBBt}=G=WpJ>dQ#X|Mu0ucAto0Z{aKvvqze;v9U|#lP3TYcd`a zP>@k0kj!jh*cVl0O|}hw5<#X_R$=8qOgO|rmCRNEnLx*N3&jzx_e`D7+T+2q@tx!S zHjfdRbzu*?N~n{(@70PhieLl}z&-qU*pqgyR5)?1&_TnP_b2Ve%aeF#9y@>8`Ko* z&_srugZizqqjk$7mN3e>N!P0Lwgm`W4Vnisl91JswMCN3#y;$_aIu82#U2ZW6M_Xw z9E=NYx13$)fz+w0GF5;&x+GpJ$@d!d<<{Q(9d5d`(kj{Bu>L}S^diU3-re=OnRoQ% z(Fp}}aIwFP?xWqs@~T=WVNSR8^O|+5ws^TEMQ2>H`P~}Ep`}=aN1uYR#3CB{_E>e0 z0|FAIPsSX?BKi6htOl5JBJx z_oRMzm7jF1?XJU=2lwt56TR;HDHWDNJN5K09d5VpXqUVC?$JDs&G1`Qk!*j-JApAe zYB?^jP$D}7T9%`|x5}$`?_~TJuT|(u;RDZMwn#(yKy#=@?b~Wy>`|FzLt;A#3Ld^k zDfU_AP2v{sIKl_QgHE2C(Re*n?0G)i-U=e;r3X-U!${ynQ@J690olY=qbbicpa#j7B!%7nr>B>5O zs|3-zm4hdfFiS!D(OY7yKY4URzemMW=lyE!YSo4TEUa^11H)7`Heq(61F8SW;UlTP zE*x@lDSs@RrM7I(FvDQN_@Qhyv9afVDcNt4|31TcJ~kA3I?SF=<_rSwZs78F%8V^C ztI=LqXHg1$A}@3U^$9Vgz!q0(6U0 z4*pg$3U@hitW4{VDLtD%ofApg@NG)$b5!`6i#Cpz7p5?~mI!Uih#2DFGzO|y4dfm} 
zinJv$A9;}rS`^Ge0@A8Qfo+g!EfuUS&=f+b+GNZU0$OdcD?kE-kWg(hb|tU`k$Uh- zpMVT$+hbRQ4q?Pphm3UqfeR!4I%JF?0jbcTU>(uOBb$f4SI~(Qs1+*aHzx}&fnQp{c3<_g8BTx&#+d>W7PDpTg2`!k+$Ad zNvGPJ$*n&a)10^WcGy)@v7?S0+`%ZGufMO38B;Giv^8Af7WK5Ab=9K-`vwFgcW$#8 zv<78iuCXJdKLtIo#q;H-XD&%r9x$F{d%`&U6$=8l=DFaotT*ni0EZqplEbNmmaW__ z3ijM`?ki%?zCFgWksG=3h>YVWEZAL&ku?ILty8XA3KatG-i9AS&=`dwa$(G@(#uPd8B_R~jO zwx3*4^~qzy{@A-CF?BXTI8q$&9oI^E`O3gEzpu){E0j%aiD3)BHNi;peqg2gxl zI{0u1l~4Ju7V;CU=ebkN=PBUzaCm08I)UEk=~l+7_4SaOF#u%XX`xwfZ?jt}jDihCT_gNRXFMxIo&yVH|p#Gd<2%Vi|9 zg{RVI$dO2$Sh)pz&5CPw54Uk5?j>#7o)Mx%IM&xM(SbA@wRQ3t*&j$&^vSYR9CR%v zYy^a84I_REtgT(zc0W7yvw1$_arNxl_AHgq7sU$}QoVJOf1ilu@STqEOw_!3Eu#Ar zY2A?&ZE|sR?z!-hC#?G2=TD77i);KKHe>?sHPFdupGVP>>QY&!o0_V6Uf!PivLKXz z>?;dgcHmI7fA$%F5HYyCK!N>9dhu#)1OK=JXJj3ra|hYNn;%>KG6mKeP#C z5XC*%$2^32@1v8^QTs%n8bZc8T4@Nz%gJEfqNZ9lTBDL^iyTnER_s$=kbk|&W2b>& zWCEX8Ld0)Hn`1nzVkZp!nw`7i2%t7m*joTt8a$ojw;uZ4OIfka8$;T!n0wveobVT3 zrz}_L$Dh;&ogaAx0zbk3{y{0n*QR|N{%CO7v}?v9VNvkF<6eol;wvlj#Hl!>Cu!4G z;3vB4UzIQ`?5tP@9`^1sPT6H+5_=8}57rJm8A)oEf~j zXUFudLBy6Sg+&i#8N{DT#ylV(hpG11U4Tmlc}*o_cLO(N5MCM?6G%ifX!h7}@Jbem zqLEdDiC`GG#tbdJvop|9oUl?Uq}@YzL%}v>)mLWDtat~#gf>0*O(Vz|nkqX2@Sw*= z$(_6s`mN&T?qGj)zIMvZMf_~1SIIbwI-}`QzP#fREgS)$& zWmk4kxI;B&soJTH;tlEAb@w6xpDe&R{?0UeoWsvOBB{W{ipSY+MN->V?sLt`-?!uC zOI5wyyXT!$VC49ZrPInS`}C%f+2u-jMu1&!k22SZ3deZXyP#(7DkMNnl@-+%8~&!7dfpKVH?DkkP6 z>N}r1+@17%V0Q^~cN!7lSEsP!0TkSNA^X&rx7U|^{#b%<=y$y!wDv@-OF!|+qSceP z9mH-Y=6_27^6~(8tzQ~43y0%ko5)#0!OJ|7Q}Y@wnU>`=>MB~!nw<`>;7pcl_f&~c zq-BA%z#BRrjd^5bMz&0~wBG84@V9p)=ko=LV3`A5HuT%YiT!~uG_edbk7u|C6Z*H5 zC&fj5;(l9Xy|oorumd>+fD3`Jy33v7#HlS|N55`e6+&6Y{M;STk!e_Rd&dl3K<4)H z+lPUJ=y6P0-MWY_rFT!Kb8u~3=X2z1@4f{B=ia@e?dXxdn>lTW3WMuA6c$9c8dkTk z`#Pra=FB-&HR#NxkL54QdE>V;A`BFV?uI4<5K5@~x(F{$HX=P|W9v4HrM1pGq8+%UgkoOh{IE@q6-k`5w4g+JWa z;C&!J{Xm~|2uBNGujxG$_2uhG?8yf$%+tHRoxRM1Ru!<%B_2NTsgOSJ9cmaHklK>O ztjM-l%)d0TrQ7MC)TwTPeVHJG=*L{jmxMGcq@+QSTDm!hEh%Ls6JT1LezAb?|-kdw5BBD>+f^vW``1XQ~l~ylu8#gmDjQ+G(oUF3yM7vl5 
zyRiSJtp(4-iyBFd9x{jUS1Cm6p(=QO^^s!0;cHy8HYP-Cjh95JJ@bv3z_rR8WLfT2 zTI;=R7q(dBsx?u$z(bU2H`5b@NZGLjlu1k(T^kmQTm3J+ksq(*HUh3My%N)K0AZUxcnz>VT zkD$o%Lfxs?dy|CC4+I3d^ACoe^AtFwcBJ;;kpkc#6#&T!DSRUQpE4$lJcU|nExJPQ z(W1Ny{q`QE-z=0A_*&CnO+uVC?Xg9GlZF&(k})?(NF(UD031?76c}V|37}C!Y#88h zA|Zhc3VK?Zs3Ym1qnCu-03G18s!>PY11#__QXLW0B4aOuQR;}k78#?&2hJ@Dz7o8m zgHGg@xjY+2TR1N)2#dqgFd0^c)nF}n39Jtr!*=jW*b!a>uZ4euePMri3mgpZg2Uk` z_yBwePJ&Zl1Wt!fz-QqSxD2j@ufw@Hq@zfr0N};6@DGi2?3#A*amc<;lOu7wSIkSEUaIan3dP EFVTquyZ`_I literal 17931 zcmd72cRbc#{|A1(UG|oeojtNH3fZe<6^e|@$ZUvgnVH#Tg_4jRWu##xGh0>&*(qBk zMEuVC?W+6pz3=W{-M_zn=kYi^uXCR7=j-)4^L@^BSy)-QqESdXkNXed9kjn>ZQ~#< zZE@kk6$?)rXK86$3pXnp*FD~U{)Ke*${<19gD$QX4%QaV)-K>lH-`(CEU(y@?@53X zdmC2^YYSJ4f9Xt<>EVG-gt(HUx-kekrF2#g21{s2g&O>)0ht$M5s<||J_7Plki|ik z0r>>TsvxU_tO2qn$XXzu1^Ft-ULgB{91U_j$f+P_f&38UryxHAxg6w5kb6O%0(lzb z8IZq&EPV-0Kmc*0`O&CD7?8xV_@ER{9D?v?MnRN5mVB=Sh27cR-Gwm`$1fg`9SkBC;oV5ZkM35+5fnC_B^aMZnL`uF)g3 zB;fCthZYkf#==G*kU2R-)u`Fo7TU;0#tRM}C;b;hQDGC6#)a!z)7KC2W3a?Hx4=-~ z$2#FYU}))?{}1cNKUu^-+tTf)4@l3VaK(ZY#H9bH;2#4KRMG!!(EsB7CVcEqCx|gj ztYoY|GYZW{b|RQwFTt%-of=|`Jab;ZE9%U{|FMb4{zpCvMMcld0yhQ~u0I`R zPq1p@9q8bu?weRK*CS(2OU^Y|4X?CsDF*Bxu(Pb#=_3n;JSY?iDL*NH0M;_q7~NvD zWrdX!^`kPQ$g9V;&eqFK^7t)aF|D`UKc@CPpvNA=#U}V`YGP3Tzit=|K3Kwp_`%AK z!NZiJ6pH276!^z#^$UW9H%|R1KPrVW7KIIr3W(F{+OG{yxa{98e_%Zp z6H_BqBUL7bNXfw;ZZ2*%u~3o_k&z%VQFuW3gT?pyzcWHsu-yN&B8Wm`NZ7=-Dx-#qQeO!k zr8xix_p5LIPuHl3i~oBg7Q_rL=U5FTkW=GvQ2hQW*f=bnh+)Tm|-eVj|!M8Qg31Bv|$U@(`{* zLP2cW!3<2hRq%80RsvhYs`=e{{i&_o0=rAhnqL+64J_v*aK!;+PsYc}#NYS$aV{qn zyS5{D(|j`i40sS33BRczz#hiA^WQn_usEDB_(0}2^1pU3+C3il%kWE==~o#_;8!RA z8ae;>jpgTS$v<~x;iDkID-i*f@~5k!7!ASJGo!((N=%@^*>J^s4gRwN3pXrWgZV`u z$K$|JDfo%ii1~S_5I^8|q<=UB7KH=YYaU*xvV%@p*agL2Th^cz7wq$->YO{T6CYtI z^lP=J-dar2V=zFC1}q=2z`uzQp{Qtn`#u6T?4MEo!{#q2#7}~y$2lpCp&|IyVgSPL zrEmg}Zx5j`1i#pVOc*H+VS4cA*YX5Dq8?DeU5}*SRR8RoUuHP4NfqYs3n~h$Xs8aq zjRN$9RIwBR0us})2v9hF6=aoAb|O8Dg3sL!deSzd_%x(pR2bJP*%&J3xmJ3hyT1H1bHrVN-6>Y 
zY9=Y!2;eHJ7)HSR#xYSCps8Tw0IF9MbrLP+X&blfDB;pm*%9_&lMrhMe6M|B7o-@FMQ%dVNdkLd`5s3 zo9mtf6%&(t?R=fV4nFY#Qu2j%a~S-kIVv5!ahQ%Jxzvjc8Wh%0=b95zgjbR1hJcbg z`dbLdofzCf0P(A2_#}zKc7E@hfx%yzdtO_=H|`U)g@q9T5>AFS7yzvuDV9J4Xwzr+ zAz=CQ`6CDzryJiJESn?p*Wr@Cw0CV`BMuR;=`mhM0Lwo12MD0i^+EatyA_>e50{{@ z^#}9z%uNPl=UKDZgP3g zm4+9tv`9(qSl|#0{?eS$$CvHMbK&a&ZxPU^o|y#$;5up7M@IyBmF$0q0Pf50#}Lre z`UROCSd$Bn2H=vvv}cgW(}M_1H62CX7_h3O&UcU!uk*7L2e zk&=ntr_C_j(tl~sz4vlDqQuPh!4Lt2?WudMxsPdlZ`U1{yeS7)p|A_rmwg+P0B9+ zPg{EC%JWG?Y4)4y1q3ikpYB3HjfUS|8@^Q=-CL5`CP(+ill93P$WTFHo6HhV!0f+_ z?ta}EGIOvuz3+&_&!Mm*A08tsAvXGOi40OwF2}QqfT-PhDFiHvKR~Qtzbb`)SdVLqK$sNgD#5$4g&D0P5{BJpzpH(IL1pc@s=kA%LX!!d~0e-MO$gY)x)S<-%1c?3=fQd;R&CJdhtLIklm!2!p>gcO{Ww z8F}s*UKKk62zZC~_MoVs=q;qA;`vc)1gN~3*hRp)miOMakL`HPeH$+NOLI%b7SV`8 z=~#Os3{Y69fuy}YTO;Q6MoOqh?;-cW*slbn*+_|N#^TwHIG*jo?kBcUCeD#K2Mcr|{lU`DX z#RY z-ays`*lzPqqJtbUS3gGZkhuV+O1CxUyGY!AZ&}MQTZ~hPn=*(4PMGBrPuv>k183HG z%`B_qDe7KjXFhwUpC)8cV(3_q62nXewD172l)f6pvfR&a+C(62HfesiQ2v%q{Ij;+ zHTIE~8Ip}@B18^_Ss9k%`ZD>}Bb$M%_o5zk#!rDH*K-*%-fn zjWr3*zwTRPEcYsx&pN&_oB3jt|HJH4R^D@$x#f(DJ7ymHc!Jn*ydN*61AR2i%yXo& zsA98n)~*l8>3#8w#Zw^<_4QT@q(Sx|(0=>PDz0!c{{^?pD9T+cQOrywxl^%}pCgC4( z#XZv(C zS0v!Qg6+~Dmd&qik+}T@k6=za_1MLEUa|86DHemQXIP64u2uKPbuLj{ZT206ZjS0+oK&@5`d=&^Nu_(G0oEaXmYKc>IC6j#^qiT1Q2jiJ{}|NHh$M zE@5%`c)e2iNoC}#(kzpe)|Y^%2=iEsXN^_%$5EX?eXRJ{-KolPdq94`z^2HjhGjj+ z;`0JHYx={yPScq26}qN~qm`)eiPWpD2Wxn%lN+3^3tn#rJ6ngE3LujRg|Ri<5Bcbo zsCGM0my92O>gFiuVu>1(b~bk_k2H80PtA>M=eZl#OEI z%J{>#+3aKls9U1m)NPDmJj@(q7M{|!Z%!WQj~V3yc40lUZ;?D9Ha;IkT_o#b%7{qg zrRWJ2_YGm)Gch-Z<6V1`ae6tbD?Ud#qm+IW6GC1iZ~G1ySBGljJw5d;sw7^Bdi?2O zpaK42WHf(P}sQ?&oOTW`*D?K1`)}Q{DwF=AwN&?%dr(wSC&cz$RFGQ5~OG z@q9H5nuboTx8$?0ln59<3v7E}nu#mV3YKtsD3{vHAPvs}OZ#l?&XSR-%AJ z4)W3QbKyN!0c7bp-@OKOWhth4!-`HiKeU(sBs-EkyT2qCXP0i|{RhJldnFYoUB~jZ zumf*@$QM1vwmp_EX*pAKHSqBwj@P}(rNw7QV->9HHBokOnJdVM@me>Vg=xw)iW9FR z+TFh4cr@?qIqA=laePp}FWSBK#_^8M)~T9C0;%$O`KDOT8<%n4iDMX9>3M4I{}B4h zCFR|1QruuSY418d@+|h&0RnkO>FXEZU_tUfrgD(jX)3CZ^82Mgk0;C{WXETDG;~LO 
z1@AE1a}p?PN&*hNC$S`ajo=-ASYylM9%^GEX8pCqrk9S~r6+$qH}mtPcAo;g8G-da zs>QJ|^Sb8s8I8zHZQ-4Uwo5XH7b5ye@&}fm9P*{$*C%tl$4RJ8}CnE-0etjUa3!~ zi9F}bsEM;%PM7jy{7b#5%Te8w08@rxGb*9JCWR%>hV9HQ*I<)T5C;nEg{a^thqD%t z4YTdnrx+Ge>u%0-rNlmGTEo`KI^G7yWf0on^a`HEcC-sx-!>zdAbt1IQ-~vj_RANK zmN=3Xmg7M#Ed$`1%^%jYXOi?EDT;QiJ?T_7)XYJ8dU|Oj+5K+z+}FpF7glm86+pZ& z@6gNkn5P|v#6iz9$448>e5=G83MMn(uqgDL z2pxI#KF)YTbHS<8#8>DVFP;FLeIWn*$;HoKT>SjO#n0bc1mWxsmN2^=95&UJek%V~ zRp+_iq1oPX%LO+{8~&C@{YR##B5}O)o3C!qM5i&%g@Rn$(GU|i( zq~W=!wOXOf@29~yfG@Dyr7p~QA4qDt?&wQCAU}J`c!e+5n)EVhr>hnx_xH1EfjGM% z@}yVM%}S+GuNEi_(kyyf?F#gD6ZoV%;}3hQZ-q|%wA-cJ%O|ruc@xJ@qiZ;kB_-LYHl&vZQ?&UvHMqlEmpyoPVr>eOh4xclduu zPCbpG8!)s~C#=_vJlx?b&m`S-rT@xL|HcHIjyes#lqk6|a>L$dMnRiRdiVS~nbqf- zKOMi}1@8=#P4G z*5{ErJg2OyNUUv7(-e7T*|=INWJP72;~LpG)NXed$1C@7{ZYGE_-OR%mcZqlHot7p(+ zYv{)5Wxj8$ku{eZpXzzQ_{AxqI*M^(UuN6tOLFX;VkS|uLD)Hv{FM^x99bg<@EbCd zS++D!JaM;eeJN*v=aWjVJ{-s4WrhVD*e=?e{=R6%uo!*ot7g}*M`-FDmpPQ{OIlj}cZA@n{XxEyNf6Y9-=I)8}FHT~an36`Z*rH)Nci`$e zp@BAIuj3a7=6%J&j2lL*vz8!U_}aJCC$27H=`bJf(S0AHmW4ek8E=P~GVkYfc}RR3 zQ>jU61vm(=_nmwKmV+E(9M-*ctvJ*`D@)tD&Q5iwb0@w3p?HQh(1YU;^LSp+VQibJhy#X}EHUpxDFO^r{5R^y#vho}8vm+2f=Ns6GUl!VykbOMh!9V>My7E%Z)q*6gc^ZoK!m`n&YQ@Tu!6Lgpk>= ztMQU9@vFl6HL3WJyPan}ZsP2^yfF;QRq*jEWuKHKJh{uqsE{j&(Whg3as)N@j#Wqk z-03NTGYG1nDLZ=py49dR8~*4_2u91O?d(tj6MpKD{0%vaJN*N8fCKZgPfhlkUU@26 zSdk#0#{2r^%9DbxW=r)0o=Nj>ZjMbQgT_NnPNNa0d`8 z%qu0mC>+x39$OxE(QST1=TNT6hExWjAUmCO)ST`v>*8Zvyfb8N3m-kK0>oWS^3FDL zNEjR-2UFJR)161!DkU1T+&~IW{+>71pVN5epYCn>$Velz&@0rDER(1%lGhnIMjI7# zpw9`Am_eQuJisTsA#&0>yaRtS40ZFgg***!O~%)|xla7%xz9toaCV1+o3;;S>?5p^ zzS30C(cDyzFJgU>FEz@p^1ODc!P%og?-i`)rlhg+G)aqabFypHP3YU}?7oM}nr51J z%UZ-7XY2E%z&*S_%+nBRBry=8BGkU#@OGy$JI=A^gW7>BPu7f0ockIwBF(pPb}`YM z-GbrqW`0-M*Eq{%bUW%e!!@GELrC&538=7L6tL?LpY=jzj@`c6?UwFCS7R#TJMCl4 zWmd3Hsx~s3E7FkOwy2Pw2#~;f@TYr_Yak9-Pwi$|$x-}nW_<(o?RZL>BFMs*Bd|ny z)rzDs)bGk#u_qV@Fz>|GTU=^X+U~Ck_8V`k-rLQ3FyhGjnAJRPZ1!4qgU>zzoZYp2 z>=Sod9m(`O(!#5^cu*u?xiSsq$S%zJmZS@XzY_EYalkkI#vdQ77#XnEyP99`%{T3O 
z!8{pvJkwHKJX+oJ>@y4r$5r4Tyl)*5*%4~xe8qH_1btVMG2F*RXog?XRLf|!T|4wz zBbAyj;DECzYV6<;S!ZFi&~{@_tc;(C(IwTjjn_4VXW45kcAqK?YboRW8~Tt->>FA0 zeZ`~e4dv&u#Ra91wen^|r_}(V$d3;T^?($d{I%IwXzsA`@i|ZCrqdA6<;#U=f5aFv zUPfh$lQhVzw)g}8V7tyA^_f3fDseZLSJny*pJva})Dc=#Co&kmML}Ia%QA8tXP18S z#p~Yl=S$etGfE+ifIPlz+r`;o2Wt1?43n`13own4D)>M_SoP&Az0dU6-gh%Anw@;ioU8haXjT z=HdIl{>*s=r{}R6bPgQ?nQPoYX;iVwm}?lEmYG(x{#NrW@zPg}bt%{fQ5+zPkzHs0 zmf~#XkQbpI5cKklJ_RrL9kw*4h}|vynASX~;Hot+)oVn9+C+={P^9 zA0g9er+Z$vwpOhLM)yU4cwyc_NKegT;E-$hlB4%+zjKq^of{txi=>d;vX9hz`V=LV zgY)lC`(+fc3;Xx&N{rSuH)7#?E$(ujb3J|*)66veWqKkCMqib>qXQ@laC)=z#jnp1 zc0PQ_qW4_Ecz@YQs8QpqDeosjxnhO9&z@zy#qm&D_sC@Ny*dp(o{|tV6JFS?$kRNg z2h}Q;T~b?Uye4)F*aV3!llMrRC#%;vZ)be{UJCY_)e8zeZHh)Gvq?$&3j#uV!HcIq zCm@p8nQ%O%T1~z!A=~mw;SD}tu5Xo#V{rG=Mk7zJwRoRZoZUaI2XUbNxPV>(??LI} z81y}%x8(l*eTLrO?31|F2esZQEOi=)g}mGSg3}XTsM20|kd)j%sC~Oy~N(#JfjfYwT9Cx z^EMW%C!3k@H~6&u#33S)|IB>?i$*G9Z&kXx=iSNSqYRw_O}>s zmM&Ba6>I8lZCH6zy%}3PrXM=RI;*w1iHjrQ;f!>W=YtRe{SytTxeP=BM^D~-=hKk3 zGVdh#{p9j+3ee&Kg2xue+AngaO9=?xt$uTgb9!2!c{bT4C2{-1U|Q>si_h-?|6tyz zyLs^Kg@uZwvblxGbpN;2?MH=jE7MoRPCPk37#|zGi?h3nZ9UefSKmxe=_r+>PN!P( zkU2MoH1qW2du8KO#y&w{zWMV4J!ky5spTDp{6j99g$E|)J52d4mmWGb$vwZOGXnAH zsPBg_*TDXftvpsGt0r=vt<@^y?VQ};2!FT`jp(j-tBy6(wj5hLJb%F*7AjARkk#8g z@Qp5!9i4)TuAq^TUc3c%XUNRDKq`x*B0dAmBiQb!1t-nuHivVT+Hk~0(``ec!Q+`Z|aoc!NBm`<~2O*Cca%s{zpg;Hlu_DGsm4om4*v>}&_Ds&V^#@sJw7UkDO6?{i%guw5WZ6WX5|mO_8UZ~NxU)m$ z_!C_*p!_}?`bjJ;<3+09(R*B*XPRZrpDRV3`fllykq_DrwmUp*Caq4CTJ2iST}+Z( zmmWTyf2&&Z^f-In{Hg2d=iF^^c6;kaLir$*=>_2k5gG_6zA?MJ`#}hk02P?h|8_26Q|1|8e|Iw%7$DsYd-6P7PXycsg?B($L>V)ilte1Sg zGB&A49BJAA>Xg+Ml*2n-1UNA70Lh|Y0_nZvLBcuyQzghT7_-2IdQ;y(m<8Mhb9xf4bLu0%C>r0_EP9Kd!IVn}2!IA;?iVHl>Ju zTZVPwb*;M7hAg+lS{dNLyvQ?Exg=!F)n+FT={vu;Bl7NoLqqWWU0DJpefllgn^a{u zyS^fy4D_<|W({=i;uyesgj$&WcS-%Qtf|{t}Ralm8j-(@VB43gyEpQnqx= zY&RncO4O@_l!n^Ni%10DWzOx2|GD^N2M~%#V*J zGontPqQ*aRhUYwrzu?5rcA;FcH_d;|Z2IPE*ML5+?q!N=>gye>tm{tb)xhrX(486( z2e`9CN!at;^>p1}$l%H1zMJp+U|m6tXS3n+YU$nV50b{wQl>b&*y8z|jB 
zPv#pjbVPg?()ILi%np1WNfep6dk)!fs(Ba^pBXnAehk{ zR{B1_tTo}z%E~A5{qM?wT~VM{r?q~<^@+#!RzZ8f$;W4o+Z;1nCPq>I$T;wIem1+l zuL*Eq-pkmy&|HH$#jbr>Ue6E5Q|xlMr?<^SRCFbC`M9J+UG%}ViyOzMg!3cuffHYE zYJSfAa9Abp28A7$&%wN%QbW%r6(N0~B?fX;?)HylM?Hd*)17Y%+6^z<840CjET{9? zrrk)MFEB34#p&IP+LR{ZeP;1KZ(9Ft29YQ6ZQuB0h(^1AEKnw*Gr`0bXn~|yOT?KY zo5^dk7JIl^&?7`WsQQqaZOY?P=GgPyPc>}C+d%)qcBk{8_Qsr&I_V+F9E*WbOa@DL z8THAk$(akBPEl*6RP8_u<`Fs=B=ufRt5{@Pnn>MN*9b^dni!2)CP<_CP~~q!K+*x` z5!^0B5mc?Wazj)fEL*CaT=ODfl2W(t^0`OwM1Qz&oi|b)`LYKLg~~qnL)RVA{a>+F zgM_ZlI;A%ALi>Wyi_Sf<%c%X(R3pJ*jr836%kRD> zeDa_AaKoQv5Z5lUHi`lngCF{N{5H(pRn1@CC++kIo^>Ryc_f=nEHuU64YZDe%(3Ca zGe5TCnL3z+Z*;ooxKEf0S;HB*;LGD{_xfnmeS1K=z;>^A<%k@k{Ulk7ca-)0F$MAh z{mC!OlxmL9hf>=0WY>KgIJ?<~&$YtPR}8JX6;?d!k~dFQl8BdDH3W;xp7TxYW32{K zaPrsb(`^`cUrex2NFQeG=C-YUC-aV2=m=$yQ>Mn&0}{D@KoSRee-Go0D}!qWuMa&e*QdvuRWsp*1U&v0|6;4hr%$#O^zzu(R#Nzu4&lXpt#Ly5|kpJYkpAn*<5nbu|T z-my`o@YpRNeSYVIqI?la*zEYbaGyUNY3`%5mDLM>A*Wt5q4 zdK)tKN91pYVc$^Hm&_?KJTjr;Oy+hQc~7ie^5k~Mz9aCMg1-atYTJl=5Zx+8(#JKC zcdg(l20h9(c4HKf)e=~tzdNtDNf^Uq|Gw`e!q zAcCcqWidkKw#FN}MvoC2Jhb#ptbNt#1dnlG7hdn}R!+F3QzjD8oS>pQrFrEh+K985CbvFR*(I@G^CyO3be}@RUswRGQ`;%UE{KqSn@op*^K2&cD{D3&T>bG1`sYRh)zi zRt^DCyrt&(ocuuU2zlRirkVsU3`eOHjyH;c_6mnE8oDg@Azf8GoXK^05Q$rp5PI# zG%o8932a}0XPRm8S);V0XQ_mRYkOb$v`t;7-_wf}SG~)$!BMx*VyZkux?l&p!v2QC z$h5runY_82mgGh_jyF+bd;D#ph0r(l6OYzV4ZTvUp;Odnh-GxVl6vMXG-$JNymZ+s zL6p+=Q7<36-Q7N@Gu$pcZBBf*flO^kgVwI9QWMbp;qTzg-ad7+Trs;N&vtd5J^R)c z-UT@)zo@P_&ES9!eosadR&aWo0f~N0J@O?ls*UpZHIl0FNxMrArq(LeKhU`sqo%FD z0Qv*g%kwyJz$GlRXsb@!M}1$K;XOCo(wF;IKFGX~y4lw{L&ptY5`y;QK2Dgiqv(*+ z>%6*rmMSXf;GU!;Y$>}#j+gWq2gwC3__FSjUdbO5ku7xX+%2EX7xqJpZI9}Z~#?KQOjx*k` zhcuThR=loWd$QS1K1UZ4nmijoT1G#N^Dpd3J;Ac8JXL#cM7@2U>aDbd3eFo}?c2hO z>(IQbt!rO_f3iT&R3b`V`F!e$%xNs8-PhY1j$L1}ZH=&%w3#ht%;zc2tN{*4Y^#or zJ$dG0r&x#@C=4y+LYhU>oZl`?#5`6F4GF$(zxxf4VE;zdatyZykCIKIBKnKp<~l~G zVW_`nH$KvfImJ3bOv=3u?84);Yoe-B;a>VKIiazJz9u=>W0?$1`+$1L@etnNaP+Rg zH(VS=wo>OlrL4yCus68#^i{X2rqxOm9DXiD(Nmvd!p}^z3AE%v9*Uey)!iE43NRg2 
zB8hu3UY^Vzudd-ATbQq`p-Suj7=qJt5{L#m2r)sN5I4jN@k0X85l9o#h4djTWC2}( zE&v;LcUNC6beN{x1ifl5_AVjfij?cs0b>7o(DMl ziU%>`K^%CH7#<{z2g&0>8hFqdJjfUia=?SU@Ss3E=q4VNhzC8ugC65S&+wooJg5^7 zdWQ$i;X$AApl^84cRYv?4bhdr_0Je9s2+DW$2%N#PFxX^Pj$y w{nH}-r$zTqKgRgej|l$sBa%OTnf<3j%AXF~f0_(``cdAWHZKTFI3z*;55trD*#H0l diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac b/crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac index bf7a8dea266f9877563f2eab003f2cf1c71b1877..9bab86857e41263ed164fe0a4b733e635d102a11 100644 GIT binary patch literal 19129 zcmdtI2RPO5|37|?y@~9R5jr>=qpXxD6lG-P*n3nmB0@$+AsI&3%;KhAUC<8>cq78b5Z1l%-mzmX&y7wjyp zF3QN5+1Q*n^RRN3kui6+v$U}SJ(d^jF7D8O{`snh`idsQ6-4sk39Knncke$wf8NR! ziQNB>?jK@ptej0Qm|Zjjzke3HgBuRia)#y6A@Y(8ywRjr+5VFhB$D$V!$7x;%*D$W zOwL=GxmmgVQ^CLV9QcQXA7)ycUAC}tHF37Gv2$^C_CO-p|3QV7IXgPKnw)q17rFoZ z^FMvu*6yGDf9W}W?5sA_HH42OrvbnBfkzNLPT*Mq&rN@PLP8`jJ_;W}iFKHliaL-~ z`U-1Q#!5PDQ=2WvMWTO8 z#BPNDyG-GhzaNfTsic0&_==e|CTAc2_(U=a*%WU zcvN@i)oyY8Z4!ZaW546((N7Fv&=1dYF#Kbh{bKkUl*WTU@`;h={+CR$>}uy`mBtY8 z!CC(y5Al=0A0mEb3pB>EQR>05vv90$VEV}$LGtElH1T8HtQ1l!O94sh~ zz2JvE0TxmQH7w=MZv;0hC`U9(iU5t0Bmh4SVex;Apiw_%1&xxx68<$JjwSl*4>2t9 zUw@$f(($8$xJBvFC>jD$Iy8!cKy(ipMMfY>iAIqCb-QE?0kGm%DI(sbAbwW!t6C(+ z{vs8`*dJN@c_I=2D#OITDu$o+|JZx~gF{f|LT%S-Ed7tz{C^(lpOpOA0HKZjFV#PK zq1gVa8}9%8N{sm1wz0F`|GeDq^!$DQ{vYfLyLMur)c>eVT(l^$ZVYZr^qB{Bu_LXi z@3R|1?n5KgSW6rMp4Av?QLJqAefLe>^d#)kmfS6qp=d3L;JCY<{4~>cAyD^Ydj4D# zKNaA=bb}fG&!q#(@-GI6e_h2t6z&=UPFXltz%PCvjNreNwufogtDltYLNv+@+IfD| zFwbrzf7*?p65RuqT^eX(WBS`h^;b8voWhL&ADo|liQWTg?bQJuD+ra2>hXXzK78} z@!TkX83qePx9g9=U|*qS6AX@%8o>J*c7k8h0PYcBotT3F0y`3H_Z$L$sGfAN84b(v zCG^_^18Z$c9~h9*H9UZU2S0ir3@AGK>tIkf#SgzIBCu3>9OY2gAF9udXSRhUzToiX zhC$xmF&Y?T^||b1`o%WQj)@FehppkEV2yTp2q3T_UfiV+_(SzFFB}74IWH z2d_BxG7R$X>8!&bWANh^3`i>CiXec%Hm!AxK;RG6zpQ+_GtFYvg@s{(q&&<^FxZl2 z_lJQxQ(6ZMX68-BVKBfjfGfT4y+^J=U4N+l%D2@hSfW*%0dy^bz_QbFJ%Yg=O)ogD z*!A$c7vP?^jF~&?hy0gGp&kTwHD3l!)gP*VRNb8j%VD{DhzthCdgTN#c-JMn3WI2T 
z_MN%V>p!#;kI55c-=HA`R^oOIyg2-!`kKh0*RY)L7x8vd-YrP&3HRuzO}M}yBBr1n z1`|UwJC;?wa;1ZM5ZKCo|85BUq55-Q%Nk%gH&*;QVbF0hB^3r_&0jCUz%zgECm8VB zf9{7t&HDv-Dk89kHrZWJ*B`3ai{)#BB@Wj}2*5yz%=sP+JWa<2VbJ6ewzKz?iX0Pz zd(H$g!HYiv`{`=a1E}i{)z59m?Sm!CxRLHuoLZ#z6x?&Tb@Mw6zD{ZEEC9@p1V!N< zt&4BgAb`N~i;qx4;1AUg$Ia|y+Ig;$4;J`&<2eru;y(u+gMp~wcj%%6fwetcvy&j= zXMSIx9t3u<{Y5PV{!qP_t4A{QaV%5p+))^8uPfxhK=^h2&Qx>?@DhT1Ty?48Z615` znk`(g2&@6R37#;2sNSvJJ{cCx#yP4F1EPkco#4~54(uF21|)CE!9y{}mv_LDjyyak z!EwX>zdG%t%)T6gMUO2j*}#BB=1emT%1`<1#Jp3b58hy~oI`y(Py{aw`fj?A#K9+d~ z`Z%@_uY?N*g#6t*t8Z99_&VHE{8GXa28u0%+b~#By9)0D2yEj!-aAm&AF7|uGYf|$ zKI?C&hJkd~-JO&#k?>!IduaL+U16|DNR|cz*N0Q^W{AL&l^fJSU4N*4WIDJ7mYDxF zWTythoqX-#9wDY64;bJNQtx=5Q7&`~?r~g@oP_`a`>N5E00Mug-pj9w1C|r={1m)= zVH?J2{NSF&SV?CXSnRpn4}$FPjTf2iKadvfQH<4&1t1dG;J zr`$=0n_VwFxv`^4Rj`NHOCDzVFbCcB46aiI)>`;W-+!Ur?^X&oEHO1U{51@ogo>Pk z!Tfph>o7?2r;39?rXR`98wjsUD_ouktklhCRM6NTde30hwX@p1A935UBk^6o53G#5 zKYXVM=2wo%!#yTy+}SWFzC5i00R%Rh$4dhOf2iIIq5Bw?V@w)m4g-1z!<{v*jKOxN z&=|8A2H>IV_hWYEeL>C%n2f+)SMdvl#{Nt_icXY3907kN#VEdeaRa_VB7tOr#=|WR zZpBTYSNNZ zflI=ws;%@b)yWHKXg1+|t)fGQ{mCTO4;_itk zV022wev@yv<_MT|!M?m(nttzHY&Mosr&4{m(`*=o4yr=Xh=F1r(O*dug7y8&Ehq7> zY$V?MZzDtftB~zFg5Ei9ArQXtpYvu*d?GvaT5cclYFfAxfZtLkNPaK6lDsg_{J>54 z7y+#HZD5v?Ut>N`$+y5Tb9JpVq2B)5_`n!#e~^=SKZ&FBVI=Gn0^#+EP4)uK7AXZn z@^sBz?Cp=7YA!rCZoIB$XX5?fdL$1=^KI*kNXYNWOekbjAZj*>N{CwaJo@s@%F@F5 zxg*vy)bLgaxR}0gW1mYzMhqg%m~TIB$v8-F4(~eYrOHdBmjkz?tvh%Imt+r32RifN z?eH*-7u?Q<5-S8GWhP!WVo<}I#J{!2<)isvZ|jq);EQkg!46ab#fJWN2|*7{HrFNN z49i#NCz93MVyzQ3{oN;y-U%R&41za5NN?>{>_s_}{U0zw`%DBeC7SPVuZcwMy?WR@ z#2Vuicv%7X0X59biRYKAC;dlO_>Ij=qbc81q@@&o(s_V3%h$j3G~qTIIgka}_>8F( z!9Mc=WmH2bV?1OckaZ+NBf7A@eTl2LZiIC83o$GXftl@*-oG%^>7K^CKkt6i>cjmE z=}mbLvfJ+lzE-z2d}>Vzc=!kejfzI4Lh*+2s$#XbaND_xgCF=P_vtbhJw0^7R+{sB zT{B3WJfzq3!9f)^KRTs3gRQ!aef>|PvJ8yk%F^`r@ex1Ko^#Hp2KK=g@+TXA2{8x})U%DBp5RQk;FlQfeDD`FmsSUyYbhopq}>sbFLu@tJB? 
zwnNvUQ=;I57NGhbisp$&e3njcRx`aB*iCx0leqhJ*yzIJgT<`$hD3(a%Evgzr~ za;AGZgmneVu4ERjM*83P3}(J1bi@lR$|4lwr}AHCXBs64^V@LkH!@I!JV;iAh!(rMa4_%Z;<)9Z`L zC%YoWN*LFlAy+Yad()>tsi-#bY~#~GOYh@*ZnA**5dnF7(R+#SxZEDS8Tq=j_R`$B znCuR1$EDf1){B+FZCvjZAmc#uwMaYiK&>n`)3n<;*#3BI)BJV$vXHLKN|W&(_r2NU zc}hDx%po51RpQ*spOD{M$(fR{oRm~XUap&}1IAQISd}K0qwIhO)oQHGbg-P*XW~ld z?&WSrdWt2zXBpo=Mz#+SaJ(xK5I}+hO9f=#oeckmkY~u=+(pe%K5hxZUZK9HmW46T zzNr&MzD@`WSH6&GN%b%*;sh<&h>~A@2wwP%jAR6ltmY zyg@GV$UUkysnNml6?G!)-L1{}8of-yLY_l>0`4Gh@(_<+8=ouAGfu#Gs3YdBg@5CX zL!%#3wRzUigz9dKqipHsIK98i8@Mg_@kYLVot2S5;=B!_*vlFISz$OGqSPoF(MjIL=7Ey0TG%@$%`9t zy7U?}51Pc>9)zb8$x?Mn|cybBi{U@Qc0D8>1_B9#@1p z_vp9s5N3vF($adhkFkL`L-M5GkA`$9Y|Xq>HpWNqO-mMazLePPBVFIyBbU`*zqR)O z@B`x66>ygE1p8n_@OZAeR{0FGM?D&73E0EE^yymTan+RQ8#ukcs{;WLXEMN*6}Vw8 zyBu>svgqLVS_bSz&JfG19FhmMwk=6wW~POs;a3h&&nz5FeC78y(Tn?~ueKWIRXGe@ za2@C^jEtZsJi;P#&1Od}!dFU>k;G{@tb=OJ&fonxTQB+XF}_opecnR%*e>u8DyTvm z16WfL>@kZs`Foy}Tl2XETN#R3F6LXcGf=p+JzmL3nIBT`$lMW#5t=m&WkWxX;Qg5C zd-=%Y+|GuX>~*K>RGEn%jqX`#Ry@V==C2Q8X$CMh$h3)&S98o+BwsHxzS>J;B-i)A zjzVMNHvFCj{HUxys#7I1VtZ!l(}9?;d2iJoiiT|a3SN6Gzs^#quJB?UC-0;Yas#VL zrzQXJW~5_ERZ1YknU}6k4`0w0Z22iM=@y9X@Hkzj9SWlI+AiN=DtGa}bn?adOM(y8 z@3pFJ&d1^ZnRl7Ih<(=vod``TgJ$+$@90c((IPg)Lsz!$W`r=@mR9TsFqt`>1}w_Cks<^-XS1jF)}=yAYrC z+AM*u#s?>GdQF)<=dSM0vome>sP|bc3{(;1yZ!2tLa)03`g=lEX+97IHD7nhqn&7{ zQ})pqtO+1!$F4*pVw_qK8Q6=vkFbu>Y=heXWB)r zi$5XKty2y=klH`-PzKaJXs)8pXQ zrvj=ME3a8*>yH@zkLt36Ow7+&6jozzCi5K)l;A zDUS2{%8Mf)Z(wgl)R&%DHCi%ZxP=-)+=)HlEq?H%AX#K>&x>1?#Timd z0}F4BTqHCT{EV1;jH%HbHAkmCsy0%ZU9TJVL3K|9p6g*3&zd2v*sK>{VA#nlTX}S! 
zk$7pn4_P5^?7SZJkcbJ|^g*3IEsShz5VE{uOgKpP>8l5t`yt)Jf_q&Q=`8#40GGP1 z{W#voVwDsM;g{AdF-c5$>nxM%51i_~Ihj1${h!AYQ+bVod{jVsB?feFv|0F(C#SD@ zc4;1_{K6lStL*&b!jbXAy>VlE^N)l0LA(pBlD@sU;g>3voeH{ZxyjCaneL5qi%*+a z%#pO2O{Y2m;sWvZl{MUc(Wp-n@G@nfue!jy#8J%;{RRKo>znQaD=8h;A~^em%|~9f zWLzNPnXE}%b8}bO>ZGCxwq|hsmZR23dl_?D8OVdywZf<#*QV^5(m22GTJe^?d14J= z({FP3;9(9YTDb6P?*|m9`L;az^56o;gENup!`kkSrZ-MwroO4t6dyWjAh3WZ_drGo zr}x~(@~pk+pk&3n!JJ8+%`&2^Ej(ua@zQffY-4#wD`McFBM=^RVN(${2)dHI^+Nz$9X54p|z1CoD@%d2b1Hk__u5uJEP4EP2%k9#+wba zDsa5=igHp*?b_0*LsTlrNz>tb-12V4*?W{DmIpaD=SrR5;CR0~$7+H2?FA+6pnkbz zNUx)Xr0mPLutm0dqQ?H?>t<;7#6TVMYvWfs&I02A4`DO=^<^B^CEfn+g|}5n7O9-v zHA!A#Oe)tO-G8N!T@wyD0h+Jqeb3e-CWp!7kG(5?EM4+FkX)mjp2Dvk!*J$J^+sUB z&b=c7GkD;>zvpL7bOpg(hJ&{jWSKX{Jeo8=;C<06m6bsS%jg38ptIt)E+5*FZsSAU zN7pTA=)U-hB05N8LYd z*4DY-W?oRIs1N25ls93nPW!CD=6CV(mk(0L-IO$ZnZwGL+Zgp$e9_u}p;H?05a0}h zP$xPbH^Az@(c~R6QWAjO)7Ag9iEmVNvhvA!F2+;-{NU179^%>bj_*fX1s*|#zrepF z(ok$0b2SAN&a(8o`eNI0HATSlg?QO-R=*_GX6la`+9VxMx-Ox${;sktzbWaad%|~@ zYo_Xkz&?lY)u!;JGMovY*l&lloZ?-eOBnJO;N$#CzqT`gl8L@tLzTh2ms`DXjITz>Rw8AAgdvOzu59x}O|M{v3+_`Raa zc&)R+_~t1sozEAyUS1ASwduT%XAVe^zvjrmBdT`A=T)2DsC+$pX099kI>J3YZ9i@2 zgW=Ew)T#yW1DxFv4SfW$1%>wJ_x;CJ%by9#Prn{d$%^b5OFf@$@xiiD<~&aC{S&I0 z!!!-?2NLJ4POPTx#sBs~DAC1(zE89YTiJ}Tw*s=5!DAny^-ZU^gdwJA`^>^4N0GU# z{F(5)jf{YY0`ap+vP_{kdFe{CUJ^WE$2Rkbke_0lu# z7r#yhaBeji*m0>joyh?yGJr zpoyIkLD}QJA}M5uiMI3oW>+rzBV| zKj=}AxVUV259bFDHg5l+i>ePAncmgW`B#@@Z}Brv(A1Vmuk(7R@V*Rpf?ke5oz~2x zQq~SuWgrjGBEa+NH)>ZL&ojO@<+Jhiy=qQr^xXpEI$us) zY+0gZ@piK_p^5A}M|u(ngWk<99y@tR{xgd{^S$j^e=e2?1&w4iKc=;5dY-wnYU1=|eJmwgcLhGd53llo{|H|9PM#c<-UPSJ1i?o&kQq~+gRA@%i% zZ@}5dyf1UI>{*_-;M>Gw9IdXOgb^OE-1}}$+N~Ro7ag^`>kVY>1J4=TM&1_r5nUZB zY6M|_)7iQ-Ud{E_EBw|fjFxZPyfb}(4^UjJ3x(Bm3R%MwZ>xRZNBxlH%>CYr>ICd_ zwWbatrwfLCyKs8VPecWL_`+V-`I2v_NHxb|=%LV2<)!2(FS3gU*bWo30O*hd>IE?q zz!j`r^KraK#<)V;vBGj}n|^XW%MtrBVkpK)LKmDhT zHTWc_`#xTaC0~iV>NCIL5s#C%CpCP(6hm)T$;M@k-jrdJqJFfLg6W|*H>gp`7Ag~2 zPjI|cVV`GbiUZVxl!LP6C*(T$TMjFGI7+uwyKh{)pF#NG%?}=5xU@3oZQV4XGsqN5 
z#t;I3j(`t+#&Z=UBl?xCGfZQtwW3Lw$)?JMz5}~DFP0i<2E_z#U1Kn1zO*ZvYSi;c@>bnxs2=G zLak@GKZb6sksGKN(4Bd|vX}Rrwyk>;hMZkSCj{gJ;+dO%_`Gm#qo~hj=A+WXP1l1x z^apMf2E^Qn$#%ZXG9O-u(<^$=$u#cNT$@u^PxQ)_&yv1LQfl5rqSUQ&!`<#18$&mN zAKXA5fAP3>?u7n`*}$h`mjZjEQC_5GEgZJDg41IB#l`(dLIDTTYfNYCAn@phc$3E6?NQt9{sApH4OtIY(a@sc<-*@1iXm6 zljp>%iwTb~=DQV33%xQ)r=1aWq@(O&y63N@lq(ql;sWv59LF~Lq*9bkG0)V@IzKA) z38o~!LNvUKQ^_RVM}2!8$n%BtZV>kml-S(R_%6~eN3ei8Z$3rvZemT6{GmHjj)CRP zyOAI+&|y1kR(awgVpC3tG=@K;M_gTrZNw^PAZ)Mze@QZ9Onx~Ea3J34{e6|~g=d~Zgp1AAQ>}A?}GS4Fv@O&X&=F``TUy+d^ZMl zXl`3)rrzSQjRF3`dTrVcWwmmIw5u6b1|;8UxG&n$vf+KDaHFrqiT&`!B)~)Pg6H0v zU&#g^mx|~{PHp)OsbJ#L>Fd3L=P43C1uSIxc`?TVKOkP+!}Sr~$L?Wi5%PgoC#Fvb z$;z27<)|{ACt6xHmw4}Y0D5Tzb^5z|;5c0To<3##d1ou_s4>h#zDyQXwTJ}3qXN8dT?v9+?plfoHkSG}o>e-< z2pJDH(ph3;X7SAP)5lZ=aDD`R_OYIokY_m=Xq|6_r}FV>t}2~!LVb@7RZCuJ|K{U^ zI9_a((8;bjN0f82&#i-$-|dG*=H>DfGanU*Osr@q3T!pucGM384>luZ4 zIS&rqjG?jr8Z4!3z~69$y#E1+3qP<=q;8wGrsMNi%Xux|$HDbq?`p1U97`$Dy{$*+ zc$q3}^%At{gE)sCRElR^<$gnJHe3`a^K=V4%hket&ZwxN@Q8_=n&he{jyG6teY`W) z486>CB6|r@)h<03JWQiSBCFwfw{6nw6kRNir$Y8A?h>oQ{H^S5*ZC8w?VDPwm$}oa z{9O16uH^b;+}8o|19xYLEBD=J_39p9qUI+X>jImQyMCze7%z|Y8Xim5leTBF(>QsJ zt&LY|+F7pUmqs2}P_F9~bzInZCdoRok0>DK2IZ=I8jyto55-Dfl0v&KO+%T0+gyrO zh=$&Xb!}y?Pm=O*R7>QiFKB4<0RGA^yhojk6h9&1RC%h}eJjCNhn(XH*{v_BVmDMo zN`ogM`yhGNC0m-;FywaMV%tm|IfcVFjOjFL#OU7BereP!32zN*#L4@eeUE`G0q`vA zwg>A!s_&*4{aQkQc!J;C*~!z>q_b-34zuv#H=E^8K;D2}nitOBOat?&c)PO-3lZ$P z1wtf?S)LGhzBvnJVA>W>Tl$|!RMcQzmyF~g5c?~y`y}q zwow8dw_#scsBl9Lfyth#f3hNevGU4Ii9_Z&kTe8>Q!7`F@#y(w^DPO<$5mP37Ee_; z3FPV|Gz@iC3ZEP7`q_;G&d7?srS!{a?*B^aO+YX`EFi79rAVQpkRxEXWB)!@e5tzAb z=V)r6{e5hwr;v@_VOc(!MmIBIK4M6mz-YVUro2rjKDhY z|Exi9Zg?&YAt4vqSWey=mXQhM$qNH{_5OmE7Xk=I%*WUhrdYF##82O$=6yT7ee1iF zjpPsmV=>@BJVOO3wlNUkvnDWRz12x=Xy{N3=3DA-&TC-VsK5ZCtST~m?a*ka>Z;qy)AG?%>1_8lXUg1**pzF^ z6-6{(_u=G?u7(L{o5^vFRx~?K>LU8ov@y}iyo-7s1b9>?+i(1VJb6(dkAuGT^>g{qC<%u1Qe=W4+PU* z=_pQKs{{26`HOj)eu(!dv8@MDsgA1Sy*G-V!lzBFwC)@YP7PmrjBlif(|cy9`ohhW 
z!sA{=kJvQ^+M@Q9m^Mq;g%|2PtTwCYxwo$#$P)wd=+DTj6^}2R3=0ZjeBB?~%Wy1u zQpJ||{X}N)HN}cLh6WH9NN*b9m`}cbC{=imgW{dnQSH@LLpRqiA8;((AmO`9Nw5#_ z5aQtB_|W&YK4K%EygTfS&5Yq{E)j#FK%}Ky7QSvwvGtj_CLj;uwPv!hx;D36d{z61 zX5S^#?prFw&*$k*HxOCeG^ms#yA6205N|u%yZ2IY$HUk&pW69#^bqS@3#~;)8uP`x zRBBRHL+LFbeh@FkjVE+Z<*R{V0jAo*83ExDi4OX$DsdU+rw%S(OO2@k4 zCz)h@`lm~m2(MzWFWr5w%!IYSs98t7>dITlYXkN{JSmnZ3kHT$CqnK#X&bEe?LPZp zQscpJa#MkBXL~2pHW%ReLcD4N@J;sf(o@T&zBbV`cCskTxHvQMeGhYQGkjx65}4}% z_CdT2@^349U5Fcv&YG`>C;K(YyN@404o$U)?KO{3xL^I@G%jzyJFj&DS&+X8MmdXV z_>3Gfo&J?Wg4vTsnKBo?YF+NidG%mq;}k-t3;1ym@bKLQ_HBHZGj?7vznEJjVo6T( zO*!N4;;d4U41LNqW5h?mf%0n-Eme^HDUKNb`9ftWE$gs|)AvtS@);aOX4Y}iW|U{2VSmyJwPgTeDHc;h^hGL2FI|A5V4=rWX4& zwA5y{lis0M$Hm3B`07!VQTKaE1;JEO9*0NHOB${p(yu2!S@`%M{`R1{EsnSBV4`!5 zlrbegwbH9)RFRE=>sD5rGcjIgHh4D}x&&w8|ST?PxDWf zppSJo6y=f%eM*@CXV?l!K;+O;)lc@rPrERv*|%-1%Px<&r-*bZ=8jX`%N5&QpfUht zK|CRm4Y#YvApA;{yPvD4$dZ6u{mVIb=TOhL!y#X=e42|m`z~{j$C&m;`wuSOQk_p3 z6;R9!qIB5r#hAJEOy6T#5ehNXeES8RB%D5$9m)^t4Z7epa#WpDX4`ay++x1`_>`dk zlJGF#NC9F=#>QscC2d0GFHN0K8}x1N^#;?i6ft;h(5=QzJ~b%#0`dm&EG(~#8~C~< zvJ!Q;Y2Qf27hNu?Kj&R(yM2;9OrBNt5DzCHfl^2-%Xy70y~}5!n3xr?U(#}e6207L zUgUeR1R?mY?Q0;+0=nDfKb9TkqO3gOpy+I2&7T;qtEnDnY+0~Bx4gB!LeO|@hbUi3 z%#(gNh2L-~QlM7IJ@8~e={^zGuKLyHDE{F}GjHCx)tZ&6 zwkU=0G$QmKb4HFl8dZJ+0)9?dAjW6qh~kSX&yeA(qZhd10=(9^n;)@}sdDBg`SsqZ zh_T;Mi|{S7GHN+&K&-P!TWLFQ)#esLcUqc4^bLA;{rmnII@A2a(nY`ZfDEo*)JBuT~t z9_T4@P?(6@%&AQQ9^^+`teI^3loO5HKyZUYH|A;RIvqCVqUbGZ(o@|lt&D7ToPY%4 z{JVOFS-{3XWtDxR={^d_XNGYHp#T*J7O(a_Az&solYXm{IEdP_XS`Z zr1xUIio@&T3zef@C;g5koykA5T=mc+&P`yS_SSx)GyYqPKpr$dx`T&q?MqZqZt%BJ zLZ_V3cE83caW|KG;-y5g_tnrlQjlIbz|)=mT*o>alXG~>+-lY-((|#b0vAT|d!OvN z_J&P25CyPZw?I9_050Vkd zisV6xA`c>Ek;jnANOhzR@*L6xX@;~wS|J^fPDpR0A2JXbjEq3uK_(y{ARi)AkZH(l zWHquL*^K;v>_Lto$B^U5DdYlj9f^mBB*H^d;UQ`9kPLW84m_j)9`X<#(i{)zhKKaT zL;Bz$gYb|y@sPLhkg<5k`*_H7JY*pr@;x5%6CQFD4>^yATmwSzk!1KtDtsgZK9UH|b literal 17955 zcmd72cRbeL|37{`UG|oeojtNH3fZe<6^e|@$ZSa2GBdL?LrKVvGSaY;nJp`X?3Aq% zqI}Qu>8khZ^M3dFmM-fw@&+UBaX 
zw8e!BS1deioTR0%*tkFt$A1)CI$g59U?VMk!N$ql)xp8WX;;n%{*)9K!q{p|G2{s(-J6rGF)$ck-+Z z2A>D}<3HI#{4niLI*5CJv@Q52AtE#@8YT3b|4)bL|0FNWx{s9+i-(I`m7j@~mQ@@M zil6^abV2$*Nelii{9oD*#mNZA_n)oKfZ@F2EV*CjqqTu^qR*^PjopIxIzXi31&HxDf)MvR4xz%z)GLsX5Loo%6wY-GIP z;BnG_K@=4>QE6Pbt~GuA5I+V>jB^VN1%9j}?gNIFp80>UZvK-+{HrZpfBAs)EDBdF zNI^{cKMMXa5J46FUW5J@?+@W)e>p*nVPYj?{gqK@HnI~z^m+-do$AyOTf~|3`dyJ{ z9{!h2MD}0uQ79^UW)`?HsBkU9fHVN#uNFZ*3I6(If#t(Ekz( zY7hPW-v@)D!QSD>GOgtw3CHr}28Kx7E_;Gi6YoF=HFe*@in$yab6j$&!D@J>eM>Q5 z|A3uk#ZDhtDC9w*NJ#lf`Ten$smACQqb)0}oTx9A8AV<_wsp2%Zj#4$0gGvkcKevx z^MD?E3>TZ=@2QDF{r|dQF!*2z6XFLeI|d&s|Ep*Iua3qI1{8|r_Z0ZYYV{j}g*Q(9 zC_gHNF&2dli1d%s>e{ajPq^$qEq`D=786q=RU=gJjQPwBNoIAF6UScC6H6& zaZvpEDcCp(EXs}1piCN3v*?T4ak-dHt!a+yc8p z%$i>n`VB1SByhz6WKYJ&%EaGy|8XuS6}$FR?w0vv{2A~dG7|n!L4ZAsbLW5Ou*2eT z!r%j$Kgj>yy=ZrN;4i~(U8dh6|a3M|~Pa1G`+fgFzmN2TB=RwL%;p+fwC-;w^|5LgrrV6VG- zqRI|BW?>f;du>?*SDdlWld5y>yiRa{o_cFBMUTM%H5#ydzykj!LWH8C`Q!Tt z*sy;^`45}Fp%6a_mLBJ%FouTUcZ&fCzn8)ZK)zjs!Vvss3o>D(IE3lJpWn+9_=tKy z1$RA?{!sm^Z+@HMz$R6g!!M{PtfHYh{5A^E6H>)e1PDk>$09)C;A$cQl!LO5BfxFF zw*vu>#Q5NsThyPt`GdJQB?`D1!6HPMe;)xinuwNQ0K4$BX#@exZ%g5~Z4}m8P~#f{ zZhK#gMnG9DgDwpAXb=B+MKJPQ=9E-80@O@WvJt>lR56Tz_l;wsFhF4osd=$UmV0*;dyQy^fDSJD^(hneP9VSvKU zgj{+8gFTufndlNip7SH{r-1j#y%vZ(*HkXhg@Bf*+gu1ZQe%Au0aQ1vcef7gkrqz^q-fgDY8wGN1CRj>_GnIO z{ag|9T!h}wMg;I27lTUZzoAmL{svsKM|;;7HsTNg zn;zqJ1hDL5e}DiQU2mjcuv<|{_HYRbTYoTb*W6^lHxi^|qeL2M`#qX_&^UAtd5$qj z0@-r0W(GC*NXhE~nGFQQqgjyAiX91<+zogB1nB}?g~A?|(ZV*x`j*rPe4k1yMi=fc+g-y)z- zJu?diz;)8Dj}8d%EZP4K0o<40k0GF`^$RjPuqGEC4ZtOPv}cgW(}M_1H62CX7_h3O zPIr(J&-1er2K190*_Hxed4MD0(Q2PAHx8s z3s<)7&JM>wZ$YHQ>tc|M6K&3;q8fB;76(_ILt(eT}E!?$Xq zyGt_Lkn7zm7?$?bWGY5Oi>y9}5911(~;W4rjVxtb1$RH)< zay+XDh}@Z%LcpT<1H=mUt5R?%T(U=VA&FD&hyXRU12WvP?$>CDfz$ToCY;fHWHc zE*Xov8wP2OaSL3sM{_evA#V_Yl8>RggC@$+?=oBh3=7iV^gsZ5f@(Lu+#11KNXfM? 
zk}C*!(cyv*gFV{w_HSTCo(p}VfvgbN_D|IQNXgekNhbtY(YTJm0EG<{l`%#@8++64 z=n0HGrw^Cx(VVZ({O(TfPEllvC>d%~?6$AlrIFp0_=9Q#5&_o1!=eP?2o01X0}H$> z3Qv#z7v}tLWpW@2S&31VFhF4+Mu=!2;PVx-AOxfaP$nZF&!2er#h2T;dv`z`3ooUF ztM(Wjoy`C;zp>t>p&^?`=H3l0WQvXPQVAt|GXuKOR<0OY4^K<%|FB;$O^9S z|Cb>Cq_i71%5iU&e#cD&zCFPmgHivAN|>IJ|4(fo^^a&_L;o4*|2MA>;1mD8!BXWX z{?`o#JAnfDc82J`59)W@{V!t&8CKwr-`)4cD^+6^xXX}^2IpAem$enDJ8>sDbEfn6Ly)#rNn*nRCr#u zzV_wHs=Vzo6|xKfE@pIL_I;JejB&IDLrngg5_z-zKhB0QcJ*>6nX9gceKr6~*DF|4 z@vvo0ZcJ~H&%lZMVV8^VUoLXKW-KdXoS`-7n{W9dkO0I0^V&2utosBC)wF6i3?Fu~ zke~lzL~rugd!gDoC5**ZUI^xZpDR?Rro-OtYy^>VU~1kyS5rDIym|C38s|aFiIMJy4Ix+Sc!`03aN8V+slp$oe#Guz zmTUdu%3Qj3PonKT-2nGFh1-E-u{V)*0k+%xGx4e%F;_oE@Q}Fxrb@Rp=DSGTelJ+u&XkN15TLb6Hiz>WPReZm5BZG@B zr?WWiQEyUkrS?UW0$U2eL!(gCYC5&b729STDvdtEJw~s@`*|o1ont63kT`Ksifyx{ z3v`?!Z1;ScxEixRt?H`r&z9{&V+C=A#-_N zRWiOlYA{Un_DR|KTTudx+g}f^Q>R7K#|(>ke8lP1ys9Cw)@!a_l%Q0D=Fd;v;ZSs| zctmqD`r8ESkJV}?+>72{+wTC-1ARwrr|nubY0&qE_~rCM+TsiVc<(m#-b3AW}D#UqnYNlzEss_Fquxx0doId4BhFv#_izt-NP#i!}9rP>ZOHXWj7vPO53RN+Bc>& zLCys|VXf4imzH|9LKuJ;A;=^=4kr`?3eMGkx^pM5d}6Jday5S%L&NtaZFc&ao`C=( z@DCn?w3h8AKQ=z%O=K#Ij6 z>sgoeI|L<{SpWwftMx-Na=9D!JWjlMr^8-|?o&!QN`F-9>IwV`ukF!}EeXI=gn37v zmzd`1kQx=XX!p1GP)!szX&X zYlQuYt;TmD$-)B|5r;Y=z{jAxBi_Qz43 zKz*$E*xjkhaeF|1z`&-+yM|>w$Kvw>IBWXByiU`Y@fEtJ@S~Neu!+=btp{s(tCJg? ztP5Um2RT`Xmd@DZkFdVCwwHTT;HX706?Fj(s=`{L||@G0XD-)6Iu5uk2~d{ehEhH*E$DzosEwtaK*K!40A zAFvDSnSG1k3AXY6DC#U(7gI(=8ZSjpsJL$k>z0YRH5~8Kn~c-TQC;yq${DHjqnHr# z9C_P!z_>a@8}I3VOLe%3=4+A|QhjOEH(5@S1^_J)`e=Qj~S{I?>WcEb$=0SF;8#~IV6PHE!!N!2t#Tc!2v+sP4(rs1F+rvr}kjOzkI({y!$I729J?FdUfUYdXG;e6pNvDVQ@}Fc! zl4tjq}afl zbUi1^4lZ*A8Zln)X0tF&xlVE7b$Gk$Hyn@VojoW0IWmq9>i0#v*WNhX(b+mx(?}py zJ}=)C%X#xM?mKY|BP%^m&HW!jU%8~bx=o53>?ZA9#z&sTMjs%MXOzBi0S*=R2CSulK zOKf`S$enxg*K;#JPiprmz?%_R?;~3r3NvqL-k8yd$kZ18+0b@L=I}yzKS}<;@{?oU zfj*%96ycW+sp~-(wX^elqNs=DFCO0?`y%PAwV`6nxDweT6X_om>C%7$^V)M`uiU=; zp)5I7C6kgeqIKi_$%{K3>CG$k=`<1NoESB6cFXBfevE&qH+4R$o8oWEFl{YHwS9BNNGmJKy? 
zke;4i8cBA$n?3jSvE+r797+WcFU&jivOVT$haqv`^UU$l#xkF(Yg+z7)9BL5aQE@` z%syL3oLzwxn|bn?X}Z>*b;>;#LPE#qsOm&H=h>%1h~A-dGgU-@9yQS0vY3>7v7?r{ z8pTrdA(P~ZQu9XwuptKG+4TxJSx(b6_-in)Ch zk$KpXe(vqU2ziPMhy&Jhm#{RDy0atKrRLeiz8T9$?w?9D;Luv{+jWub$@)OtalnCj zS0wwRUk7YtWahPe7nP2Bmf?Bf`t(+DNN2cy2gtq0B0Y_zkYJ@>lYWl zesJ;Y_ZERTyMrant_O!rb)}!mzg5+F?t5sqcieKpRnmsP*lA8*~REY%kpqMCWuAb;l+y)TJqI?6I%ZhlV`0~x~6~7 zSurG^{`tc&O(L4SBC-x#m4U30G-iFLE=aGt7@*-$vEXVyC>C)`^Y%oBsKK%Yp& zbKXV0m&)nVIv*Z9M56GPXs9ysgV&_txyZFzq0H~6!8m{~u-m0B%y}J1YP#XzLp~rs zd&+o)FV~v%GHIua7AN=jvuXi2yTS6L*U-&MrBbgJC=AjpdRpxY^mP;Xq&wped#P`Q zO#QOkrQFLWvpjhXPgtDb{Hwc?kDnzDnY6D_J@wILYViEXsSAkvL0*kz@N^_lsMEi_ zm1x&<`PS9{5qM{=bGb-0s(8n@U0E}4md`AQW>h0e|A2;c5O+|R*j2? zlGR47#AV%}V65WiUAc?!%N6L4dUDq15js4ltgA?@ZBNq_d1TqRSSn;iW}V|2**Mg0 zcNfPi_jdVFyIA;W^xBrd?2iGB(;PNFH%=>>p%Mb^WkqahK!2dXJ0;3P?^#u|yQoQ7 z&Li?eiTgX<9x`e6@ha*6uml~hydBFI^F`+t=abjO)+v-bl z?44pJQL{nVIgtF666+jT!w2viGLu=hG*3Knvu%ATXMpFOO0GT}$Kh#)1svEe+Kc|a zXvMG?ee0`cm$dACCk2D`)dY+em?!a;>rtd-v#zbxfWwWJNGFlW zkG4kDi{4G+!9^_&f)*}>uvrvFf>X$gsL1slwAoEYpNeIppfm+7pgC?vnTl2GWCY6^ zlKd<-Lli$j4`N2;N2Ev=oHS~W*sA>ekiZmoQxD%;<`}m*@b4g?o*go!%nr+CeJ8?p zue*ruz+N7_rV;f_UL;-&XIqy6~mL ze7r~ZeTZBZcCTc-9cs$FpVRpv@o7w@CaD$RAiUmp@(EZDa)@zQH~M;Uh=Ep?wsoDI z>d(%f>HQDIGpvCg9EYeU8-3IGvzEAZH@iCV$czUpq%z4$`ulz9{2Y_%_aQfnface$ zIQUY5ouc-Hor;r{E$_XkbGq8WX4Yj~MK#@RuLR7(adA`}YF#THdT{#s*~e>Yd@8gW z?+iOU><>Fn=eS6cOc1;T8?_&NX4uR!_Qx)GIZn4`>QbH{O6wC_ebY2*h|SVPLL;w{I!? 
zq%7gd9X>{dTtSRJ9ov&5sIhmfLK5IkPZ69!Pz6odQS&#f2L0IZM`waDT1IVWhZ30Z zQ-|bl%30j$AFu-)n3sKOve)#=Q^~@L1OYYP*DqI|6nr&Xsvq!3ntyX^Y@!lOU`03% z*)_J>c*Dd|Pw;9j@b^3aQ}^BX#ca!od*G6)6P>7*m) zbaz-6ALHVkA!}Rs=x*gN?qZU6wvj`^-~c(8vW}nbJknMv(U|21QgHI~xViqE#v}i9 zZ_7tU8kvP&p^jvkM0Jt8&WJJE$e06tj)24r@~q$iKH&|Klh$D!_>-ZiTc<7LX?SZg zzTVAsx+D;k#?2mwNnkw9tC=@U_DnQ zjh|1GvP^v zIrMx`J8wwZ=k*%Pf1e*S@>`Slqj!Sku-+*URf*l0OJ7WowydwrADRg_NrjN@y6=C zova5V4!n<9&Ev*quV*)S?-Rh;UCYNlaii6dOwS`NycW%aBKgXdX(&f_Va}%{T`268 zpcjY(zUepq_+Z7zfVJMm{6=rSY1a$p$++X0mg3@3>Kxjrtp;pdU zOovI(cO@Cayq$$+_$5uXj8@yVL#{Vcsrdj7IE$jj4i1rZ7DfqeH}=HJ_=*@^QeE44 zT|;=5y~bkaslu?9GS0uD54prX5jEde+`HaTelA;FPzqivZ#HyX^%six_^?n9NWsZZ zn~jC$4l5s@(`0Ts4FO%gT(I^>j3MJ?RJJ%tgUo7+AMg*h>-15d`J<&0cXN4VtH=Dpk>fbK^qVhU_ntpr!mgfC3TgP~@nzdC&JJItb}P;>8C$Rb z(+Ej^w5T&$J{yN)(l;sF`Q$G5nrJioVVCbfmgRDqqi@jf1A%XBfclJ1kt1bkhzxy; z=-E{%(^M8InIm|Wn(Bt$gchVSp*=yM-(cQZ9~#0nSy$WpH7!p3L{jU~Q*Q4ct7x{Z z`{bf@#&er>asKf#hAt;vxTnP1`{)(>b2la8lKln@t_y|yVly)ON3U;#$%mZfjeU8f zm5tQwOA6X`iHai)3cebC`a*j6QDtWyzTfN5oL6vq?yG_4(7}+o#!Zw)6|0Q7hQVo> zX+`UAHO~?+eZ^Rpf_)Ih0kRm`4d!ntPF7d*!qxo)U!KvY;N`x+UNZr`sJ*FY9C8qXme=j-?*cslKL&+FFKs=G8S=L(| z52baFOcvj>)8OMN2{AL_h0TgQ&0~5{tzy|FwS~s(V$r}RNNkzBN8&tKJG&-40O4?uGAKVLGJpDKUk;Kk~<0;i@@@)y(mRAaI@cD9ms+=8yx}P>0 zd3dhHd#~c`{%t*o1MSBJ^a^+nN*BkV?+Lvn_w(yB^!jF>#H~K4^-f`_(?Bfv-Od-B zp727I_QHdt+$XSTq$2iGrMr9H zjT}D8z{yWW(RI{)*eM`vAsy?f%~mziL2}4IqJXvVa}Tf5?b3`Sz=7>jiQM00n=WVg zT>Dtwn5k)-EH89{BlF7LmM3T<-bmxQWDqaRdt!BvTgs_h^8J-mUscoNWoniLha(7X z+7pYZ3fW9IPs2t*@>^Iee79sD&3LPHp<1X|Q+I2_%8TmF*y1t$kSW$#t<_Cj90?C+ zq?0@z1QY0=Xh_XvAo4$Y^42@=hP0J=N5StWmxoh;77q|Swm8;)kvm;VK=5w$n^T+aCtgT7O)8eh>Hu^G4mwgKjS@R3w$nEkvaIy{&FPDwJE9z9M$w$@#(f z*r*+x-5qS}u|B=}W_n5osT_4W)slzIxiO@frzhVl8=o@v4g~Yfj~D1U;m1ua|76HN z>t_6V^y+hBKO%^tuo%u$qkP1hY8V$ z?s&E8STk+QvBksl7u;c?^0Ww9z1#xc=n~n{DX8cQ8X4)uTVQ_x+Z74K&+>-s_m#Hmw#XQsjCkY{(-M`JtOb`cb_bu(u1H9kV zre?g1Tiie7xKSt>U)dm(Ovjco~ra%f#es3O3r`fZnosQL>(sRFdKI{}`aY2*1LPEj#@E1I}H0d1BAN+t= 
z5o4pu!FS{A4q@+j#z@f5It9#Kf|*U-l<2Il29K`%JcYCCadRa0OwPCU2U%ydy9Sm@ z?It72%>$EU*+iZalu}k20X-DBvqR%nPrW!rqbEH2kKFDc(;90zdvqEV@DS+1cE_M+_>@g48;|gdo+aDl8(0h%O3^u7 zU$)kHl661HrpR%RaR4be`B`Plr)j-R%sW5i6mt1^V&~@|#h0N2d9~)BhTZo+`c(WF zv>&*8L|GJVoO7AI9ClxwkiC!flJ{4}CiUEK=Gi{H%NE~6Y>oeJ2zg0uU z>!177f?W}|D+u(0pFQhNEZ>?+eh_y(eQsEhUUOlntzK$izEIkeLWO6f0H^o2d%Y(h zR#-1U?tS^=`f9!TmnW|VIw;4c6wzmOx3LeoOWiRT<8%kH{wjy{zkx)PIC}3irf0yQ#3>np}*tV$;&{ z$y|)T1f<~PcgE}VlC85s`LK$VEgdu4t?+^p^(rByq4x4362W(wbGtN@-+7|JOI9k6 z6Ae|#u%D(%gX>pWl^vF45?e7MIuAun>|TIzAOy&)XEY7d{n6R>6S|{2X6INGQFl}z z=L(KwmHq6ALuG0&asGX?OTaX!bEMA5FDpz>d(nq7&GR%#Zst`m9@Ag;jWGoW8b4v6 z7n&Q+);g9OCa$6oj6cc)$&agF+Gf~xuxT(^WM0(Y-;2|WCbE^y8GO~h-g5ThkIsTT zbsJu$FH85)a;8_+ofDjVXh@JqFbVX9qy+E2$1GUtYK<&po>CWPtk(Lh*WcbUEl!qJ z-NuaLy`wH4sbNvNT`Sc*VoekC@n!s@^{h3yVBhlw7N+>Tuvvc5k(Qu}~C z^!>*d*3a|}t-rp{vhm(u4>&N7AUw%uI%(tFYzrTWi@W*N2Zyf88kmM0+jJ$xmWCbX z!hJ!Ac_eQsI(D-HC0iTd7e0MKFrzuN^nHF=Yr>tCl~3gR-<1QqqCl@sYyE`F6Zh?{ zf_DFtkIx*pIcBy@jH3LJap3FxY<7KL6X3wSm$7jnxdwHLUHh^;pC68=*x_(XZ<`6P z=t}1Dc20@B=#6U^SB_5!=SSiLCcfU%{G9pWuu9%d3Og?EgLywo4Lz1rg!F-y7|2z* z+dq;W^axH)cfKuXH@t9XB!rf+oX&fjb|ZPdz_=_Ir*|)MQ<{kPnZ^6OY5lhuL>|Pq zed3cL8twkE0GaU41QT1J1(ISd5oeBUCa=j_?BQlX_h9wF>O*R_DUVB;W6yU#)vy(B z1N{rzoz8>W8*@tPq=zJPECxm~87$pp)F-PZXD)C$My{1owF51fM|jmBsrOo1#Uk6% zMC!J>hJT{c#AwViK^o16DnA|>R=MS(dnY&-k~aF4QJ$nE|0I>>!VTk z=>hEm+r8qMBXW%PlVmO4QP%gz6vzwoC%-IHsyRR(N@>@VUG{C@>}DH2*9t{nF|_Jd zSn;S!-aJ`JB3^3M5F{>p&L^>twHipl$xo+Gw_)6EF~LG1eVDPE+qU+d%sXPCBb0%T znHpOUNaXqfNgU+;J&ZT6494MK)jSqK9?KOa`<$lm^XK_{?cv4I^X^U!1f;MW3d0aL zIjV3+>)qzKreSM`ZWWKiZ3kbTFN`78;vYT*2aW-~SFm0{a{tF?F{jh6S=uxi0>(2wO$CA(!SZ?8uf_P!OgDYrm z%s$%32hO7RWA3OK6Y6xHZPylFx-KNPP|9Jh_a2OAn0MeylzYj*f$e5O!|HIm%<-1I zIe$~FA+;7s-zu$4qC6g)eMLS^z;ViW*i{UD_HQvxQx{ui4p`~wO?W;~FxQ_$7 z@Op2za>6y8GLeYp1QpdewTxEt{_*@Cor+_wNr?x}p!m9af!$+(m#G_BVt%86r)-j- z(lqZNpML>yg1ZV8)uaon^nETR?n$5r$NM_+%v;Z@be<(^Md9ejT~(^gi+onjW7#>2 zT3a)Q_LQPH|5~3e3`@DhXg79OaS|?AUGQv5byqQqR1F25mNumo9rHkW$({^5tXKyW0nKhTEm5&57?ekf{x6(Arg1 
zY66-c{2iRx+o!IUD`t1(*{;pAXGd@0U66D1jqHlk4D$cr`(!j>1*f;^pXkffBVY2O z+9-ctBdHpnw43x`YOPZJ1D$&@YTEh>pg&-}Jof_!oI^8I;}3BrHTwZ_-NE4MJw^;&Na+EQLiV*VW=^%fadW&A%1kAFOw8%*|Pu-_iez zAft8Yg{-`vZH5*TG*-OS0_FX}URma=VEh3db-nhyGLE1uV`KiO<2pQ8&7NuKp5Eu$aC`4@Vm zo?zKUo~k`Jyxu-fH99S!g7fBA`?j#+IyCQU>)Kb~pDfTbm57vAKA(Cba~ex&_w}}h zL)VvVTO({GZDxxZ^LdIhYk&h1+p2>@PoBBhDHfsz3PTII;AYV@r?(3eF^^S4f`e|@ z?|cI!*uPP=9K)@_qhyn)@c!brxenoK80zoYjgRzVPO(l9lX9;EyYM*eny9K&xR<^| zPH3#5uSw4JSSCZ$-oGAlID|Ji9JM3x4Hrj|t<>~~kPj3Hg+NhIG;|wEg6=>mPzIC_ z6+tD?Q>Yqhgu0-9Xabsr7NJ#W9om6N@gPP#hyxE2!-J&pAbC7U0}ncb2N~l*SMeZE zJSYGUx`hWN;z1AapvQR7Gd!pX59-8&-r+%Wc+h7&=o=pN9S TestResult { #[test] fn parsing_account_from_file() -> TestResult { + use miden_protocol::account::auth::AuthScheme; use miden_protocol::account::{AccountFile, AccountStorageMode, AccountType}; - use miden_standards::AuthScheme; + use miden_standards::AuthMethod; use miden_standards::account::wallets::create_basic_wallet; use tempfile::tempdir; @@ -97,7 +98,9 @@ fn parsing_account_from_file() -> TestResult { let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), ); - let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; + let auth = AuthMethod::SingleSig { + approver: (secret_key.public_key().into(), AuthScheme::Falcon512Rpo), + }; let test_account = create_basic_wallet( init_seed, @@ -138,8 +141,9 @@ path = "test_account.mac" #[test] fn parsing_native_faucet_from_file() -> TestResult { + use miden_protocol::account::auth::AuthScheme; use miden_protocol::account::{AccountBuilder, AccountFile, AccountStorageMode, AccountType}; - use miden_standards::account::auth::AuthFalcon512Rpo; + use miden_standards::account::auth::AuthSingleSig; use tempfile::tempdir; // Create a temporary directory for our test files @@ -152,7 +156,7 @@ fn parsing_native_faucet_from_file() -> TestResult { let secret_key = 
miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), ); - let auth = AuthFalcon512Rpo::new(secret_key.public_key().into()); + let auth = AuthSingleSig::new(secret_key.public_key().into(), AuthScheme::Falcon512Rpo); let faucet_component = BasicFungibleFaucet::new(TokenSymbol::new("MIDEN").unwrap(), 6, Felt::new(1_000_000_000))?; @@ -198,8 +202,9 @@ verification_base_fee = 0 #[test] fn native_faucet_from_file_must_be_faucet_type() -> TestResult { + use miden_protocol::account::auth::AuthScheme; use miden_protocol::account::{AccountFile, AccountStorageMode, AccountType}; - use miden_standards::AuthScheme; + use miden_standards::AuthMethod; use miden_standards::account::wallets::create_basic_wallet; use tempfile::tempdir; @@ -213,7 +218,9 @@ fn native_faucet_from_file_must_be_faucet_type() -> TestResult { let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), ); - let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; + let auth = AuthMethod::SingleSig { + approver: (secret_key.public_key().into(), AuthScheme::Falcon512Rpo), + }; let regular_account = create_basic_wallet( init_seed, diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 9e90bfa29..79bdbd0c8 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -447,7 +447,7 @@ fn test_storage_map_incremental_updates() { #[test] fn test_empty_storage_map_entries_query() { - use miden_protocol::account::auth::PublicKeyCommitment; + use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::{ AccountBuilder, @@ -457,7 +457,7 @@ fn test_empty_storage_map_entries_query() { StorageMap, StorageSlot, }; - use 
miden_standards::account::auth::AuthFalcon512Rpo; + use miden_standards::account::auth::AuthSingleSig; use miden_standards::code_builder::CodeBuilder; let mut forest = InnerForest::new(); @@ -482,7 +482,10 @@ fn test_empty_storage_map_entries_query() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); From 69c3ac95aa7570491b2be2b5682f42e965d3dafa Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 2 Mar 2026 14:40:24 +0100 Subject: [PATCH 63/77] fix(ci): avoid race for port (#1727) --- crates/rpc/src/tests.rs | 55 +++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 172f2266a..0ac510fd3 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -90,8 +90,8 @@ fn build_test_proven_tx(account: &Account, delta: &AccountDelta) -> ProvenTransa #[tokio::test] async fn rpc_server_accepts_requests_without_accept_header() { // Start the RPC. - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; // Override the client so that the ACCEPT header is not set. let mut rpc_client = { @@ -117,8 +117,8 @@ async fn rpc_server_accepts_requests_without_accept_header() { #[tokio::test] async fn rpc_server_accepts_requests_with_accept_header() { // Start the RPC. 
- let (mut rpc_client, _, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (mut rpc_client, _, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; // Send any request to the RPC. let response = send_request(&mut rpc_client).await; @@ -134,8 +134,9 @@ async fn rpc_server_accepts_requests_with_accept_header() { async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { for version in ["1.9.0", "0.8.1", "0.8.0", "0.999.0", "99.0.0"] { // Start the RPC. - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = + start_store(store_listener).await; // Recreate the RPC client with an invalid version. let url = rpc_addr.to_string(); @@ -170,14 +171,14 @@ async fn rpc_startup_is_robust_to_network_failures() { // connect to each other on startup and that they reconnect after the store is restarted. // Start the RPC. - let (mut rpc_client, _, store_addr) = start_rpc().await; + let (mut rpc_client, _, store_listener) = start_rpc().await; // Test: requests against RPC api should fail immediately let response = send_request(&mut rpc_client).await; assert!(response.is_err()); // Start the store. 
- let (store_runtime, data_directory, _genesis) = start_store(store_addr).await; + let (store_runtime, data_directory, _genesis, store_addr) = start_store(store_listener).await; // Test: send request against RPC api and should succeed let response = send_request(&mut rpc_client).await; @@ -200,8 +201,8 @@ async fn rpc_startup_is_robust_to_network_failures() { #[tokio::test] async fn rpc_server_has_web_support() { // Start server - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; // Send a status request let client = reqwest::Client::new(); @@ -243,8 +244,8 @@ async fn rpc_server_has_web_support() { #[tokio::test] async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { // Start the RPC. - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, genesis, _store_addr) = start_store(store_listener).await; // Wait for the store to be ready before sending requests. tokio::time::sleep(Duration::from_millis(100)).await; @@ -297,8 +298,8 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { #[tokio::test] async fn rpc_server_rejects_tx_submissions_without_genesis() { // Start the RPC. - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; // Override the client so that the ACCEPT header is not set. 
let mut rpc_client = @@ -349,12 +350,9 @@ async fn send_request( /// Binds a socket on an available port, runs the RPC server on it, and /// returns a client to talk to the server, along with the socket address. -async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) { - let store_addr = { - let store_listener = - TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); - store_listener.local_addr().expect("store should get a local address") - }; +async fn start_rpc() -> (RpcClient, std::net::SocketAddr, TcpListener) { + let store_listener = TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); + let store_addr = store_listener.local_addr().expect("store should get a local address"); let block_producer_addr = { let block_producer_listener = TcpListener::bind("127.0.0.1:0").await.expect("Failed to bind block-producer"); @@ -397,10 +395,10 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) .await .expect("Failed to build client"); - (rpc_client, rpc_addr, store_addr) + (rpc_client, rpc_addr, store_listener) } -async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { +async fn start_store(store_listener: TcpListener) -> (Runtime, TempDir, Word, SocketAddr) { // Start the store. 
let data_directory = tempfile::tempdir().expect("tempdir should be created"); @@ -411,7 +409,9 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { .await .expect("store should bootstrap"); let dir = data_directory.path().to_path_buf(); - let rpc_listener = TcpListener::bind(store_addr).await.expect("store should bind a port"); + let store_addr = + store_listener.local_addr().expect("store listener should get a local address"); + let rpc_listener = store_listener; let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") .await .expect("Failed to bind store ntx-builder gRPC endpoint"); @@ -439,6 +439,7 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { store_runtime, data_directory, genesis_state.into_block().await.unwrap().inner().header().commitment(), + store_addr, ) } @@ -482,8 +483,8 @@ async fn restart_store(store_addr: SocketAddr, data_directory: &std::path::Path) #[tokio::test] async fn get_limits_endpoint() { // Start the RPC and store - let (mut rpc_client, _rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (mut rpc_client, _rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; // Call the get_limits endpoint let response = rpc_client.get_limits(()).await.expect("get_limits should succeed"); @@ -541,8 +542,8 @@ async fn get_limits_endpoint() { #[tokio::test] async fn sync_chain_mmr_returns_delta() { - let (mut rpc_client, _rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (mut rpc_client, _rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; let request = proto::rpc::SyncChainMmrRequest { block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None 
}), From 731e57029e01ab5037f356812a1b8ccd863365f7 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 2 Mar 2026 16:10:33 +0100 Subject: [PATCH 64/77] chore/diesel: add comment for schema change process (#1731) --- crates/store/src/db/mod.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 74aa8ce3b..9fe6aec75 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -47,6 +47,24 @@ mod tests; pub(crate) mod models; /// [diesel](https://diesel.rs) generated schema +/// +/// ```sh +/// cargo binstall diesel_cli +/// sqlite3 -init ./src/db/migrations/001-init.sql ephemeral_setup.db "" +/// diesel setup --database-url=./ephemeral_setup.db +/// diesel print-schema > src/db/schema.rs +/// ``` +/// +/// which assumes an _existing_ database. +/// +/// Unfortunately, there is no systematic way of modifying the schema other +/// than patching (in the diff sense) which is brittle at best. +/// So the above must be followed by a manual editing step, for now it's +/// limited to: +/// +/// * `i64`/`u64` being represented as `BigInt` +/// +/// The list might be extended. 
pub(crate) mod schema; pub type Result = std::result::Result; From 497f5cffcfefcc2bc28fbd58bfc126fff8067fe8 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 2 Mar 2026 19:41:18 +0100 Subject: [PATCH 65/77] chore: extract misc changes from #1567 (#1721) --- bin/node/src/commands/mod.rs | 2 +- .../store/src/db/models/queries/accounts.rs | 1 + crates/store/src/inner_forest/mod.rs | 105 +++++++++--------- crates/store/src/state/apply_block.rs | 1 + crates/store/src/state/loader.rs | 1 - crates/validator/src/lib.rs | 2 +- crates/validator/src/signers/mod.rs | 4 +- 7 files changed, 58 insertions(+), 58 deletions(-) diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index b7ef3c3c5..3ec8572d7 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -121,7 +121,7 @@ pub struct BundledValidatorConfig { } impl BundledValidatorConfig { - /// Converts the [`ValidatorConfig`] into a URL and an optional [`SocketAddr`]. + /// Converts the [`BundledValidatorConfig`] into a URL and an optional [`SocketAddr`]. /// /// If the `validator_url` is set, it returns the URL and `None` for the [`SocketAddr`]. 
/// diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index ee0c5747f..1b6445c8e 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -1113,6 +1113,7 @@ pub(crate) fn upsert_accounts( .execute(conn)?; // insert pending storage map entries + // TODO consider batching for (acc_id, slot_name, key, value) in pending_storage_inserts { insert_account_storage_map_value(conn, acc_id, block_num, slot_name, key, value)?; } diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 3c22684e7..dbff597c6 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -13,7 +13,7 @@ use miden_protocol::asset::{Asset, AssetVaultKey, AssetWitness, FungibleAsset}; use miden_protocol::block::BlockNumber; use miden_protocol::crypto::merkle::smt::{SMT_DEPTH, SmtForest}; use miden_protocol::crypto::merkle::{EmptySubtreeRoots, MerkleError}; -use miden_protocol::errors::{AssetError, StorageMapError}; +use miden_protocol::errors::{AccountError, AssetError, StorageMapError}; use miden_protocol::{EMPTY_WORD, Word}; use thiserror::Error; @@ -25,6 +25,12 @@ mod tests; #[derive(Debug, Error)] pub enum InnerForestError { + #[error(transparent)] + Account(#[from] AccountError), + #[error(transparent)] + Asset(#[from] AssetError), + #[error(transparent)] + Merkle(#[from] MerkleError), #[error( "balance underflow: account {account_id}, faucet {faucet_id}, \ previous balance {prev_balance}, delta {delta}" @@ -89,6 +95,14 @@ impl InnerForest { *EmptySubtreeRoots::entry(SMT_DEPTH, 0) } + /// Retrieves the most recent vault root for an account. 
+ fn get_latest_vault_root(&self, account_id: AccountId) -> Word { + self.vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::MAX)) + .next_back() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) + } + /// Retrieves a vault root for the specified account at or before the specified block. pub(crate) fn get_vault_root( &self, @@ -268,8 +282,20 @@ impl InnerForest { let account_id = delta.id(); let is_full_state = delta.is_full_state(); + #[cfg(debug_assertions)] + if is_full_state { + let has_vault_root = self.vault_roots.keys().any(|(id, _)| *id == account_id); + let has_storage_root = self.storage_map_roots.keys().any(|(id, ..)| *id == account_id); + let has_storage_entries = self.storage_entries.keys().any(|(id, ..)| *id == account_id); + + assert!( + !has_vault_root && !has_storage_root && !has_storage_entries, + "full-state delta should not be applied to existing account" + ); + } + if is_full_state { - self.insert_account_vault(block_num, account_id, delta.vault()); + self.insert_account_vault(block_num, account_id, delta.vault())?; } else if !delta.vault().is_empty() { self.update_account_vault(block_num, account_id, delta.vault())?; } @@ -283,61 +309,37 @@ impl InnerForest { Ok(()) } - // ASSET VAULT DELTA PROCESSING - // -------------------------------------------------------------------------------------------- - - /// Retrieves the most recent vault SMT root for an account. If no vault root is found for the - /// account, returns an empty SMT root. - fn get_latest_vault_root(&self, account_id: AccountId) -> Word { - self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::MAX)) - .next_back() - .map_or_else(Self::empty_smt_root, |(_, root)| *root) - } - - /// Inserts asset vault data into the forest for the specified account. Assumes that asset - /// vault for this account does not yet exist in the forest. 
fn insert_account_vault( &mut self, block_num: BlockNumber, account_id: AccountId, - delta: &AccountVaultDelta, - ) { - // get the current vault root for the account, and make sure it is empty + vault_delta: &AccountVaultDelta, + ) -> Result<(), InnerForestError> { let prev_root = self.get_latest_vault_root(account_id); assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); - // if there are no assets in the vault, add a root of an empty SMT to the vault roots map - // so that the map has entries for all accounts, and then return (i.e., no need to insert - // anything into the forest) - if delta.is_empty() { + if vault_delta.is_empty() { self.vault_roots.insert((account_id, block_num), prev_root); - return; + return Ok(()); } let mut entries: Vec<(Word, Word)> = Vec::new(); - // process fungible assets - for (faucet_id, amount_delta) in delta.fungible().iter() { + for (faucet_id, amount_delta) in vault_delta.fungible().iter() { let amount = (*amount_delta).try_into().expect("full-state amount should be non-negative"); - let asset = FungibleAsset::new(*faucet_id, amount).expect("valid faucet id"); + let asset = FungibleAsset::new(*faucet_id, amount)?; entries.push((asset.vault_key().into(), asset.into())); } - // process non-fungible assets - for (&asset, _action) in delta.non_fungible().iter() { - // TODO: assert that action is addition + for (&asset, action) in vault_delta.non_fungible().iter() { + debug_assert_eq!(action, &NonFungibleDeltaAction::Add); entries.push((asset.vault_key().into(), asset.into())); } - assert!(!entries.is_empty(), "non-empty delta should contain entries"); let num_entries = entries.len(); - let new_root = self - .forest - .batch_insert(prev_root, entries) - .expect("forest insertion should succeed"); + let new_root = self.forest.batch_insert(prev_root, entries)?; self.vault_roots.insert((account_id, block_num), new_root); @@ -348,6 +350,7 @@ impl InnerForest { vault_entries = num_entries, "Inserted vault 
into forest" ); + Ok(()) } /// Updates the forest with vault changes from a delta. The vault delta is assumed to be @@ -363,19 +366,15 @@ impl InnerForest { &mut self, block_num: BlockNumber, account_id: AccountId, - delta: &AccountVaultDelta, - ) -> Result<(), InnerForestError> { - assert!(!delta.is_empty(), "expected the delta not to be empty"); - - // get the previous vault root; the root could be for an empty or non-empty SMT + vault_delta: &AccountVaultDelta, + ) -> Result { let prev_root = self.get_latest_vault_root(account_id); let mut entries: Vec<(Word, Word)> = Vec::new(); // Process fungible assets - for (faucet_id, amount_delta) in delta.fungible().iter() { - let key: Word = - FungibleAsset::new(*faucet_id, 0).expect("valid faucet id").vault_key().into(); + for (faucet_id, amount_delta) in vault_delta.fungible().iter() { + let key: Word = FungibleAsset::new(*faucet_id, 0)?.vault_key().into(); let new_amount = { // amount delta is a change that must be applied to previous balance. 
@@ -402,13 +401,13 @@ impl InnerForest { let value = if new_amount == 0 { EMPTY_WORD } else { - FungibleAsset::new(*faucet_id, new_amount).expect("valid fungible asset").into() + FungibleAsset::new(*faucet_id, new_amount)?.into() }; entries.push((key, value)); } // Process non-fungible assets - for (asset, action) in delta.non_fungible().iter() { + for (asset, action) in vault_delta.non_fungible().iter() { let value = match action { NonFungibleDeltaAction::Add => Word::from(Asset::NonFungible(*asset)), NonFungibleDeltaAction::Remove => EMPTY_WORD, @@ -416,13 +415,14 @@ impl InnerForest { entries.push((asset.vault_key().into(), value)); } - assert!(!entries.is_empty(), "non-empty delta should contain entries"); + if entries.is_empty() { + self.vault_roots.insert((account_id, block_num), prev_root); + return Ok(prev_root); + } + let num_entries = entries.len(); - let new_root = self - .forest - .batch_insert(prev_root, entries) - .expect("forest insertion should succeed"); + let new_root = self.forest.batch_insert(prev_root, entries)?; self.vault_roots.insert((account_id, block_num), new_root); @@ -433,14 +433,13 @@ impl InnerForest { vault_entries = num_entries, "Updated vault in forest" ); - Ok(()) + Ok(new_root) } // STORAGE MAP DELTA PROCESSING // -------------------------------------------------------------------------------------------- - /// Retrieves the most recent storage map SMT root for an account slot. If no storage root is - /// found for the slot, returns an empty SMT root. + /// Retrieves the most recent storage map SMT root for an account slot. 
fn get_latest_storage_map_root( &self, account_id: AccountId, diff --git a/crates/store/src/state/apply_block.rs b/crates/store/src/state/apply_block.rs index 145432c97..7949fcbeb 100644 --- a/crates/store/src/state/apply_block.rs +++ b/crates/store/src/state/apply_block.rs @@ -277,6 +277,7 @@ impl State { .account_tree .apply_mutations(account_tree_update) .expect("Unreachable: old account tree root must be checked before this step"); + inner.blockchain.push(block_commitment); Ok(()) diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index c8c886148..14de0471f 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -376,7 +376,6 @@ pub async fn load_smt_forest( StateInitializationError::AccountToDeltaConversionFailed(e.to_string()) })?; - // Use the unified update method (will recognize it's a full-state delta) forest.update_account(block_num, &delta)?; } diff --git a/crates/validator/src/lib.rs b/crates/validator/src/lib.rs index 44f883bfc..185b9dfc6 100644 --- a/crates/validator/src/lib.rs +++ b/crates/validator/src/lib.rs @@ -5,7 +5,7 @@ mod signers; mod tx_validation; pub use server::Validator; -pub use signers::ValidatorSigner; +pub use signers::{KmsSigner, ValidatorSigner}; // CONSTANTS // ================================================================================================= diff --git a/crates/validator/src/signers/mod.rs b/crates/validator/src/signers/mod.rs index 9656e045c..21bbeaa7a 100644 --- a/crates/validator/src/signers/mod.rs +++ b/crates/validator/src/signers/mod.rs @@ -16,8 +16,8 @@ pub enum ValidatorSigner { impl ValidatorSigner { /// Constructs a signer which uses an AWS KMS key for signing. /// - /// See [`KmsSigner::new`] for details as to env var configuration and AWS IAM policies required - /// to use this functionality. + /// See [`KmsSigner`] for details as to env var configuration and AWS IAM policies + /// required to use this functionality. 
pub async fn new_kms(key_id: impl Into) -> anyhow::Result { let kms_signer = KmsSigner::new(key_id).await?; Ok(Self::Kms(kms_signer)) From d7120929820fdf43624211bda4807284889ca55d Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Mon, 2 Mar 2026 16:24:29 -0300 Subject: [PATCH 66/77] fix(cli): resolve clap argument conflicts in bundled command (#1732) * fix(cli): resolve clap argument conflicts in bundled command * chore: add ENV Var support for ntx data store --- CHANGELOG.md | 5 +++++ bin/node/.env | 1 + bin/node/src/commands/mod.rs | 9 +++++---- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe3acc00c..e4dd862c1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,11 @@ - [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/miden-node/pull/1688)). - [BREAKING] Renamed `NoteRoot` protobuf message used in `GetNoteScriptByRoot` gRPC endpoints into `NoteScriptRoot` ([#1722](https://github.com/0xMiden/miden-node/pull/1722)). +### Fixes + +- Fixed `bundled start` panicking due to duplicate `data_directory` clap argument name between `BundledCommand::Start` and `NtxBuilderConfig` ([#1732](https://github.com/0xMiden/node/pull/1732)). +- Fixed `bundled bootstrap` requiring `--validator.key.hex` or `--validator.key.kms-id` despite a default key being configured ([#1732](https://github.com/0xMiden/node/pull/1732)). + ## v0.13.7 (2026-02-25) - Updated `SyncAccountStorageMaps` and `SyncAccountVault` to allow all accounts with public state, including network accounts ([#1711](https://github.com/0xMiden/node/pull/1711)). 
diff --git a/bin/node/.env b/bin/node/.env index 02bceb57e..51a04794f 100644 --- a/bin/node/.env +++ b/bin/node/.env @@ -16,3 +16,4 @@ MIDEN_NODE_RPC_URL=http://0.0.0.0:57291 MIDEN_NODE_DATA_DIRECTORY=./ MIDEN_NODE_ENABLE_OTEL=true MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE= +MIDEN_NODE_NTX_DATA_DIRECTORY= diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 3ec8572d7..ec476bd49 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -45,6 +45,7 @@ const ENV_MEMPOOL_TX_CAPACITY: &str = "MIDEN_NODE_MEMPOOL_TX_CAPACITY"; const ENV_NTX_SCRIPT_CACHE_SIZE: &str = "MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE"; const ENV_VALIDATOR_KEY: &str = "MIDEN_NODE_VALIDATOR_KEY"; const ENV_VALIDATOR_KMS_KEY_ID: &str = "MIDEN_NODE_VALIDATOR_KMS_KEY_ID"; +const ENV_NTX_DATA_DIRECTORY: &str = "MIDEN_NODE_NTX_DATA_DIRECTORY"; const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); @@ -59,7 +60,7 @@ fn duration_to_human_readable_string(duration: Duration) -> String { /// /// Used by the Validator command and the genesis bootstrap command. #[derive(clap::Args)] -#[group(required = true, multiple = false)] +#[group(required = false, multiple = false)] pub struct ValidatorKey { /// Insecure, hex-encoded validator secret key for development and testing purposes. /// @@ -178,8 +179,8 @@ pub struct NtxBuilderConfig { /// Directory for the ntx-builder's persistent database. /// /// If not set, defaults to the node's data directory. 
- #[arg(long = "ntx-builder.data-directory", value_name = "DIR")] - pub data_directory: Option, + #[arg(long = "ntx-builder.data-directory", env = ENV_NTX_DATA_DIRECTORY, value_name = "DIR")] + pub ntx_data_directory: Option, } impl NtxBuilderConfig { @@ -194,7 +195,7 @@ impl NtxBuilderConfig { validator_url: Url, node_data_directory: &Path, ) -> miden_node_ntx_builder::NtxBuilderConfig { - let data_dir = self.data_directory.unwrap_or_else(|| node_data_directory.to_path_buf()); + let data_dir = self.ntx_data_directory.unwrap_or_else(|| node_data_directory.to_path_buf()); let database_filepath = data_dir.join("ntx-builder.sqlite3"); miden_node_ntx_builder::NtxBuilderConfig::new( From 67546e7dd2edd87221b92b3971efd658a306b910 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Tue, 3 Mar 2026 11:48:04 +1300 Subject: [PATCH 67/77] chore: Use miden-crypto PublicKey::from_der() (#1733) --- Cargo.lock | 5 ++--- crates/validator/Cargo.toml | 1 - crates/validator/src/signers/kms.rs | 17 ++--------------- 3 files changed, 4 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5119d2ae6..1bd04e719 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2747,9 +2747,9 @@ dependencies = [ [[package]] name = "miden-crypto" -version = "0.19.6" +version = "0.19.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "999926d48cf0929a39e06ce22299084f11d307ca9e765801eb56bf192b07054b" +checksum = "be59336a868de7c379eace9450563c2d7f4a0b7ab936835ec5a340dcd8d9a5ed" dependencies = [ "blake3", "cc", @@ -3196,7 +3196,6 @@ dependencies = [ "build-rs", "diesel", "diesel_migrations", - "k256", "miden-node-db", "miden-node-proto", "miden-node-proto-build", diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index 9acd553c6..84ed6b248 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -22,7 +22,6 @@ aws-config = { version = "1.8.14" } aws-sdk-kms = { version = "1.100" } 
diesel = { workspace = true } diesel_migrations = { workspace = true } -k256 = "0.13.4" miden-node-db = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } diff --git a/crates/validator/src/signers/kms.rs b/crates/validator/src/signers/kms.rs index 1d52d4e24..01bfb9744 100644 --- a/crates/validator/src/signers/kms.rs +++ b/crates/validator/src/signers/kms.rs @@ -1,15 +1,11 @@ -use anyhow::Context; use aws_sdk_kms::error::SdkError; use aws_sdk_kms::operation::sign::SignError; use aws_sdk_kms::types::SigningAlgorithmSpec; -use k256::PublicKey as K256PublicKey; -use k256::elliptic_curve::sec1::ToEncodedPoint; -use k256::pkcs8::DecodePublicKey as _; use miden_node_utils::signer::BlockSigner; use miden_protocol::block::BlockHeader; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature}; use miden_protocol::crypto::hash::keccak::Keccak256; -use miden_tx::utils::{Deserializable, DeserializationError, Serializable}; +use miden_tx::utils::{DeserializationError, Serializable}; // KMS SIGNER ERROR // ================================================================================================ @@ -23,9 +19,6 @@ pub enum KmsSignerError { #[error("KMS request returned an empty result")] EmptyBlob, /// The KMS backend returned a signature with an invalid format. - #[error("k256 signature error")] - K256Error(#[source] k256::ecdsa::Error), - /// The KMS backend returned a signature with an invalid format. #[error("invalid signature format")] SignatureFormatError(#[source] DeserializationError), } @@ -74,14 +67,8 @@ impl KmsSigner { let pub_key_output = client.get_public_key().key_id(key_id.clone()).send().await?; let spki_der = pub_key_output.public_key().ok_or(KmsSignerError::EmptyBlob)?.as_ref(); - // Decode the DER-encoded SPKI and compress it. 
- let kpub = K256PublicKey::from_public_key_der(spki_der) - .context("failed to parse SPKI as secp256k1")?; - let compressed = kpub.to_encoded_point(true); // 33 bytes, 0x02/0x03 || X. - let sec1_compressed = compressed.as_bytes(); - // Decode the compressed SPKI as a Miden public key. - let pub_key = PublicKey::read_from_bytes(sec1_compressed)?; + let pub_key = PublicKey::from_der(spki_der)?; Ok(Self { key_id, pub_key, client }) } } From f0ee9902111f29452d0a5895455f44dae2a38115 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 3 Mar 2026 08:04:21 +0100 Subject: [PATCH 68/77] chore: ignore flaky test re capacity, serialize prove generating tests (#1723) --- Cargo.lock | 1 + bin/remote-prover/Cargo.toml | 1 + bin/remote-prover/src/server/tests.rs | 6 ++++++ 3 files changed, 8 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 1bd04e719..1805260e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3310,6 +3310,7 @@ dependencies = [ "miette", "opentelemetry", "prost", + "serial_test", "tokio", "tokio-stream", "tonic", diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 3b0009cbc..28201d5b2 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -45,6 +45,7 @@ miden-protocol = { features = ["testing"], workspace = true } miden-standards = { features = ["testing"], workspace = true } miden-testing = { workspace = true } miden-tx = { features = ["testing"], workspace = true } +serial_test = { version = "3" } [build-dependencies] build-rs = { workspace = true } diff --git a/bin/remote-prover/src/server/tests.rs b/bin/remote-prover/src/server/tests.rs index d51b5c1da..f1d526b16 100644 --- a/bin/remote-prover/src/server/tests.rs +++ b/bin/remote-prover/src/server/tests.rs @@ -15,6 +15,7 @@ use miden_testing::{Auth, MockChainBuilder}; use miden_tx::utils::{Deserializable, Serializable}; use miden_tx::{LocalTransactionProver, TransactionVerifier}; use miden_tx_batch_prover::LocalBatchProver; +use 
serial_test::serial; use crate::generated::api_client::ApiClient; use crate::generated::{Proof, ProofRequest, ProofType}; @@ -175,6 +176,7 @@ impl Server { /// /// Create a server with a capacity of one and submit two requests. Ensure /// that one succeeds and one fails with a resource exhaustion error. +#[serial] #[tokio::test(flavor = "multi_thread")] async fn legacy_behaviour_with_capacity_1() { let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) @@ -208,7 +210,9 @@ async fn legacy_behaviour_with_capacity_1() { /// /// Create a server with a capacity of two and submit three requests. Ensure /// that two succeed and one fails with a resource exhaustion error. +#[ignore = "Proving 3 requests concurrently causes temporary CI resource starvation which results in _sporadic_ timeouts"] #[tokio::test(flavor = "multi_thread")] +#[serial] async fn capacity_is_respected() { let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) .with_capacity(2) @@ -332,6 +336,7 @@ async fn unsupported_proof_kind_is_rejected() { /// /// The proof is verified and the transaction IDs of request and response must correspond. #[tokio::test(flavor = "multi_thread")] +#[serial] async fn transaction_proof_is_correct() { let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) .spawn() @@ -356,6 +361,7 @@ async fn transaction_proof_is_correct() { /// The proof is replicated locally, which ensures that the gRPC codec and server code do the /// correct thing. 
#[tokio::test(flavor = "multi_thread")] +#[serial] async fn batch_proof_is_correct() { let (server, port) = Server::with_arbitrary_port(ProofKind::Batch) .spawn() From 4335f9b70f89b49f8f9ae14c4b04e36ba3910941 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Tue, 3 Mar 2026 20:05:10 +1300 Subject: [PATCH 69/77] feat(validator): verify signature from kms (#1734) --- crates/validator/src/signers/kms.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/crates/validator/src/signers/kms.rs b/crates/validator/src/signers/kms.rs index 01bfb9744..d6bd28567 100644 --- a/crates/validator/src/signers/kms.rs +++ b/crates/validator/src/signers/kms.rs @@ -21,6 +21,9 @@ pub enum KmsSignerError { /// The KMS backend returned a signature with an invalid format. #[error("invalid signature format")] SignatureFormatError(#[source] DeserializationError), + /// The KMS backend returned a signature that was not able to be verified. + #[error("invalid signature")] + InvalidSignature, } // KMS SIGNER @@ -102,8 +105,15 @@ impl BlockSigner for KmsSigner { let sig_der = sign_output.signature().ok_or(KmsSignerError::EmptyBlob)?; // Recovery id is not used by verify(pk), so 0 is fine. let recovery_id = 0; - Signature::from_der(sig_der.as_ref(), recovery_id) - .map_err(KmsSignerError::SignatureFormatError) + let sig = Signature::from_der(sig_der.as_ref(), recovery_id) + .map_err(KmsSignerError::SignatureFormatError)?; + + // Check the returned signature. 
+ if sig.verify(header.commitment(), &self.pub_key) { + Ok(sig) + } else { + Err(KmsSignerError::InvalidSignature) + } } fn public_key(&self) -> PublicKey { From a03b96d6d59a27bc0aeb7c0dadfb0c249e64faa2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Mar 2026 10:04:29 +0200 Subject: [PATCH 70/77] chore(deps): bump keccak from 0.1.5 to 0.1.6 (#1737) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1805260e0..d1c6c7e7d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2390,9 +2390,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" dependencies = [ "cpufeatures", ] From 63f85f8ba6a83ea17240b46a41919c7a992923b7 Mon Sep 17 00:00:00 2001 From: Marti Date: Tue, 3 Mar 2026 13:52:28 +0100 Subject: [PATCH 71/77] chore: update repo references from 0xMiden/miden-node to 0xMiden/node (#1740) Co-authored-by: Claude Sonnet 4.6 --- .github/actions/debian/action.yml | 4 +- CHANGELOG.md | 348 ++++++++++----------- Cargo.toml | 2 +- README.md | 2 +- bin/node/Dockerfile | 4 +- bin/remote-prover/README.md | 8 +- crates/store/src/state/loader.rs | 2 +- docs/external/src/index.md | 2 +- docs/external/src/operator/index.md | 2 +- docs/external/src/operator/installation.md | 8 +- docs/external/src/operator/usage.md | 2 +- docs/external/src/rpc.md | 2 +- docs/internal/book.toml | 2 +- docs/internal/src/index.md | 2 +- 14 files changed, 195 insertions(+), 195 deletions(-) diff --git a/.github/actions/debian/action.yml b/.github/actions/debian/action.yml index 302e29e81..99d65d069 100644 --- a/.github/actions/debian/action.yml +++ b/.github/actions/debian/action.yml @@ -121,8 +121,8 @@ runs: Maintainer: Miden 
Description: $pkg binary package Homepage: https://miden.xyz - Vcs-Git: git@github.com:0xMiden/miden-node.git - Vcs-Browser: https://github.com/0xMiden/miden-node + Vcs-Git: git@github.com:0xMiden/node.git + Vcs-Browser: https://github.com/0xMiden/node EOF - name: Build binaries diff --git a/CHANGELOG.md b/CHANGELOG.md index e4dd862c1..5a23eb125 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,28 +4,28 @@ ### Enhancements -- [BREAKING] Move block proving from Blocker Producer to the Store ([#1579](https://github.com/0xMiden/miden-node/pull/1579)). -- [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/miden-node/pull/1595)). -- Validator now persists validated transactions ([#1614](https://github.com/0xMiden/miden-node/pull/1614)). -- [BREAKING] Remove `SynState` and introduce `SyncChainMmr` ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). -- Introduce `SyncChainMmr` RPC endpoint to sync chain MMR deltas within specified block ranges ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). -- Fixed `TransactionHeader` serialization for row insertion on database & fixed transaction cursor on retrievals ([#1701](https://github.com/0xMiden/miden-node/issues/1701)). -- Added KMS signing support in validator ([#1677](https://github.com/0xMiden/miden-node/pull/1677)). +- [BREAKING] Move block proving from Block Producer to the Store ([#1579](https://github.com/0xMiden/node/pull/1579)). +- [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/node/pull/1595)). +- Validator now persists validated transactions ([#1614](https://github.com/0xMiden/node/pull/1614)). 
+- [BREAKING] Remove `SynState` and introduce `SyncChainMmr` ([#1591](https://github.com/0xMiden/node/issues/1591)). +- Introduce `SyncChainMmr` RPC endpoint to sync chain MMR deltas within specified block ranges ([#1591](https://github.com/0xMiden/node/issues/1591)). +- Fixed `TransactionHeader` serialization for row insertion on database & fixed transaction cursor on retrievals ([#1701](https://github.com/0xMiden/node/issues/1701)). +- Added KMS signing support in validator ([#1677](https://github.com/0xMiden/node/pull/1677)). ### Changes -- [BREAKING] Removed obsolete `SyncState` RPC endpoint; clients should use `SyncNotes`, `SyncNullifiers`, `SyncAccountVault`, `SyncAccountStorageMaps`, `SyncTransactions`, or `SyncChainMmr` instead ([#1636](https://github.com/0xMiden/miden-node/pull/1636)). -- Added account ID limits for `SyncTransactions`, `SyncAccountVault`, and `SyncAccountStorageMaps` to `GetLimits` responses ([#1636](https://github.com/0xMiden/miden-node/pull/1636)). -- [BREAKING] Added typed `GetAccountError` for `GetAccount` endpoint, splitting `BlockNotAvailable` into `UnknownBlock` and `BlockPruned`. `AccountNotFound` and `AccountNotPublic` now return `InvalidArgument` gRPC status instead of `NotFound`; clients should parse the error details discriminant rather than branching on status codes ([#1646](https://github.com/0xMiden/miden-node/pull/1646)). -- Changed `note_type` field in proto `NoteMetadata` from `uint32` to a `NoteType` enum ([#1594](https://github.com/0xMiden/miden-node/pull/1594)). -- Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). -- Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). 
-- Add #[track_caller] to tracing/logging helpers ([#1651](https://github.com/0xMiden/miden-node/pull/1651)). -- Added support for generic account loading at genesis ([#1624](https://github.com/0xMiden/miden-node/pull/1624)). -- Improved tracing span fields ([#1650](https://github.com/0xMiden/miden-node/pull/1650)) - - Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/miden-node/pull/1662)). -- [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/miden-node/pull/1688)). -- [BREAKING] Renamed `NoteRoot` protobuf message used in `GetNoteScriptByRoot` gRPC endpoints into `NoteScriptRoot` ([#1722](https://github.com/0xMiden/miden-node/pull/1722)). +- [BREAKING] Removed obsolete `SyncState` RPC endpoint; clients should use `SyncNotes`, `SyncNullifiers`, `SyncAccountVault`, `SyncAccountStorageMaps`, `SyncTransactions`, or `SyncChainMmr` instead ([#1636](https://github.com/0xMiden/node/pull/1636)). +- Added account ID limits for `SyncTransactions`, `SyncAccountVault`, and `SyncAccountStorageMaps` to `GetLimits` responses ([#1636](https://github.com/0xMiden/node/pull/1636)). +- [BREAKING] Added typed `GetAccountError` for `GetAccount` endpoint, splitting `BlockNotAvailable` into `UnknownBlock` and `BlockPruned`. `AccountNotFound` and `AccountNotPublic` now return `InvalidArgument` gRPC status instead of `NotFound`; clients should parse the error details discriminant rather than branching on status codes ([#1646](https://github.com/0xMiden/node/pull/1646)). +- Changed `note_type` field in proto `NoteMetadata` from `uint32` to a `NoteType` enum ([#1594](https://github.com/0xMiden/node/pull/1594)). 
+- Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/node/pull/1610)). +- Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/node/pull/1611)). +- Add #[track_caller] to tracing/logging helpers ([#1651](https://github.com/0xMiden/node/pull/1651)). +- Added support for generic account loading at genesis ([#1624](https://github.com/0xMiden/node/pull/1624)). +- Improved tracing span fields ([#1650](https://github.com/0xMiden/node/pull/1650)) + - Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/node/pull/1662)). +- [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/node/pull/1688)). +- [BREAKING] Renamed `NoteRoot` protobuf message used in `GetNoteScriptByRoot` gRPC endpoints into `NoteScriptRoot` ([#1722](https://github.com/0xMiden/node/pull/1722)). ### Fixes @@ -42,267 +42,267 @@ ## v0.13.5 (2026-02-19) -- OpenTelemetry traces are now flushed before program termination on panic ([#1643](https://github.com/0xMiden/miden-node/pull/1643)). -- Added support for the note transport layer in the network monitor ([#1660](https://github.com/0xMiden/miden-node/pull/1660)). -- Debian packages now include debug symbols ([#1666](https://github.com/0xMiden/miden-node/pull/1666)). -- Debian packages now have coredumps enabled ([#1666](https://github.com/0xMiden/miden-node/pull/1666)). -- Fixed storage map keys not being hashed before insertion into the store's SMT forest ([#1681](https://github.com/0xMiden/miden-node/pull/1681)). 
+- OpenTelemetry traces are now flushed before program termination on panic ([#1643](https://github.com/0xMiden/node/pull/1643)). +- Added support for the note transport layer in the network monitor ([#1660](https://github.com/0xMiden/node/pull/1660)). +- Debian packages now include debug symbols ([#1666](https://github.com/0xMiden/node/pull/1666)). +- Debian packages now have coredumps enabled ([#1666](https://github.com/0xMiden/node/pull/1666)). +- Fixed storage map keys not being hashed before insertion into the store's SMT forest ([#1681](https://github.com/0xMiden/node/pull/1681)). ## v0.13.4 (2026-02-04) -- Fixed network monitor displaying explorer URL as a "null" hyperlink when unset ([#1617](https://github.com/0xMiden/miden-node/pull/1617)). -- Fixed empty storage maps not being inserted into `storage_entries` table when inserting storage delta ([#1642](https://github.com/0xMiden/miden-node/pull/1642)). +- Fixed network monitor displaying explorer URL as a "null" hyperlink when unset ([#1617](https://github.com/0xMiden/node/pull/1617)). +- Fixed empty storage maps not being inserted into `storage_entries` table when inserting storage delta ([#1642](https://github.com/0xMiden/node/pull/1642)). ## v0.13.3 (2026-01-29) -- Fixed network monitor faucet test failing to parse `/get_metadata` response due to field type mismatches ([#1612](https://github.com/0xMiden/miden-node/pull/1612)). +- Fixed network monitor faucet test failing to parse `/get_metadata` response due to field type mismatches ([#1612](https://github.com/0xMiden/node/pull/1612)). ## v0.13.2 (2026-01-27) -- Network transaction builder no longer creates conflicting transactions by consuming the same notes twice ([#1597](https://github.com/0xMiden/miden-node/issues/1597)). +- Network transaction builder no longer creates conflicting transactions by consuming the same notes twice ([#1597](https://github.com/0xMiden/node/issues/1597)). 
## v0.13.1 (2026-01-27) ### Enhancements -- Bootstrap's genesis configuration file now allows eliding `wallet` and `fungible_faucet` fields ([#1590](https://github.com/0xMiden/miden-node/pull/1590)). -- Updated miden-base dependencies to version 0.13.3 ([#1601](https://github.com/0xMiden/miden-node/pull/1601)). +- Bootstrap's genesis configuration file now allows eliding `wallet` and `fungible_faucet` fields ([#1590](https://github.com/0xMiden/node/pull/1590)). +- Updated miden-base dependencies to version 0.13.3 ([#1601](https://github.com/0xMiden/node/pull/1601)). ### Fixes -- Bootstrap's genesis configuration file is now optional again ([#1590](https://github.com/0xMiden/miden-node/pull/1590)). -- Network transaction builder fails if output notes are created ([#1599](https://github.com/0xMiden/miden-node/pull/1599)). -- Fixed the copy button in the network monitor ([#1600](https://github.com/0xMiden/miden-node/pull/1600)). -- Network transaction builder now loads foreign account code into the MAST store when consuming network notes ([#1598](https://github.com/0xMiden/miden-node/pull/1598)). +- Bootstrap's genesis configuration file is now optional again ([#1590](https://github.com/0xMiden/node/pull/1590)). +- Network transaction builder fails if output notes are created ([#1599](https://github.com/0xMiden/node/pull/1599)). +- Fixed the copy button in the network monitor ([#1600](https://github.com/0xMiden/node/pull/1600)). +- Network transaction builder now loads foreign account code into the MAST store when consuming network notes ([#1598](https://github.com/0xMiden/node/pull/1598)). ## v0.13.0 (2026-01-23) ### Enhancements -- Cleanup old account data from the database on apply block ([#1304](https://github.com/0xMiden/miden-node/issues/1304)). -- Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). 
-- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). -- Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). -- Added `GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/miden-node/pull/1410)). -- Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). -- Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)). -- The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/miden-node/pull/1433)). -- Added pagination to `GetNetworkAccountIds` store endpoint ([#1452](https://github.com/0xMiden/miden-node/pull/1452)). -- Integrated NTX Builder with validator via `SubmitProvenTransaction` RPC ([#1453](https://github.com/0xMiden/miden-node/pull/1453)). -- Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/miden-node/pull/1457)). -- Added partial storage map queries to RPC ([#1428](https://github.com/0xMiden/miden-node/pull/1428)). -- Added explorer status to the `miden-network-monitor` binary ([#1450](https://github.com/0xMiden/miden-node/pull/1450)). -- Added validated transactions check to block validation logic in Validator ([#1460](https://github.com/0xMiden/miden-node/pull/1460)). -- Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/miden-node/pull/1484)). -- Added DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)). -- Added foreign account support to validator ([#1493](https://github.com/0xMiden/miden-node/pull/1493)). -- Decoupled ntx-builder from block-producer startup by loading network accounts asynchronously via a background task ([#1495](https://github.com/0xMiden/miden-node/pull/1495)). 
-- Improved DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496)). -- The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). -- Limited number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)). -- Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). -- Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). -- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). -- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). -- Ensure store terminates on nullifier tree or account tree root vs header mismatch (#[#1569](https://github.com/0xMiden/miden-node/pull/1569)). -- Added support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/miden-node/pull/1521)). -- Use paged queries for tree rebuilding to reduce memory usage during startup ([#1536](https://github.com/0xMiden/miden-node/pull/1536)). +- Cleanup old account data from the database on apply block ([#1304](https://github.com/0xMiden/node/issues/1304)). +- Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/node/pull/1381)). +- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/node/pull/1383)). +- Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/node/pull/1392)). +- Added `GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/node/pull/1410)). 
+- Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/node/pull/1419)). +- Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/node/pull/1420)). +- The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/node/pull/1433)). +- Added pagination to `GetNetworkAccountIds` store endpoint ([#1452](https://github.com/0xMiden/node/pull/1452)). +- Integrated NTX Builder with validator via `SubmitProvenTransaction` RPC ([#1453](https://github.com/0xMiden/node/pull/1453)). +- Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/node/pull/1457)). +- Added partial storage map queries to RPC ([#1428](https://github.com/0xMiden/node/pull/1428)). +- Added explorer status to the `miden-network-monitor` binary ([#1450](https://github.com/0xMiden/node/pull/1450)). +- Added validated transactions check to block validation logic in Validator ([#1460](https://github.com/0xMiden/node/pull/1460)). +- Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/node/pull/1484)). +- Added DB schema change check ([#1268](https://github.com/0xMiden/node/pull/1485)). +- Added foreign account support to validator ([#1493](https://github.com/0xMiden/node/pull/1493)). +- Decoupled ntx-builder from block-producer startup by loading network accounts asynchronously via a background task ([#1495](https://github.com/0xMiden/node/pull/1495)). +- Improved DB query performance for account queries ([#1496](https://github.com/0xMiden/node/pull/1496)). +- The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/node/pull/1512)). +- Limited number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/node/pull/1517)). 
+- Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/node/pull/1520)). +- Pin tool versions in CI ([#1523](https://github.com/0xMiden/node/pull/1523)). +- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/node/pull/1529)). +- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/node/issues/1534)). +- Ensure store terminates on nullifier tree or account tree root vs header mismatch ([#1569](https://github.com/0xMiden/node/pull/1569)). +- Added support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/node/pull/1521)). +- Use paged queries for tree rebuilding to reduce memory usage during startup ([#1536](https://github.com/0xMiden/node/pull/1536)). ### Changes -- Improved tracing in `miden-network-monitor` binary ([#1366](https://github.com/0xMiden/miden-node/pull/1366)). -- Added support for caching mempool statistics in the block producer server ([#1388](https://github.com/0xMiden/miden-node/pull/1388)). -- Renamed card's names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/miden-node/pull/1441)). -- [BREAKING] Removed `GetAccountDetails` RPC endpoint. Use `GetAccount` instead ([#1185](https://github.com/0xMiden/miden-node/issues/1185)). -- [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). -- Normalized response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). -- [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). 
-- Removed `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). -- [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298), [#1436](https://github.com/0xMiden/miden-node/pull/1436)). -- Added `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). -- Refactored account table and introduce tracking forest ([#1394](https://github.com/0xMiden/miden-node/pull/1394)). -- [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)). -- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). -- [BREAKING] Added block signing capabilities to Validator component and updated gensis bootstrap to sign blocks with configured signer ([#1426](https://github.com/0xMiden/miden-node/pull/1426)). -- Track network transactions latency in `miden-network-monitor` ([#1430](https://github.com/0xMiden/miden-node/pull/1430)). -- Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). -- Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). -- Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/miden-node/pull/1443)). -- [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/miden-node/pull/1476)). -- [BREAKING] Change account in database representation ([#1481](https://github.com/0xMiden/miden-node/pull/1481)). -- Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/miden-node/pull/1497)). 
-- Fix race condition at DB shutdown in tests ([#1503](https://github.com/0xMiden/miden-node/pull/1503)). -- [BREAKING] Updated to new miden-base protocol: removed `aux` and `execution_hint` from `NoteMetadata`, removed `NoteExecutionMode`, and `NoteMetadata::new()` is now infallible ([#1526](https://github.com/0xMiden/miden-node/pull/1526)). -- [BREAKING] Network note queries now use full account ID instead of 30-bit prefix ([#1572](https://github.com/0xMiden/miden-node/pull/1572)). -- [BREAKING] Renamed `SyncStorageMaps` RPC endpoint to `SyncAccountStorageMaps` for consistency ([#1581](https://github.com/0xMiden/miden-node/pull/1581)). -- Removed git information from node's `--version` CLI as it was often incorrect ([#1576](https://github.com/0xMiden/miden-node/pull/1576)). -- [BREAKING] Renamed `GetNetworkAccountDetailsByPrefix` endpoint to `GetNetworkAccountDetailsById` which now accepts full account ID instead of 30-bit prefix ([#1580](https://github.com/0xMiden/miden-node/pull/1580)). -- Ensure store terminates on nullifier tree or account tree root vs header mismatch (#[#1569](https://github.com/0xMiden/miden-node/pull/1569)). +- Improved tracing in `miden-network-monitor` binary ([#1366](https://github.com/0xMiden/node/pull/1366)). +- Added support for caching mempool statistics in the block producer server ([#1388](https://github.com/0xMiden/node/pull/1388)). +- Renamed card's names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/node/pull/1441)). +- [BREAKING] Removed `GetAccountDetails` RPC endpoint. Use `GetAccount` instead ([#1185](https://github.com/0xMiden/node/issues/1185)). +- [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/node/pull/1357)). +- Normalized response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/node/pull/1357)). +- [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/node/pull/1348)). 
+- Added `SyncTransactions` stress test to `miden-node-stress-test` binary ([#1294](https://github.com/0xMiden/node/pull/1294)). +- Removed `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/node/issues/1352)). +- [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/node/pull/1298), [#1436](https://github.com/0xMiden/node/pull/1436)). +- Added `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/node/issues/1353)). +- Refactored account table and introduce tracking forest ([#1394](https://github.com/0xMiden/node/pull/1394)). +- [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/node/pull/1401)). +- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/node/pull/1424)). +- [BREAKING] Added block signing capabilities to Validator component and updated genesis bootstrap to sign blocks with configured signer ([#1426](https://github.com/0xMiden/node/pull/1426)). +- Track network transactions latency in `miden-network-monitor` ([#1430](https://github.com/0xMiden/node/pull/1430)). +- Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/node/pull/1438)). +- Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/node/pull/1438)). +- Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/node/pull/1443)). +- [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/node/pull/1476)). +- [BREAKING] Change account in database representation ([#1481](https://github.com/0xMiden/node/pull/1481)). +- Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/node/pull/1497)). 
+- Fix race condition at DB shutdown in tests ([#1503](https://github.com/0xMiden/node/pull/1503)). +- [BREAKING] Updated to new miden-base protocol: removed `aux` and `execution_hint` from `NoteMetadata`, removed `NoteExecutionMode`, and `NoteMetadata::new()` is now infallible ([#1526](https://github.com/0xMiden/node/pull/1526)). +- [BREAKING] Network note queries now use full account ID instead of 30-bit prefix ([#1572](https://github.com/0xMiden/node/pull/1572)). +- [BREAKING] Renamed `SyncStorageMaps` RPC endpoint to `SyncAccountStorageMaps` for consistency ([#1581](https://github.com/0xMiden/node/pull/1581)). +- Removed git information from node's `--version` CLI as it was often incorrect ([#1576](https://github.com/0xMiden/node/pull/1576)). +- [BREAKING] Renamed `GetNetworkAccountDetailsByPrefix` endpoint to `GetNetworkAccountDetailsById` which now accepts full account ID instead of 30-bit prefix ([#1580](https://github.com/0xMiden/node/pull/1580)). +- Ensure store terminates on nullifier tree or account tree root vs header mismatch ([#1569](https://github.com/0xMiden/node/pull/1569)). ### Fixes -- RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/miden-node/pull/1370)). -- Pin protobuf (`protox`) dependencies to avoid breaking changes in transitive dependency ([#1403](https://github.com/0xMiden/miden-node/pull/1403)). -- Fixed no-std compatibility for remote prover clients ([#1407](https://github.com/0xMiden/miden-node/pull/1407)). -- Fixed `AccountProofRequest` to retrieve the latest known state in case specified block number (or chain tip) does not contain account updates ([#1422](https://github.com/0xMiden/miden-node/issues/1422)). -- Fixed missing asset setup for full account initialization ([#1461](https://github.com/0xMiden/miden-node/pull/1461)). -- Fixed `GetNetworkAccountIds` pagination to return the chain tip ([#1489](https://github.com/0xMiden/miden-node/pull/1489)). 
-- Fixed the network monitor counter account to use the storage slot name ([#1501](https://github.com/0xMiden/miden-node/pull/1501)). -- gRPC traces now correctly connect to the method implementation ([1553](https://github.com/0xMiden/miden-node/pull/1553)). -- Fixed ntx-builder crash on node restart after network transaction by adding missing `is_latest` filter to network account query ([#1578](https://github.com/0xMiden/miden-node/pull/1578)). +- RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/node/pull/1370)). +- Pin protobuf (`protox`) dependencies to avoid breaking changes in transitive dependency ([#1403](https://github.com/0xMiden/node/pull/1403)). +- Fixed no-std compatibility for remote prover clients ([#1407](https://github.com/0xMiden/node/pull/1407)). +- Fixed `AccountProofRequest` to retrieve the latest known state in case specified block number (or chain tip) does not contain account updates ([#1422](https://github.com/0xMiden/node/issues/1422)). +- Fixed missing asset setup for full account initialization ([#1461](https://github.com/0xMiden/node/pull/1461)). +- Fixed `GetNetworkAccountIds` pagination to return the chain tip ([#1489](https://github.com/0xMiden/node/pull/1489)). +- Fixed the network monitor counter account to use the storage slot name ([#1501](https://github.com/0xMiden/node/pull/1501)). +- gRPC traces now correctly connect to the method implementation ([#1553](https://github.com/0xMiden/node/pull/1553)). +- Fixed ntx-builder crash on node restart after network transaction by adding missing `is_latest` filter to network account query ([#1578](https://github.com/0xMiden/node/pull/1578)). ## v0.12.8 (2026-01-15) ### Enhancements -- Enable traces within database closures ([#1511](https://github.com/0xMiden/miden-node/pull/1511)). +- Enable traces within database closures ([#1511](https://github.com/0xMiden/node/pull/1511)). 
## v0.12.7 (2026-01-15) ### Enhancements -- Emit database table size metrics ([#1511](https://github.com/0xMiden/miden-node/pull/1511)). -- Improved telemetry in the network transaction builder ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). -- Improved telemetry in the store's `apply_block` ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). +- Emit database table size metrics ([#1511](https://github.com/0xMiden/node/pull/1511)). +- Improved telemetry in the network transaction builder ([#1508](https://github.com/0xMiden/node/pull/1508)). +- Improved telemetry in the store's `apply_block` ([#1508](https://github.com/0xMiden/node/pull/1508)). ### Fixes -- Network transaction builder now marks notes from any error as failed ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). -- Network transaction builder now adheres to note limit set by protocol ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). -- Race condition resolved in the store's `apply_block` ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). +- Network transaction builder now marks notes from any error as failed ([#1508](https://github.com/0xMiden/node/pull/1508)). +- Network transaction builder now adheres to note limit set by protocol ([#1508](https://github.com/0xMiden/node/pull/1508)). +- Race condition resolved in the store's `apply_block` ([#1508](https://github.com/0xMiden/node/pull/1508)). - This presented as a database locked error and in rare cases a desync between the mempool and store. ## v0.12.6 (2026-01-12) ### Enhancements -- Added Faucet metadata to the `miden-network-monitor` binary ([#1373](https://github.com/0xMiden/miden-node/pull/1373)). -- Improve telemetry in the store ([#1504](https://github.com/0xMiden/miden-node/pull/1504)). +- Added Faucet metadata to the `miden-network-monitor` binary ([#1373](https://github.com/0xMiden/node/pull/1373)). +- Improve telemetry in the store ([#1504](https://github.com/0xMiden/node/pull/1504)). 
### Fixes -- Block producer crash caused by pass through transactions ([#1396](https://github.com/0xMiden/miden-node/pull/1396)). +- Block producer crash caused by pass through transactions ([#1396](https://github.com/0xMiden/node/pull/1396)). ## v0.12.5 (2025-11-27) -- Actually update `miden-base` dependencies ([#1384](https://github.com/0xMiden/miden-node/pull/1384)). +- Actually update `miden-base` dependencies ([#1384](https://github.com/0xMiden/node/pull/1384)). ## v0.12.4 (2025-11-27) -- Split counter increment and tracking services in `miden-network-monitor` binary ([#1362](https://github.com/0xMiden/miden-node/pull/1362)). -- Updated the counter account from the `miden-network-monitor` to start at 0 ([#1367](https://github.com/0xMiden/miden-node/pull/1367)). -- Updated `miden-base` dependencies to fix ECDSA issues ([#1382](https://github.com/0xMiden/miden-node/pull/1382)). +- Split counter increment and tracking services in `miden-network-monitor` binary ([#1362](https://github.com/0xMiden/node/pull/1362)). +- Updated the counter account from the `miden-network-monitor` to start at 0 ([#1367](https://github.com/0xMiden/node/pull/1367)). +- Updated `miden-base` dependencies to fix ECDSA issues ([#1382](https://github.com/0xMiden/node/pull/1382)). ## v0.12.3 (2025-11-15) -- Added configurable timeout support to `RemoteBatchProver`, `RemoteBlockProver`, and `RemoteTransactionProver` clients ([#1365](https://github.com/0xMiden/miden-node/pull/1365)). -- Added configurable timeout support to `miden-network-monitor` binary ([#1365](https://github.com/0xMiden/miden-node/pull/1365)). +- Added configurable timeout support to `RemoteBatchProver`, `RemoteBlockProver`, and `RemoteTransactionProver` clients ([#1365](https://github.com/0xMiden/node/pull/1365)). +- Added configurable timeout support to `miden-network-monitor` binary ([#1365](https://github.com/0xMiden/node/pull/1365)). 
## v0.12.2 (2025-11-12) -- Fixed `PoW` challenge solving in `miden-network-monitor` binary ([#1363](https://github.com/0xMiden/miden-node/pull/1363)). +- Fixed `PoW` challenge solving in `miden-network-monitor` binary ([#1363](https://github.com/0xMiden/node/pull/1363)). ## v0.12.1 (2025-11-08) -- Added support for network transaction service in `miden-network-monitor` binary ([#1295](https://github.com/0xMiden/miden-node/pull/1295)). -- Improves `.env` file example in for the `miden-network-monitor` binary ([#1345](https://github.com/0xMiden/miden-node/pull/1345)). +- Added support for network transaction service in `miden-network-monitor` binary ([#1295](https://github.com/0xMiden/node/pull/1295)). +- Improves `.env` file example for the `miden-network-monitor` binary ([#1345](https://github.com/0xMiden/node/pull/1345)). ## v0.12.0 (2025-11-06) ### Changes - [BREAKING] Updated MSRV to 1.90. -- [BREAKING] Refactored `CheckNullifiersByPrefix` endpoint adding pagination ([#1191](https://github.com/0xMiden/miden-node/pull/1191)). -- [BREAKING] Renamed `CheckNullifiersByPrefix` endpoint to `SyncNullifiers` ([#1191](https://github.com/0xMiden/miden-node/pull/1191)). -- Added `GetNoteScriptByRoot` gRPC endpoint for retrieving a note script by its root ([#1196](https://github.com/0xMiden/miden-node/pull/1196)). -- [BREAKING] Added `block_range` and `pagination_info` fields to paginated gRPC endpoints ([#1205](https://github.com/0xMiden/miden-node/pull/1205)). -- Implemented usage of `tonic` error codes for gRPC errors ([#1208](https://github.com/0xMiden/miden-node/pull/1208)). -- [BREAKING] Replaced `GetAccountProofs` with `GetAccountProof` in the public store API (#[1211](https://github.com/0xMiden/miden-node/pull/1211)). -- Implemented storage map `DataStore` function ([#1226](https://github.com/0xMiden/miden-node/pull/1226)). 
-- [BREAKING] Refactored the mempool to use a single DAG across transactions and batches ([#1234](https://github.com/0xMiden/miden-node/pull/1234)). -- [BREAKING] Renamed `RemoteProverProxy` to `RemoteProverClient` ([#1236](https://github.com/0xMiden/miden-node/pull/1236)). -- Added pagination to `SyncNotes` endpoint ([#1257](https://github.com/0xMiden/miden-node/pull/1257)). -- Added application level error in gRPC endpoints ([#1266](https://github.com/0xMiden/miden-node/pull/1266)). -- Added `deploy-account` command to `miden-network-monitor` binary ([#1276](https://github.com/0xMiden/miden-node/pull/1276)). -- [BREAKING] Response type nuances of `GetAccountProof` in the public store API (#[1277](https://github.com/0xMiden/miden-node/pull/1277)). -- Add optional `TransactionInputs` field to `SubmitProvenTransaction` endpoint for transaction re-execution (#[1278](https://github.com/0xMiden/miden-node/pull/1278)). -- Added `validator` crate with initial protobuf, gRPC server, and sub-command (#[1293](https://github.com/0xMiden/miden-node/pull/1293)). -- [BREAKING] Added `AccountTreeWithHistory` and integrate historical queries into `GetAccountProof` ([#1292](https://github.com/0xMiden/miden-node/pull/1292)). -- [BREAKING] Added `rocksdb` feature to enable rocksdb backends of `LargeSmt` ([#1326](https://github.com/0xMiden/miden-node/pull/1326)). -- [BREAKING] Handle past/historical `AccountProof` requests ([#1333](https://github.com/0xMiden/miden-node/pull/1333)). -- Implement `DataStore::get_note_script()` for `NtxDataStore` (#[1332](https://github.com/0xMiden/miden-node/pull/1332)). -- Started validating notes by their commitment instead of ID before entering the mempool ([#1338](https://github.com/0xMiden/miden-node/pull/1338)). +- [BREAKING] Refactored `CheckNullifiersByPrefix` endpoint adding pagination ([#1191](https://github.com/0xMiden/node/pull/1191)). 
+- [BREAKING] Renamed `CheckNullifiersByPrefix` endpoint to `SyncNullifiers` ([#1191](https://github.com/0xMiden/node/pull/1191)). +- Added `GetNoteScriptByRoot` gRPC endpoint for retrieving a note script by its root ([#1196](https://github.com/0xMiden/node/pull/1196)). +- [BREAKING] Added `block_range` and `pagination_info` fields to paginated gRPC endpoints ([#1205](https://github.com/0xMiden/node/pull/1205)). +- Implemented usage of `tonic` error codes for gRPC errors ([#1208](https://github.com/0xMiden/node/pull/1208)). +- [BREAKING] Replaced `GetAccountProofs` with `GetAccountProof` in the public store API ([#1211](https://github.com/0xMiden/node/pull/1211)). +- Implemented storage map `DataStore` function ([#1226](https://github.com/0xMiden/node/pull/1226)). +- [BREAKING] Refactored the mempool to use a single DAG across transactions and batches ([#1234](https://github.com/0xMiden/node/pull/1234)). +- [BREAKING] Renamed `RemoteProverProxy` to `RemoteProverClient` ([#1236](https://github.com/0xMiden/node/pull/1236)). +- Added pagination to `SyncNotes` endpoint ([#1257](https://github.com/0xMiden/node/pull/1257)). +- Added application level error in gRPC endpoints ([#1266](https://github.com/0xMiden/node/pull/1266)). +- Added `deploy-account` command to `miden-network-monitor` binary ([#1276](https://github.com/0xMiden/node/pull/1276)). +- [BREAKING] Response type nuances of `GetAccountProof` in the public store API ([#1277](https://github.com/0xMiden/node/pull/1277)). +- Add optional `TransactionInputs` field to `SubmitProvenTransaction` endpoint for transaction re-execution ([#1278](https://github.com/0xMiden/node/pull/1278)). +- Added `validator` crate with initial protobuf, gRPC server, and sub-command ([#1293](https://github.com/0xMiden/node/pull/1293)). +- [BREAKING] Added `AccountTreeWithHistory` and integrate historical queries into `GetAccountProof` ([#1292](https://github.com/0xMiden/node/pull/1292)). 
+- [BREAKING] Added `rocksdb` feature to enable rocksdb backends of `LargeSmt` ([#1326](https://github.com/0xMiden/node/pull/1326)). +- [BREAKING] Handle past/historical `AccountProof` requests ([#1333](https://github.com/0xMiden/node/pull/1333)). +- Implement `DataStore::get_note_script()` for `NtxDataStore` ([#1332](https://github.com/0xMiden/node/pull/1332)). +- Started validating notes by their commitment instead of ID before entering the mempool ([#1338](https://github.com/0xMiden/node/pull/1338)). ## v0.11.3 (2025-11-04) -- Reduced note retries to 1 ([#1308](https://github.com/0xMiden/miden-node/pull/1308)). -- Address network transaction builder (NTX) invariant breaking for unavailable accounts ([#1312](https://github.com/0xMiden/miden-node/pull/1312)). -- Tweaked HTTP configurations on the pingora proxy server ([#1281](https://github.com/0xMiden/miden-node/pull/1281)). -- Added the counter increment task to `miden-network-monitor` binary ([#1295](https://github.com/0xMiden/miden-node/pull/1295)). +- Reduced note retries to 1 ([#1308](https://github.com/0xMiden/node/pull/1308)). +- Address network transaction builder (NTX) invariant breaking for unavailable accounts ([#1312](https://github.com/0xMiden/node/pull/1312)). +- Tweaked HTTP configurations on the pingora proxy server ([#1281](https://github.com/0xMiden/node/pull/1281)). +- Added the counter increment task to `miden-network-monitor` binary ([#1295](https://github.com/0xMiden/node/pull/1295)). ## v0.11.2 (2025-09-10) -- Added support for keepalive requests against base path `/` of RPC server ([#1212](https://github.com/0xMiden/miden-node/pull/1212)). -- [BREAKING] Replace `GetAccountProofs` with `GetAccountProof` in the public store API ([#1211](https://github.com/0xMiden/miden-node/pull/1211)). -- [BREAKING] Optimize `GetAccountProof` for small accounts ([#1185](https://github.com/0xMiden/miden-node/pull/1185)). 
+- Added support for keepalive requests against base path `/` of RPC server ([#1212](https://github.com/0xMiden/node/pull/1212)). +- [BREAKING] Replace `GetAccountProofs` with `GetAccountProof` in the public store API ([#1211](https://github.com/0xMiden/node/pull/1211)). +- [BREAKING] Optimize `GetAccountProof` for small accounts ([#1185](https://github.com/0xMiden/node/pull/1185)). ## v0.11.1 (2025-09-08) - Removed decorators from scripts when submitting transactions and batches, and inserting notes into the DB ([#1194](https://github.com/ -0xMiden/miden-node/pull/1194)). +0xMiden/node/pull/1194)). - Refresh `miden-base` dependencies. -- Added `SyncTransactions` gRPC endpoint for retrieving transactions for specific accounts within a block range ([#1224](https://github.com/0xMiden/miden-node/pull/1224)). -- Added `miden-network-monitor` binary for monitoring the Miden network ([#1217](https://github.com/0xMiden/miden-node/pull/1217)). +- Added `SyncTransactions` gRPC endpoint for retrieving transactions for specific accounts within a block range ([#1224](https://github.com/0xMiden/node/pull/1224)). +- Added `miden-network-monitor` binary for monitoring the Miden network ([#1217](https://github.com/0xMiden/node/pull/1217)). ## v0.11.0 (2025-08-28) ### Enhancements -- Added environment variable support for batch and block size CLI arguments ([#1081](https://github.com/0xMiden/miden-node/pull/1081)). -- RPC accept header now supports specifying the genesis commitment in addition to the RPC version. This lets clients ensure they are on the right network ([#1084](https://github.com/0xMiden/miden-node/pull/1084)). -- A transaction's account delta is now checked against its commitments in `SubmitProvenTransaction` endpoint ([#1093](https://github.com/0xMiden/miden-node/pull/1093)). -- Added check for Account Id prefix uniqueness when transactions to create accounts are submitted to the mempool ([#1094](https://github.com/0xMiden/miden-node/pull/1094)). 
-- Added benchmark CLI sub-command for the `miden-store` component to measure the state load time ([#1154](https://github.com/0xMiden/miden-node/pull/1154)). -- Retry failed network notes with exponential backoff instead of immediately ([#1116](https://github.com/0xMiden/miden-node/pull/1116)) -- Network notes are now dropped after failing 30 times ([#1116](https://github.com/0xMiden/miden-node/pull/1116)) -- gRPC server timeout is now configurable (defaults to `10s`) ([#1133](https://github.com/0xMiden/miden-node/pull/1133)) -- [BREAKING] Refactored protobuf messages ([#1045](https://github.com/0xMiden/miden-node/pull/#1045)). -- Added `SyncStorageMaps` gRPC endpoint for retrieving account storage maps ([#1140](https://github.com/0xMiden/miden-node/pull/1140), [#1132](https://github.com/0xMiden/miden-node/pull/1132)). -- Added `SyncAccountVault` gRPC endpoints for retrieving account assets ([#1176](https://github.com/0xMiden/miden-node/pull/1176)). -- Refactored Network Transaction Builder to manage dedicated tasks for every network account in the chain ([#1219](https://github.com/0xMiden/miden-node/pull/1219)). +- Added environment variable support for batch and block size CLI arguments ([#1081](https://github.com/0xMiden/node/pull/1081)). +- RPC accept header now supports specifying the genesis commitment in addition to the RPC version. This lets clients ensure they are on the right network ([#1084](https://github.com/0xMiden/node/pull/1084)). +- A transaction's account delta is now checked against its commitments in `SubmitProvenTransaction` endpoint ([#1093](https://github.com/0xMiden/node/pull/1093)). +- Added check for Account Id prefix uniqueness when transactions to create accounts are submitted to the mempool ([#1094](https://github.com/0xMiden/node/pull/1094)). +- Added benchmark CLI sub-command for the `miden-store` component to measure the state load time ([#1154](https://github.com/0xMiden/node/pull/1154)). 
+- Retry failed network notes with exponential backoff instead of immediately ([#1116](https://github.com/0xMiden/node/pull/1116)) +- Network notes are now dropped after failing 30 times ([#1116](https://github.com/0xMiden/node/pull/1116)) +- gRPC server timeout is now configurable (defaults to `10s`) ([#1133](https://github.com/0xMiden/node/pull/1133)) +- [BREAKING] Refactored protobuf messages ([#1045](https://github.com/0xMiden/node/pull/1045)). +- Added `SyncStorageMaps` gRPC endpoint for retrieving account storage maps ([#1140](https://github.com/0xMiden/node/pull/1140), [#1132](https://github.com/0xMiden/node/pull/1132)). +- Added `SyncAccountVault` gRPC endpoints for retrieving account assets ([#1176](https://github.com/0xMiden/node/pull/1176)). +- Refactored Network Transaction Builder to manage dedicated tasks for every network account in the chain ([#1219](https://github.com/0xMiden/node/pull/1219)). ### Changes - [BREAKING] Updated MSRV to 1.88. -- [BREAKING] De-duplicate storage of code in DB (no-migration) ([#1083](https://github.com/0xMiden/miden-node/issue/#1083)). -- [BREAKING] RPC accept header format changed from `application/miden.vnd+grpc.` to `application/vnd.miden; version=` ([#1084](https://github.com/0xMiden/miden-node/pull/1084)). -- [BREAKING] Integrated `FeeParameters` into block headers. ([#1122](https://github.com/0xMiden/miden-node/pull/1122)). +- [BREAKING] De-duplicate storage of code in DB (no-migration) ([#1083](https://github.com/0xMiden/node/issues/1083)). +- [BREAKING] RPC accept header format changed from `application/miden.vnd+grpc.` to `application/vnd.miden; version=` ([#1084](https://github.com/0xMiden/node/pull/1084)). +- [BREAKING] Integrated `FeeParameters` into block headers. ([#1122](https://github.com/0xMiden/node/pull/1122)). 
+- [BREAKING] Genesis configuration now supports fees ([#1157](https://github.com/0xMiden/node/pull/1157)). - Configure `NativeFaucet`, which determines the native asset used to pay fees - Configure the base verification fee - Note: fees are not yet activated, and this has no impact beyond setting these values in the block headers -- [BREAKING] Remove public store API `GetAccountStateDelta` ([#1162](https://github.com/0xMiden/miden-node/pull/1162)). -- Removed `faucet` binary ([#1172](https://github.com/0xMiden/miden-node/pull/1172)). -- Add `genesis_commitment` in `Status` response ([#1181](https://github.com/0xMiden/miden-node/pull/1181)). +- [BREAKING] Remove public store API `GetAccountStateDelta` ([#1162](https://github.com/0xMiden/node/pull/1162)). +- Removed `faucet` binary ([#1172](https://github.com/0xMiden/node/pull/1172)). +- Add `genesis_commitment` in `Status` response ([#1181](https://github.com/0xMiden/node/pull/1181)). ### Fixes - [BREAKING] Integrated proxy status endpoint into main proxy service, removing separate status port. -- RPC requests with wildcard (`*/*`) media-type are not longer rejected ([#1084](https://github.com/0xMiden/miden-node/pull/1084)). -- Stress-test CLI account now properly sets the storage mode and increment nonce in transactions ([#1113](https://github.com/0xMiden/miden-node/pull/1113)). +- RPC requests with wildcard (`*/*`) media-type are no longer rejected ([#1084](https://github.com/0xMiden/node/pull/1084)). +- Stress-test CLI account now properly sets the storage mode and increment nonce in transactions ([#1113](https://github.com/0xMiden/node/pull/1113)). 
+- [BREAKING] Update `notes` table schema to have a nullable `consumed_block_num` ([#1100](https://github.com/0xMiden/node/pull/1100)). +- Network Transaction Builder now correctly discards non-single-target network notes instead of panicking ([#1166](https://github.com/0xMiden/node/pull/1166)). ### Removed -- Moved the `miden-faucet` binary to the [`miden-faucet` repository](https://github.com/0xmiden/miden-faucet) ([#1179](https://github.com/0xMiden/miden-node/pull/1179)). +- Moved the `miden-faucet` binary to the [`miden-faucet` repository](https://github.com/0xmiden/miden-faucet) ([#1179](https://github.com/0xMiden/node/pull/1179)). ## v0.10.1 (2025-07-14) ### Fixes -- Network accounts are no longer disabled after one transaction ([#1086](https://github.com/0xMiden/miden-node/pull/1086)). +- Network accounts are no longer disabled after one transaction ([#1086](https://github.com/0xMiden/node/pull/1086)). ## v0.10.0 (2025-07-10) diff --git a/Cargo.toml b/Cargo.toml index 3e41e9c1d..da5015478 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ exclude = [".github/"] homepage = "https://miden.xyz" license = "MIT" readme = "README.md" -repository = "https://github.com/0xMiden/miden-node" +repository = "https://github.com/0xMiden/node" rust-version = "1.91" version = "0.14.0" diff --git a/README.md b/README.md index ae38d6d95..a06c00e4a 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Miden node -[![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/0xMiden/miden-node/blob/main/LICENSE) +[![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/0xMiden/node/blob/main/LICENSE) [![CI](https://github.com/0xMiden/node/actions/workflows/ci.yml/badge.svg)](https://github.com/0xMiden/node/actions/workflows/ci.yml) [![RUST_VERSION](https://img.shields.io/badge/rustc-1.90+-lightgray.svg)](https://www.rust-lang.org/tools/install) 
[![crates.io](https://img.shields.io/crates/v/miden-node)](https://crates.io/crates/miden-node) diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 30aef2637..bf41b46d3 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -38,8 +38,8 @@ FROM runtime-base AS runtime COPY --from=builder /app/target/release/miden-node /usr/local/bin/miden-node LABEL org.opencontainers.image.authors=devops@miden.team \ org.opencontainers.image.url=https://0xMiden.github.io/ \ - org.opencontainers.image.documentation=https://github.com/0xMiden/miden-node \ - org.opencontainers.image.source=https://github.com/0xMiden/miden-node \ + org.opencontainers.image.documentation=https://github.com/0xMiden/node \ + org.opencontainers.image.source=https://github.com/0xMiden/node \ org.opencontainers.image.vendor=Miden \ org.opencontainers.image.licenses=MIT ARG CREATED diff --git a/bin/remote-prover/README.md b/bin/remote-prover/README.md index 364cfd56b..24a6f9f24 100644 --- a/bin/remote-prover/README.md +++ b/bin/remote-prover/README.md @@ -7,7 +7,7 @@ This enables weaker devices to offload the proof generation to a beefy remote se The implementation provides a configurable request queue and proves one request at a time in FIFO order. This is not intended to cover complex proxy setups nor load-balancing, but can instead be used as a starting point for more advanced setups. -The gRPC specification can be found in the [Miden repository](https://github.com/0xMiden/miden-node/blob/main/proto/proto/remote_prover.proto). +The gRPC specification can be found in the [Miden repository](https://github.com/0xMiden/node/blob/main/proto/proto/remote_prover.proto). Ensure you are viewing the appropriate version tag or commit. 
## Quick start @@ -43,8 +43,8 @@ Install the Debian package: ```bash set -e -sudo wget https://github.com/0xMiden/miden-node/releases/download/v0.8/miden-prover-v0.8-arm64.deb -O prover.deb -sudo wget -q -O - https://github.com/0xMiden/miden-node/releases/download/v0.8/miden-prover-v0.8-arm64.deb.checksum | awk '{print $1}' | sudo tee prover.checksum +sudo wget https://github.com/0xMiden/node/releases/download/v0.8/miden-prover-v0.8-arm64.deb -O prover.deb +sudo wget -q -O - https://github.com/0xMiden/node/releases/download/v0.8/miden-prover-v0.8-arm64.deb.checksum | awk '{print $1}' | sudo tee prover.checksum sudo sha256sum prover.deb | awk '{print $1}' > prover.sha256 sudo diff prover.sha256 prover.checksum sudo dpkg -i prover.deb @@ -125,7 +125,7 @@ The server implements the following health and status related gRPC services: - [gRPC Health Check](https://grpc.io/docs/guides/health-checking/) - [gRPC Reflection](https://grpc.io/docs/guides/reflection/) -- [WorkerStatusApi](https://github.com/0xMiden/miden-node/blob/main/proto/proto/remote_prover.proto) +- [WorkerStatusApi](https://github.com/0xMiden/node/blob/main/proto/proto/remote_prover.proto) The server supports OpenTelemetry traces which can be configured using the environment variables specified in the OpenTelemetry documentation. diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 14de0471f..77cd9f4f4 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -365,7 +365,7 @@ pub async fn load_smt_forest( // Process each account in this page for account_id in page.account_ids { // TODO: Loading the full account from the database is inefficient and will need to - // go away. + // go away. 
let account_info = db.select_account(account_id).await?; let account = account_info .details diff --git a/docs/external/src/index.md b/docs/external/src/index.md index b53f7a753..b4d1f0afe 100644 --- a/docs/external/src/index.md +++ b/docs/external/src/index.md @@ -20,6 +20,6 @@ interface for users, dApps, wallets and other clients to submit transactions and ## Feedback Please report any issues, ask questions or leave feedback in the node repository -[here](https://github.com/0xMiden/miden-node/issues/new/choose). +[here](https://github.com/0xMiden/node/issues/new/choose). This includes outdated, misleading, incorrect or just plain confusing information :) diff --git a/docs/external/src/operator/index.md b/docs/external/src/operator/index.md index 72dc0992d..f362151a8 100644 --- a/docs/external/src/operator/index.md +++ b/docs/external/src/operator/index.md @@ -4,4 +4,4 @@ Welcome to the `Miden` node operator guide which should cover everything you nee Miden node. You can report any issues, ask questions or leave feedback at our project repo -[here](https://github.com/0xMiden/miden-node/issues/new/choose). +[here](https://github.com/0xMiden/node/issues/new/choose). diff --git a/docs/external/src/operator/installation.md b/docs/external/src/operator/installation.md index 662d76851..7af6b84d6 100644 --- a/docs/external/src/operator/installation.md +++ b/docs/external/src/operator/installation.md @@ -8,7 +8,7 @@ We provide Debian packages for official releases for the node software. Alternat ## Debian package -Official Debian packages are available under our [releases](https://github.com/0xMiden/miden-node/releases) page. +Official Debian packages are available under our [releases](https://github.com/0xMiden/node/releases) page. Both `amd64` and `arm64` packages are available. Note that the packages include a `systemd` service which is disabled by default. @@ -69,13 +69,13 @@ this for advanced use only. 
The incantation is a little different as you'll be t ```sh # Install from a specific branch -cargo install --locked --git https://github.com/0xMiden/miden-node miden-node --branch +cargo install --locked --git https://github.com/0xMiden/node miden-node --branch # Install a specific tag -cargo install --locked --git https://github.com/0xMiden/miden-node miden-node --tag +cargo install --locked --git https://github.com/0xMiden/node miden-node --tag # Install a specific git revision -cargo install --locked --git https://github.com/0xMiden/miden-node miden-node --rev +cargo install --locked --git https://github.com/0xMiden/node miden-node --rev ``` More information on the various `cargo install` options can be found diff --git a/docs/external/src/operator/usage.md b/docs/external/src/operator/usage.md index e8bd377bb..32dac5c11 100644 --- a/docs/external/src/operator/usage.md +++ b/docs/external/src/operator/usage.md @@ -142,4 +142,4 @@ source profile.env && miden-node <...> This works well on Linux and MacOS, but Windows requires some additional scripting unfortunately. -See the `.env` files in each of the binary crates' [directories](https://github.com/0xMiden/miden-node/tree/next/bin) for a list of all available environment variables. +See the `.env` files in each of the binary crates' [directories](https://github.com/0xMiden/node/tree/next/bin) for a list of all available environment variables. diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md index 69b722406..7e4598d8a 100644 --- a/docs/external/src/rpc.md +++ b/docs/external/src/rpc.md @@ -7,7 +7,7 @@ sidebar_position: 1 This is a reference of the Node's public RPC interface. It consists of a gRPC API which may be used to submit transactions and query the state of the blockchain. -The gRPC service definition can be found in the Miden node's `proto` [directory](https://github.com/0xMiden/miden-node/tree/main/proto) in the `rpc.proto` file. 
+The gRPC service definition can be found in the Miden node's `proto` [directory](https://github.com/0xMiden/node/tree/main/proto) in the `rpc.proto` file. diff --git a/docs/internal/book.toml b/docs/internal/book.toml index 6a0ac5db7..3c1b2892a 100644 --- a/docs/internal/book.toml +++ b/docs/internal/book.toml @@ -6,7 +6,7 @@ multilingual = false title = "The Miden Node Developer Guide" [output.html] -git-repository-url = "https://github.com/0xMiden/miden-node" +git-repository-url = "https://github.com/0xMiden/node" [preprocessor.katex] after = ["links"] diff --git a/docs/internal/src/index.md b/docs/internal/src/index.md index 478f22e8b..00f85898a 100644 --- a/docs/internal/src/index.md +++ b/docs/internal/src/index.md @@ -14,6 +14,6 @@ It is also a good idea to familiarise yourself with the [operator manual](https: Living documents go stale - the code is the final arbitrator of truth. If you encounter any outdated, incorrect or misleading information, please -[open an issue](https://github.com/0xMiden/miden-node/issues/new/choose). +[open an issue](https://github.com/0xMiden/node/issues/new/choose). 
From bcb2bde8f9205e63767abf4de63ddd1f450540a6 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 3 Mar 2026 17:51:02 +0100 Subject: [PATCH 72/77] feat: move `rocksdb` backend for `LargeSmt` from `-crypto` to `-node` (#1499) --- Cargo.lock | 29 +- Cargo.toml | 44 +- bin/network-monitor/src/faucet.rs | 42 +- crates/large-smt-backend-rocksdb/Cargo.toml | 22 + crates/large-smt-backend-rocksdb/README.md | 45 + .../large-smt-backend-rocksdb/src/helpers.rs | 83 + crates/large-smt-backend-rocksdb/src/lib.rs | 59 + .../large-smt-backend-rocksdb/src/rocksdb.rs | 1329 +++++++++++++++++ crates/store/Cargo.toml | 39 +- crates/store/benches/account_tree.rs | 2 +- crates/store/src/accounts/mod.rs | 4 +- crates/store/src/state/loader.rs | 7 +- 12 files changed, 1628 insertions(+), 77 deletions(-) create mode 100644 crates/large-smt-backend-rocksdb/Cargo.toml create mode 100644 crates/large-smt-backend-rocksdb/README.md create mode 100644 crates/large-smt-backend-rocksdb/src/helpers.rs create mode 100644 crates/large-smt-backend-rocksdb/src/lib.rs create mode 100644 crates/large-smt-backend-rocksdb/src/rocksdb.rs diff --git a/Cargo.lock b/Cargo.lock index d1c6c7e7d..75fd39949 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2629,7 +2629,7 @@ checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "miden-agglayer" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "fs-err", "miden-assembly", @@ -2700,7 +2700,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "miden-protocol", 
"thiserror 2.0.18", @@ -2769,7 +2769,6 @@ dependencies = [ "rand_core 0.9.5", "rand_hc", "rayon", - "rocksdb", "sha2", "sha3", "subtle", @@ -2817,6 +2816,17 @@ dependencies = [ "unicode-width 0.1.14", ] +[[package]] +name = "miden-large-smt-backend-rocksdb" +version = "0.14.0" +dependencies = [ + "miden-crypto", + "miden-protocol", + "rayon", + "rocksdb", + "winter-utils", +] + [[package]] name = "miden-mast-package" version = "0.20.6" @@ -3102,6 +3112,7 @@ dependencies = [ "miden-agglayer", "miden-block-prover", "miden-crypto", + "miden-large-smt-backend-rocksdb", "miden-node-db", "miden-node-proto", "miden-node-proto-build", @@ -3234,7 +3245,7 @@ dependencies = [ [[package]] name = "miden-protocol" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "bech32", "fs-err", @@ -3264,7 +3275,7 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "proc-macro2", "quote", @@ -3347,7 +3358,7 @@ dependencies = [ [[package]] name = "miden-standards" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "fs-err", "miden-assembly", @@ -3364,7 +3375,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = 
"git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3387,7 +3398,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "miden-processor", "miden-protocol", @@ -3400,7 +3411,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" dependencies = [ "miden-protocol", "miden-tx", diff --git a/Cargo.toml b/Cargo.toml index da5015478..a6cd8d68f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,7 @@ members = [ "crates/block-producer", "crates/db", "crates/grpc-error-macro", + "crates/large-smt-backend-rocksdb", "crates/ntx-builder", "crates/proto", "crates/remote-prover-client", @@ -45,33 +46,36 @@ debug = true [workspace.dependencies] # Workspace crates. 
-miden-node-block-producer = { path = "crates/block-producer", version = "0.14" } -miden-node-db = { path = "crates/db", version = "0.14" } -miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.14" } -miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.14" } -miden-node-proto = { path = "crates/proto", version = "0.14" } -miden-node-proto-build = { path = "proto", version = "0.14" } -miden-node-rpc = { path = "crates/rpc", version = "0.14" } -miden-node-store = { path = "crates/store", version = "0.14" } -miden-node-test-macro = { path = "crates/test-macro" } -miden-node-utils = { path = "crates/utils", version = "0.14" } -miden-node-validator = { path = "crates/validator", version = "0.14" } -miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.14" } +miden-large-smt-backend-rocksdb = { path = "crates/large-smt-backend-rocksdb", version = "0.14" } +miden-node-block-producer = { path = "crates/block-producer", version = "0.14" } +miden-node-db = { path = "crates/db", version = "0.14" } +miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.14" } +miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.14" } +miden-node-proto = { path = "crates/proto", version = "0.14" } +miden-node-proto-build = { path = "proto", version = "0.14" } +miden-node-rpc = { path = "crates/rpc", version = "0.14" } +miden-node-store = { path = "crates/store", version = "0.14" } +miden-node-test-macro = { path = "crates/test-macro" } +miden-node-utils = { path = "crates/utils", version = "0.14" } +miden-node-validator = { path = "crates/validator", version = "0.14" } +miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.14" } + # Temporary workaround until # is part of `rocksdb-rust` release miden-node-rocksdb-cxx-linkage-fix = { path = "crates/rocksdb-cxx-linkage-fix", version = "0.14" } # miden-base aka protocol dependencies. 
These should be updated in sync. -miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base" } -miden-protocol = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base" } -miden-standards = { branch = "next", git = "https://github.com/0xMiden/miden-base" } -miden-testing = { branch = "next", git = "https://github.com/0xMiden/miden-base" } -miden-tx = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base" } -miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base" } +miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-protocol = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base.git" } +miden-standards = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-testing = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-tx = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base.git" } +miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } # Other miden dependencies. These should align with those expected by miden-base. 
-miden-air = { features = ["std", "testing"], version = "0.20" } -miden-crypto = { default-features = false, version = "0.19" } +miden-air = { features = ["std", "testing"], version = "0.20" } + +miden-crypto = { version = "0.19.7" } # External dependencies anyhow = { version = "1.0" } diff --git a/bin/network-monitor/src/faucet.rs b/bin/network-monitor/src/faucet.rs index caeafe055..1e50a173d 100644 --- a/bin/network-monitor/src/faucet.rs +++ b/bin/network-monitor/src/faucet.rs @@ -189,14 +189,15 @@ async fn perform_faucet_test( debug!("Generated account ID: {} (length: {})", account_id, account_id.len()); // Step 1: Request PoW challenge - let pow_url = faucet_url.join("/pow")?; - let response = client - .get(pow_url) - .query(&[("account_id", &account_id), ("amount", &MINT_AMOUNT.to_string())]) - .send() - .await?; + let mut pow_url = faucet_url.join("/pow")?; + pow_url + .query_pairs_mut() + .append_pair("account_id", &account_id) + .append_pair("amount", &MINT_AMOUNT.to_string()); - let response_text = response.text().await?; + let response = client.get(pow_url).send().await?; + + let response_text: String = response.text().await?; debug!("Faucet PoW response: {}", response_text); let challenge_response: PowChallengeResponse = serde_json::from_str(&response_text) @@ -215,21 +216,18 @@ async fn perform_faucet_test( debug!("Solved PoW challenge with nonce: {}", nonce); // Step 3: Request tokens with the solution - let tokens_url = faucet_url.join("/get_tokens")?; - - let response = client - .get(tokens_url) - .query(&[ - ("account_id", account_id.as_str()), - ("is_private_note", "false"), - ("asset_amount", &MINT_AMOUNT.to_string()), - ("challenge", &challenge_response.challenge), - ("nonce", &nonce.to_string()), - ]) - .send() - .await?; - - let response_text = response.text().await?; + let mut tokens_url = faucet_url.join("/get_tokens")?; + tokens_url + .query_pairs_mut() + .append_pair("account_id", account_id.as_str()) + .append_pair("is_private_note", 
"false") + .append_pair("asset_amount", &MINT_AMOUNT.to_string()) + .append_pair("challenge", &challenge_response.challenge) + .append_pair("nonce", &nonce.to_string()); + + let response = client.get(tokens_url).send().await?; + + let response_text: String = response.text().await?; let tokens_response: GetTokensResponse = serde_json::from_str(&response_text) .with_context(|| format!("Failed to parse tokens response: {response_text}"))?; diff --git a/crates/large-smt-backend-rocksdb/Cargo.toml b/crates/large-smt-backend-rocksdb/Cargo.toml new file mode 100644 index 000000000..c7f009f92 --- /dev/null +++ b/crates/large-smt-backend-rocksdb/Cargo.toml @@ -0,0 +1,22 @@ +[package] +authors.workspace = true +description = "Large-scale Sparse Merkle Tree backed by pluggable storage - RocksDB backend" +edition.workspace = true +homepage.workspace = true +keywords = ["merkle", "miden", "node", "smt"] +license.workspace = true +name = "miden-large-smt-backend-rocksdb" +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lints] +workspace = true + +[dependencies] +miden-crypto = { features = ["concurrent", "std"], workspace = true } +miden-protocol = { features = ["std"], workspace = true } +rayon = { version = "1.10" } +rocksdb = { default-features = false, features = ["bindgen-runtime", "lz4"], version = "0.24" } +winter-utils = { version = "0.13" } diff --git a/crates/large-smt-backend-rocksdb/README.md b/crates/large-smt-backend-rocksdb/README.md new file mode 100644 index 000000000..4b612c325 --- /dev/null +++ b/crates/large-smt-backend-rocksdb/README.md @@ -0,0 +1,45 @@ +# miden-large-smt-backend-rocksdb + +Large-scale Sparse Merkle Tree backed by pluggable storage - RocksDB backend implementation. + +This crate provides `LargeSmt`, a hybrid SMT implementation that stores the top of the tree +(depths 0–23) in memory and persists the lower depths (24–64) in storage as fixed-size subtrees. 
+This hybrid layout scales beyond RAM while keeping common operations fast. + +## Migration Status + +This crate is the future home for `LargeSmt` and its storage backends. Currently it re-exports +types from `miden-protocol` (which re-exports from `miden-crypto`). + +The migration will be completed in phases: +1. ✅ Create this crate as a re-export layer (current state) +2. Copy the full implementation from miden-crypto to this crate +3. Update miden-crypto to remove the rocksdb feature +4. Update dependents to use this crate directly + +## Features + +- **concurrent**: Enables parallel processing with rayon (enabled by default) +- **rocksdb**: (Future) Enables RocksDB storage backend + +## Usage + +```rust +use miden_large_smt::{LargeSmt, MemoryStorage}; + +// Create an empty tree with in-memory storage +let storage = MemoryStorage::new(); +let smt = LargeSmt::new(storage).unwrap(); +``` + +## Re-exported Types + +This crate re-exports the following types from `miden-protocol`: + +- `LargeSmt` - The large-scale SMT implementation +- `LargeSmtError` - Error type for LargeSmt operations +- `MemoryStorage` - In-memory storage backend +- `SmtStorage` - Storage backend trait +- `Subtree` - Serializable subtree representation +- `StorageUpdates` / `StorageUpdateParts` - Batch update types +- Various SMT types: `Smt`, `SmtLeaf`, `SmtProof`, `LeafIndex`, etc. 
diff --git a/crates/large-smt-backend-rocksdb/src/helpers.rs b/crates/large-smt-backend-rocksdb/src/helpers.rs new file mode 100644 index 000000000..23f3c8d88 --- /dev/null +++ b/crates/large-smt-backend-rocksdb/src/helpers.rs @@ -0,0 +1,83 @@ +use miden_crypto::merkle::smt::{MAX_LEAF_ENTRIES, SmtLeaf, SmtLeafError}; +use miden_crypto::word::LexicographicWord; +use rocksdb::Error as RocksDbError; + +use crate::{StorageError, Word}; + +pub(crate) fn map_rocksdb_err(err: RocksDbError) -> StorageError { + StorageError::Backend(Box::new(err)) +} + +pub(crate) fn insert_into_leaf( + leaf: &mut SmtLeaf, + key: Word, + value: Word, +) -> Result, StorageError> { + match leaf { + SmtLeaf::Empty(_) => { + *leaf = SmtLeaf::new_single(key, value); + Ok(None) + }, + SmtLeaf::Single(kv_pair) => { + if kv_pair.0 == key { + let old_value = kv_pair.1; + kv_pair.1 = value; + Ok(Some(old_value)) + } else { + let mut pairs = vec![*kv_pair, (key, value)]; + pairs.sort_by(|(key_1, _), (key_2, _)| { + LexicographicWord::from(*key_1).cmp(&LexicographicWord::from(*key_2)) + }); + *leaf = SmtLeaf::Multiple(pairs); + Ok(None) + } + }, + SmtLeaf::Multiple(kv_pairs) => match kv_pairs.binary_search_by(|kv_pair| { + LexicographicWord::from(kv_pair.0).cmp(&LexicographicWord::from(key)) + }) { + Ok(pos) => { + let old_value = kv_pairs[pos].1; + kv_pairs[pos].1 = value; + Ok(Some(old_value)) + }, + Err(pos) => { + if kv_pairs.len() >= MAX_LEAF_ENTRIES { + return Err(StorageError::Leaf(SmtLeafError::TooManyLeafEntries { + actual: kv_pairs.len() + 1, + })); + } + kv_pairs.insert(pos, (key, value)); + Ok(None) + }, + }, + } +} + +pub(crate) fn remove_from_leaf(leaf: &mut SmtLeaf, key: Word) -> (Option, bool) { + match leaf { + SmtLeaf::Empty(_) => (None, false), + SmtLeaf::Single((key_at_leaf, value_at_leaf)) => { + if *key_at_leaf == key { + let old_value = *value_at_leaf; + *leaf = SmtLeaf::new_empty(key.into()); + (Some(old_value), true) + } else { + (None, false) + } + }, + 
SmtLeaf::Multiple(kv_pairs) => match kv_pairs.binary_search_by(|kv_pair| { + LexicographicWord::from(kv_pair.0).cmp(&LexicographicWord::from(key)) + }) { + Ok(pos) => { + let old_value = kv_pairs[pos].1; + kv_pairs.remove(pos); + debug_assert!(!kv_pairs.is_empty()); + if kv_pairs.len() == 1 { + *leaf = SmtLeaf::Single(kv_pairs[0]); + } + (Some(old_value), false) + }, + Err(_) => (None, false), + }, + } +} diff --git a/crates/large-smt-backend-rocksdb/src/lib.rs b/crates/large-smt-backend-rocksdb/src/lib.rs new file mode 100644 index 000000000..563439c9f --- /dev/null +++ b/crates/large-smt-backend-rocksdb/src/lib.rs @@ -0,0 +1,59 @@ +//! Large-scale Sparse Merkle Tree backed by pluggable storage. +//! +//! `LargeSmt` stores the top of the tree (depths 0–23) in memory and persists the lower +//! depths (24–64) in storage as fixed-size subtrees. This hybrid layout scales beyond RAM +//! while keeping common operations fast. +//! +//! # Usage +//! +//! ```ignore +//! use miden_large_smt::{LargeSmt, MemoryStorage}; +//! +//! // Create an empty tree with in-memory storage +//! let storage = MemoryStorage::new(); +//! let smt = LargeSmt::new(storage).unwrap(); +//! ``` +//! +//! ```ignore +//! use miden_large_smt_backend_rocksdb::{LargeSmt, RocksDbConfig, RocksDbStorage}; +//! +//! let storage = RocksDbStorage::open(RocksDbConfig::new("/path/to/db")).unwrap(); +//! let smt = LargeSmt::new(storage).unwrap(); +//! ``` + +extern crate alloc; + +mod helpers; +#[expect(clippy::doc_markdown, clippy::inline_always)] +mod rocksdb; +// Re-export from miden-protocol. 
+pub use miden_protocol::crypto::merkle::smt::{ + InnerNode, + LargeSmt, + LargeSmtError, + LeafIndex, + MemoryStorage, + SMT_DEPTH, + Smt, + SmtLeaf, + SmtLeafError, + SmtProof, + SmtStorage, + StorageError, + StorageUpdateParts, + StorageUpdates, + Subtree, + SubtreeError, + SubtreeUpdate, +}; +// Also re-export commonly used types for convenience +pub use miden_protocol::{ + EMPTY_WORD, + Felt, + Word, + crypto::{ + hash::rpo::Rpo256, + merkle::{EmptySubtreeRoots, InnerNodeInfo, MerkleError, NodeIndex, SparseMerklePath}, + }, +}; +pub use rocksdb::{RocksDbConfig, RocksDbStorage}; diff --git a/crates/large-smt-backend-rocksdb/src/rocksdb.rs b/crates/large-smt-backend-rocksdb/src/rocksdb.rs new file mode 100644 index 000000000..92f187c4d --- /dev/null +++ b/crates/large-smt-backend-rocksdb/src/rocksdb.rs @@ -0,0 +1,1329 @@ +use alloc::boxed::Box; +use alloc::vec::Vec; +use std::path::PathBuf; +use std::sync::Arc; + +use miden_crypto::Map; +use miden_crypto::merkle::NodeIndex; +use miden_crypto::merkle::smt::{InnerNode, SmtLeaf, Subtree}; +use rocksdb::{ + BlockBasedOptions, + Cache, + ColumnFamilyDescriptor, + DB, + DBCompactionStyle, + DBCompressionType, + DBIteratorWithThreadMode, + FlushOptions, + IteratorMode, + Options, + ReadOptions, + WriteBatch, +}; +use winter_utils::{Deserializable, Serializable}; + +use super::{SmtStorage, StorageError, StorageUpdateParts, StorageUpdates, SubtreeUpdate}; +use crate::helpers::{insert_into_leaf, map_rocksdb_err, remove_from_leaf}; +use crate::{EMPTY_WORD, Word}; + +const IN_MEMORY_DEPTH: u8 = 24; + +/// The name of the `RocksDB` column family used for storing SMT leaves. +const LEAVES_CF: &str = "leaves"; +/// The names of the `RocksDB` column families used for storing SMT subtrees (deep nodes). 
+const SUBTREE_24_CF: &str = "st24"; +const SUBTREE_32_CF: &str = "st32"; +const SUBTREE_40_CF: &str = "st40"; +const SUBTREE_48_CF: &str = "st48"; +const SUBTREE_56_CF: &str = "st56"; +const SUBTREE_DEPTHS: [u8; 5] = [56, 48, 40, 32, 24]; + +/// The name of the `RocksDB` column family used for storing metadata (e.g., root, counts). +const METADATA_CF: &str = "metadata"; +/// The name of the `RocksDB` column family used for storing level 24 hashes for fast tree +/// rebuilding. +const DEPTH_24_CF: &str = "depth24"; + +/// The key used in the `METADATA_CF` column family to store the SMT's root hash. +const ROOT_KEY: &[u8] = b"smt_root"; +/// The key used in the `METADATA_CF` column family to store the total count of non-empty leaves. +const LEAF_COUNT_KEY: &[u8] = b"leaf_count"; +/// The key used in the `METADATA_CF` column family to store the total count of key-value entries. +const ENTRY_COUNT_KEY: &[u8] = b"entry_count"; + +/// A `RocksDB`-backed persistent storage implementation for a Sparse Merkle Tree (SMT). +/// +/// Implements the `SmtStorage` trait, providing durable storage for SMT components +/// including leaves, subtrees (for deeper parts of the tree), and metadata like the SMT root +/// and counts. It leverages `RocksDB` column families to organize data: +/// - `LEAVES_CF` ("leaves"): Stores `SmtLeaf` data, keyed by their logical u64 index. +/// - `SUBTREE_24_CF` ("st24"): Stores serialized `Subtree` data at depth 24, keyed by their root +/// `NodeIndex`. +/// - `SUBTREE_32_CF` ("st32"): Stores serialized `Subtree` data at depth 32, keyed by their root +/// `NodeIndex`. +/// - `SUBTREE_40_CF` ("st40"): Stores serialized `Subtree` data at depth 40, keyed by their root +/// `NodeIndex`. +/// - `SUBTREE_48_CF` ("st48"): Stores serialized `Subtree` data at depth 48, keyed by their root +/// `NodeIndex`. +/// - `SUBTREE_56_CF` ("st56"): Stores serialized `Subtree` data at depth 56, keyed by their root +/// `NodeIndex`. 
+/// - `METADATA_CF` ("metadata"): Stores overall SMT metadata such as the current root hash, total +/// leaf count, and total entry count. +#[derive(Debug, Clone)] +pub struct RocksDbStorage { + db: Arc, +} + +impl RocksDbStorage { + /// Opens or creates a RocksDB database at the specified `path` and configures it for SMT + /// storage. + /// + /// This method sets up the necessary column families (`leaves`, `subtrees`, `metadata`) + /// and applies various RocksDB options for performance, such as caching, bloom filters, + /// and compaction strategies tailored for SMT workloads. + /// + /// # Errors + /// Returns `StorageError::Backend` if the database cannot be opened or configured, + /// for example, due to path issues, permissions, or RocksDB internal errors. + pub fn open(config: RocksDbConfig) -> Result { + // Base DB options + let mut db_opts = Options::default(); + // Create DB if it doesn't exist + db_opts.create_if_missing(true); + // Auto-create missing column families + db_opts.create_missing_column_families(true); + #[expect(clippy::cast_possible_wrap)] + // Tune compaction threads to match CPU cores + db_opts.increase_parallelism(rayon::current_num_threads() as i32); + // Limit the number of open file handles + db_opts.set_max_open_files(config.max_open_files); + #[expect(clippy::cast_possible_wrap)] + // Parallelize flush/compaction up to CPU count + db_opts.set_max_background_jobs(rayon::current_num_threads() as i32); + // Maximum WAL size + db_opts.set_max_total_wal_size(512 * 1024 * 1024); + + // Shared block cache across all column families + let cache = Cache::new_lru_cache(config.cache_size); + + // Common table options for bloom filtering and cache + let mut table_opts = BlockBasedOptions::default(); + // Use shared LRU cache for block data + table_opts.set_block_cache(&cache); + table_opts.set_bloom_filter(10.0, false); + // Enable whole-key bloom filtering (better with point lookups) + table_opts.set_whole_key_filtering(true); + // Pin L0 
filter and index blocks in cache (improves performance) + table_opts.set_pin_l0_filter_and_index_blocks_in_cache(true); + + // Column family for leaves + let mut leaves_opts = Options::default(); + leaves_opts.set_block_based_table_factory(&table_opts); + // 128 MB memtable + leaves_opts.set_write_buffer_size(128 << 20); + // Allow up to 3 memtables + leaves_opts.set_max_write_buffer_number(3); + leaves_opts.set_min_write_buffer_number_to_merge(1); + // Do not retain flushed memtables in memory + leaves_opts.set_max_write_buffer_size_to_maintain(0); + // Use level-based compaction + leaves_opts.set_compaction_style(DBCompactionStyle::Level); + // 512 MB target file size + leaves_opts.set_target_file_size_base(512 << 20); + leaves_opts.set_target_file_size_multiplier(2); + // LZ4 compression + leaves_opts.set_compression_type(DBCompressionType::Lz4); + // Set level-based compaction parameters + leaves_opts.set_level_zero_file_num_compaction_trigger(8); + + // Helper to build subtree CF options with correct prefix length + #[expect(clippy::items_after_statements)] + fn subtree_cf(cache: &Cache, bloom_filter_bits: f64) -> Options { + let mut tbl = BlockBasedOptions::default(); + // Use shared LRU cache for block data + tbl.set_block_cache(cache); + // Set bloom filter for subtree lookups + tbl.set_bloom_filter(bloom_filter_bits, false); + // Enable whole-key bloom filtering + tbl.set_whole_key_filtering(true); + // Pin L0 filter and index blocks in cache + tbl.set_pin_l0_filter_and_index_blocks_in_cache(true); + + let mut opts = Options::default(); + opts.set_block_based_table_factory(&tbl); + // 128 MB memtable + opts.set_write_buffer_size(128 << 20); + opts.set_max_write_buffer_number(3); + opts.set_min_write_buffer_number_to_merge(1); + // Do not retain flushed memtables in memory + opts.set_max_write_buffer_size_to_maintain(0); + // Use level-based compaction + opts.set_compaction_style(DBCompactionStyle::Level); + // 512 MB target file size + 
opts.set_target_file_size_base(512 << 20); + opts.set_target_file_size_multiplier(2); + // LZ4 compression + opts.set_compression_type(DBCompressionType::Lz4); + // Set level-based compaction parameters + opts.set_level_zero_file_num_compaction_trigger(8); + opts + } + + let mut depth24_opts = Options::default(); + depth24_opts.set_compression_type(DBCompressionType::Lz4); + depth24_opts.set_block_based_table_factory(&table_opts); + + // Metadata CF with no compression + let mut metadata_opts = Options::default(); + metadata_opts.set_compression_type(DBCompressionType::None); + + // Define column families with tailored options + let cfs = vec![ + ColumnFamilyDescriptor::new(LEAVES_CF, leaves_opts), + ColumnFamilyDescriptor::new(SUBTREE_24_CF, subtree_cf(&cache, 8.0)), + ColumnFamilyDescriptor::new(SUBTREE_32_CF, subtree_cf(&cache, 10.0)), + ColumnFamilyDescriptor::new(SUBTREE_40_CF, subtree_cf(&cache, 10.0)), + ColumnFamilyDescriptor::new(SUBTREE_48_CF, subtree_cf(&cache, 12.0)), + ColumnFamilyDescriptor::new(SUBTREE_56_CF, subtree_cf(&cache, 12.0)), + ColumnFamilyDescriptor::new(METADATA_CF, metadata_opts), + ColumnFamilyDescriptor::new(DEPTH_24_CF, depth24_opts), + ]; + + // Open the database with our tuned CFs + let db = DB::open_cf_descriptors(&db_opts, config.path, cfs).map_err(map_rocksdb_err)?; + + Ok(Self { db: Arc::new(db) }) + } + + /// Syncs the RocksDB database to disk. + /// + /// This ensures that all data is persisted to disk. + /// + /// # Errors + /// - Returns `StorageError::Backend` if the flush operation fails. 
+ fn sync(&self) -> Result<(), StorageError> { + let mut fopts = FlushOptions::default(); + fopts.set_wait(true); + + for name in [ + LEAVES_CF, + SUBTREE_24_CF, + SUBTREE_32_CF, + SUBTREE_40_CF, + SUBTREE_48_CF, + SUBTREE_56_CF, + METADATA_CF, + DEPTH_24_CF, + ] { + let cf = self.cf_handle(name)?; + self.db.flush_cf_opt(cf, &fopts).map_err(map_rocksdb_err)?; + } + + self.db.flush_wal(true).map_err(map_rocksdb_err)?; + Ok(()) + } + + /// Converts an index (u64) into a fixed-size byte array for use as a `RocksDB` key. + #[inline(always)] + fn index_db_key(index: u64) -> [u8; 8] { + index.to_be_bytes() + } + + /// Converts a `NodeIndex` (for a subtree root) into a `KeyBytes` for use as a `RocksDB` key. + /// The `KeyBytes` is a wrapper around a 8-byte value with a variable-length prefix. + #[inline(always)] + fn subtree_db_key(index: NodeIndex) -> KeyBytes { + let keep = match index.depth() { + 24 => 3, + 32 => 4, + 40 => 5, + 48 => 6, + 56 => 7, + d => panic!("unsupported depth {d}"), + }; + KeyBytes::new(index.value(), keep) + } + + /// Retrieves a handle to a `RocksDB` column family by its name. + /// + /// # Errors + /// Returns `StorageError::Backend` if the column family with the given `name` does not + /// exist. + fn cf_handle(&self, name: &str) -> Result<&rocksdb::ColumnFamily, StorageError> { + self.db + .cf_handle(name) + .ok_or_else(|| StorageError::Unsupported(format!("unknown column family `{name}`"))) + } + + /* helper: CF handle from NodeIndex ------------------------------------- */ + #[inline(always)] + fn subtree_cf(&self, index: NodeIndex) -> &rocksdb::ColumnFamily { + let name = cf_for_depth(index.depth()); + self.cf_handle(name).expect("CF handle missing") + } +} + +impl SmtStorage for RocksDbStorage { + /// Retrieves the SMT root hash from the `METADATA_CF` column family. + /// + /// # Errors + /// - `StorageError::Backend`: If the metadata column family is missing or a RocksDB error + /// occurs. 
+ /// - `StorageError::DeserializationError`: If the retrieved root hash bytes cannot be + /// deserialized. + fn get_root(&self) -> Result, StorageError> { + let cf = self.cf_handle(METADATA_CF)?; + match self.db.get_cf(cf, ROOT_KEY).map_err(map_rocksdb_err)? { + Some(bytes) => { + let digest = Word::read_from_bytes(&bytes)?; + Ok(Some(digest)) + }, + None => Ok(None), + } + } + + /// Stores the SMT root hash in the `METADATA_CF` column family. + /// + /// # Errors + /// - `StorageError::Backend`: If the metadata column family is missing or a RocksDB error + /// occurs. + fn set_root(&self, root: Word) -> Result<(), StorageError> { + let cf = self.cf_handle(METADATA_CF)?; + self.db.put_cf(cf, ROOT_KEY, root.to_bytes()).map_err(map_rocksdb_err)?; + Ok(()) + } + + /// Retrieves the total count of non-empty leaves from the `METADATA_CF` column family. + /// Returns 0 if the count is not found. + /// + /// # Errors + /// - `StorageError::Backend`: If the metadata column family is missing or a RocksDB error + /// occurs. + /// - `StorageError::BadValueLen`: If the retrieved count bytes are invalid. + fn leaf_count(&self) -> Result { + let cf = self.cf_handle(METADATA_CF)?; + self.db + .get_cf(cf, LEAF_COUNT_KEY) + .map_err(map_rocksdb_err)? + .map_or(Ok(0), |bytes| { + let arr: [u8; 8] = + bytes.as_slice().try_into().map_err(|_| StorageError::BadValueLen { + what: "leaf count", + expected: 8, + found: bytes.len(), + })?; + Ok(usize::from_be_bytes(arr)) + }) + } + + /// Retrieves the total count of key-value entries from the `METADATA_CF` column family. + /// Returns 0 if the count is not found. + /// + /// # Errors + /// - `StorageError::Backend`: If the metadata column family is missing or a RocksDB error + /// occurs. + /// - `StorageError::BadValueLen`: If the retrieved count bytes are invalid. + fn entry_count(&self) -> Result { + let cf = self.cf_handle(METADATA_CF)?; + self.db + .get_cf(cf, ENTRY_COUNT_KEY) + .map_err(map_rocksdb_err)? 
+ .map_or(Ok(0), |bytes| { + let arr: [u8; 8] = + bytes.as_slice().try_into().map_err(|_| StorageError::BadValueLen { + what: "entry count", + expected: 8, + found: bytes.len(), + })?; + Ok(usize::from_be_bytes(arr)) + }) + } + + /// Inserts a key-value pair into the SMT leaf at the specified logical `index`. + /// + /// This operation involves: + /// 1. Retrieving the current leaf (if any) at `index`. + /// 2. Inserting the new key-value pair into the leaf. + /// 3. Updating the leaf and entry counts in the metadata column family. + /// 4. Writing all changes (leaf data, counts) to RocksDB in a single batch. + /// + /// Note: This only updates the leaf. Callers are responsible for recomputing and + /// persisting the corresponding inner nodes. + /// + /// # Errors + /// - `StorageError::Backend`: If column families are missing or a RocksDB error occurs. + /// - `StorageError::DeserializationError`: If existing leaf data is corrupt. + #[expect(clippy::single_match_else)] + fn insert_value( + &self, + index: u64, + key: Word, + value: Word, + ) -> Result, StorageError> { + debug_assert_ne!(value, EMPTY_WORD); + + let mut batch = WriteBatch::default(); + + // Fetch initial counts. + let mut current_leaf_count = self.leaf_count()?; + let mut current_entry_count = self.entry_count()?; + + let leaves_cf = self.cf_handle(LEAVES_CF)?; + let db_key = Self::index_db_key(index); + + let maybe_leaf = self.get_leaf(index)?; + + let value_to_return: Option = match maybe_leaf { + Some(mut existing_leaf) => { + let old_value = insert_into_leaf(&mut existing_leaf, key, value)?; + // Determine if the overall SMT entry_count needs to change. + // entry_count increases if: + // 1. The key was not present in this leaf before (`old_value` is `None`). + // 2. The key was present but held `EMPTY_WORD` (`old_value` is + // `Some(EMPTY_WORD)`). 
+ if old_value.is_none_or(|old_v| old_v == EMPTY_WORD) { + current_entry_count += 1; + } + // current_leaf_count does not change because the leaf itself already existed. + batch.put_cf(leaves_cf, db_key, existing_leaf.to_bytes()); + old_value + }, + None => { + // Leaf at `index` does not exist, so create a new one. + let new_leaf = SmtLeaf::Single((key, value)); + // A new leaf is created. + current_leaf_count += 1; + // This new leaf contains one new SMT entry. + current_entry_count += 1; + batch.put_cf(leaves_cf, db_key, new_leaf.to_bytes()); + // No previous value, as the leaf (and thus the key in it) was new. + None + }, + }; + + // Add updated metadata counts to the batch. + let metadata_cf = self.cf_handle(METADATA_CF)?; + batch.put_cf(metadata_cf, LEAF_COUNT_KEY, current_leaf_count.to_be_bytes()); + batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, current_entry_count.to_be_bytes()); + + // Atomically write all changes (leaf data and metadata counts). + self.db.write(batch).map_err(map_rocksdb_err)?; + + Ok(value_to_return) + } + + /// Removes a key-value pair from the SMT leaf at the specified logical `index`. + /// + /// This operation involves: + /// 1. Retrieving the leaf at `index`. + /// 2. Removing the `key` from the leaf. If the leaf becomes empty, it's deleted from RocksDB. + /// 3. Updating the leaf and entry counts in the metadata column family. + /// 4. Writing all changes (leaf data/deletion, counts) to RocksDB in a single batch. + /// + /// Returns `Ok(None)` if the leaf at `index` does not exist or the `key` is not found. + /// + /// Note: This only updates the leaf. Callers are responsible for recomputing and + /// persisting the corresponding inner nodes. + /// + /// # Errors + /// - `StorageError::Backend`: If column families are missing or a RocksDB error occurs. + /// - `StorageError::DeserializationError`: If existing leaf data is corrupt. 
    fn remove_value(&self, index: u64, key: Word) -> Result<Option<Word>, StorageError> {
        // No leaf at this index means there is nothing to remove.
        let Some(mut leaf) = self.get_leaf(index)? else {
            return Ok(None);
        };

        let mut batch = WriteBatch::default();
        let cf = self.cf_handle(LEAVES_CF)?;
        let metadata_cf = self.cf_handle(METADATA_CF)?;
        let db_key = Self::index_db_key(index);
        let mut entry_count = self.entry_count()?;
        let mut leaf_count = self.leaf_count()?;

        let (current_value, is_empty) = remove_from_leaf(&mut leaf, key);
        // The entry count only decreases when the key actually held a non-empty value;
        // removing an absent key (or an `EMPTY_WORD` placeholder) leaves it unchanged,
        // mirroring the increment rules in `insert_value`.
        if let Some(current_value) = current_value
            && current_value != EMPTY_WORD
        {
            entry_count -= 1;
        }
        if is_empty {
            // The leaf lost its last entry: delete it and decrement the leaf count.
            leaf_count -= 1;
            batch.delete_cf(cf, db_key);
        } else {
            batch.put_cf(cf, db_key, leaf.to_bytes());
        }
        batch.put_cf(metadata_cf, LEAF_COUNT_KEY, leaf_count.to_be_bytes());
        batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, entry_count.to_be_bytes());
        // Atomically persist the leaf change together with the updated counts.
        self.db.write(batch).map_err(map_rocksdb_err)?;
        Ok(current_value)
    }

    /// Retrieves a single SMT leaf node by its logical `index` from the `LEAVES_CF` column family.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs.
    /// - `StorageError::DeserializationError`: If the retrieved leaf data is corrupt.
    fn get_leaf(&self, index: u64) -> Result<Option<SmtLeaf>, StorageError> {
        let cf = self.cf_handle(LEAVES_CF)?;
        let key = Self::index_db_key(index);
        match self.db.get_cf(cf, key).map_err(map_rocksdb_err)? {
            Some(bytes) => {
                let leaf = SmtLeaf::read_from_bytes(&bytes)?;
                Ok(Some(leaf))
            },
            None => Ok(None),
        }
    }

    /// Sets or updates multiple SMT leaf nodes in the `LEAVES_CF` column family.
    ///
    /// This method performs a batch write to RocksDB. It also updates the global
    /// leaf and entry counts in the `METADATA_CF` based on the provided `leaves` map,
    /// overwriting any previous counts.
    ///
    /// Note: This method assumes the provided `leaves` map represents the entirety
    /// of leaves to be stored or that counts are being explicitly reset.
    /// Note: This only updates the leaves. Callers are responsible for recomputing and
    /// persisting the corresponding inner nodes.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If column families are missing or a RocksDB error occurs.
    fn set_leaves(&self, leaves: Map<u64, SmtLeaf>) -> Result<(), StorageError> {
        let cf = self.cf_handle(LEAVES_CF)?;
        // Derive the new global counts directly from the input map; these overwrite
        // whatever counts were previously stored in the metadata column family.
        let leaf_count: usize = leaves.len();
        let entry_count: usize = leaves.values().map(|leaf| leaf.entries().len()).sum();
        let mut batch = WriteBatch::default();
        for (idx, leaf) in leaves {
            let key = Self::index_db_key(idx);
            let value = leaf.to_bytes();
            batch.put_cf(cf, key, &value);
        }
        let metadata_cf = self.cf_handle(METADATA_CF)?;
        batch.put_cf(metadata_cf, LEAF_COUNT_KEY, leaf_count.to_be_bytes());
        batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, entry_count.to_be_bytes());
        // Single atomic write covering all leaves and both counters.
        self.db.write(batch).map_err(map_rocksdb_err)?;
        Ok(())
    }

    /// Removes a single SMT leaf node by its logical `index` from the `LEAVES_CF` column family.
    ///
    /// Important: This method currently *does not* update the global leaf and entry counts
    /// in the metadata. Callers are responsible for managing these counts separately
    /// if using this method directly, or preferably use `apply` or `remove_value` which handle
    /// counts.
    ///
    /// Note: This only removes the leaf. Callers are responsible for recomputing and
    /// persisting the corresponding inner nodes.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs.
    /// - `StorageError::DeserializationError`: If the retrieved (to be returned) leaf data is
    ///   corrupt.
+ fn remove_leaf(&self, index: u64) -> Result, StorageError> { + let key = Self::index_db_key(index); + let cf = self.cf_handle(LEAVES_CF)?; + let old_bytes = self.db.get_cf(cf, key).map_err(map_rocksdb_err)?; + self.db.delete_cf(cf, key).map_err(map_rocksdb_err)?; + Ok(old_bytes + .map(|bytes| SmtLeaf::read_from_bytes(&bytes).expect("failed to deserialize leaf"))) + } + + /// Retrieves multiple SMT leaf nodes by their logical `indices` using RocksDB's `multi_get_cf`. + /// + /// # Errors + /// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs. + /// - `StorageError::DeserializationError`: If any retrieved leaf data is corrupt. + fn get_leaves(&self, indices: &[u64]) -> Result>, StorageError> { + let cf = self.cf_handle(LEAVES_CF)?; + let db_keys: Vec<[u8; 8]> = indices.iter().map(|&idx| Self::index_db_key(idx)).collect(); + let results = self.db.multi_get_cf(db_keys.iter().map(|k| (cf, k.as_ref()))); + + results + .into_iter() + .map(|result| match result { + Ok(Some(bytes)) => Ok(Some(SmtLeaf::read_from_bytes(&bytes)?)), + Ok(None) => Ok(None), + Err(e) => Err(map_rocksdb_err(e)), + }) + .collect() + } + + /// Returns true if the storage has any leaves. + /// + /// # Errors + /// Returns `StorageError` if the storage read operation fails. + fn has_leaves(&self) -> Result { + Ok(self.leaf_count()? > 0) + } + + /// Batch-retrieves multiple subtrees from RocksDB by their node indices. + /// + /// This method groups requests by subtree depth into column family buckets, + /// then performs parallel `multi_get` operations to efficiently retrieve + /// all subtrees. Results are deserialized and placed in the same order as + /// the input indices. + /// + /// Note: Retrieval is performed in parallel. If multiple errors occur (e.g., + /// deserialization or backend errors), only the first one encountered is returned. + /// Other errors will be discarded. 
+ /// + /// # Parameters + /// - `indices`: A slice of subtree root indices to retrieve. + /// + /// # Returns + /// - A `Vec>` where each index corresponds to the original input. + /// - `Ok(...)` if all fetches succeed. + /// - `Err(StorageError)` if any RocksDB access or deserialization fails. + fn get_subtree(&self, index: NodeIndex) -> Result, StorageError> { + let cf = self.subtree_cf(index); + let key = Self::subtree_db_key(index); + match self.db.get_cf(cf, key).map_err(map_rocksdb_err)? { + Some(bytes) => { + let subtree = Subtree::from_vec(index, &bytes)?; + Ok(Some(subtree)) + }, + None => Ok(None), + } + } + + /// Batch-retrieves multiple subtrees from RocksDB by their node indices. + /// + /// This method groups requests by subtree depth into column family buckets, + /// then performs parallel `multi_get` operations to efficiently retrieve + /// all subtrees. Results are deserialized and placed in the same order as + /// the input indices. + /// + /// # Parameters + /// - `indices`: A slice of subtree root indices to retrieve. + /// + /// # Returns + /// - A `Vec>` where each index corresponds to the original input. + /// - `Ok(...)` if all fetches succeed. + /// - `Err(StorageError)` if any RocksDB access or deserialization fails. 
    fn get_subtrees(&self, indices: &[NodeIndex]) -> Result<Vec<Option<Subtree>>, StorageError> {
        use rayon::prelude::*;

        // One bucket per supported subtree depth, tagged with the position of each request
        // in `indices` so results can be scattered back in input order.
        // NOTE(review): the bucket order below must mirror `SUBTREE_DEPTHS`
        // (assumed to be [56, 48, 40, 32, 24]) because `SUBTREE_DEPTHS[bucket_index]`
        // is used further down to recover the depth — confirm the constant's ordering.
        let mut depth_buckets: [Vec<(usize, NodeIndex)>; 5] = Default::default();

        for (original_index, &node_index) in indices.iter().enumerate() {
            let depth = node_index.depth();
            let bucket_index = match depth {
                56 => 0,
                48 => 1,
                40 => 2,
                32 => 3,
                24 => 4,
                _ => {
                    return Err(StorageError::Unsupported(format!(
                        "unsupported subtree depth {depth}"
                    )));
                },
            };
            depth_buckets[bucket_index].push((original_index, node_index));
        }
        let mut results = vec![None; indices.len()];

        // Process depth buckets in parallel. If several buckets fail, `collect` keeps only
        // the first error encountered; the others are discarded.
        let bucket_results: Result<Vec<_>, StorageError> = depth_buckets
            .into_par_iter()
            .enumerate()
            .filter(|(_, bucket)| !bucket.is_empty())
            .map(
                |(bucket_index, bucket)| -> Result<Vec<(usize, Option<Subtree>)>, StorageError> {
                    let depth = SUBTREE_DEPTHS[bucket_index];
                    let cf = self.cf_handle(cf_for_depth(depth))?;
                    let keys: Vec<_> =
                        bucket.iter().map(|(_, idx)| Self::subtree_db_key(*idx)).collect();

                    let db_results = self.db.multi_get_cf(keys.iter().map(|k| (cf, k.as_ref())));

                    // Process results for this bucket
                    bucket
                        .into_iter()
                        .zip(db_results)
                        .map(|((original_index, node_index), db_result)| {
                            let subtree = match db_result {
                                Ok(Some(bytes)) => Some(Subtree::from_vec(node_index, &bytes)?),
                                Ok(None) => None,
                                Err(e) => return Err(map_rocksdb_err(e)),
                            };
                            Ok((original_index, subtree))
                        })
                        .collect()
                },
            )
            .collect();

        // Flatten results and place them in correct positions
        for bucket_result in bucket_results? {
            for (original_index, subtree) in bucket_result {
                results[original_index] = subtree;
            }
        }

        Ok(results)
    }

    /// Stores a single subtree in RocksDB and optionally updates the depth-24 root cache.
    ///
    /// The subtree is serialized and written to its corresponding column family.
    /// If it's a depth-24 subtree, the root node's hash is also stored in the
    /// dedicated `DEPTH_24_CF` cache to support top-level reconstruction.
    ///
    /// # Parameters
    /// - `subtree`: A reference to the subtree to be stored.
    ///
    /// # Errors
    /// - Returns `StorageError` if column family lookup, serialization, or the write operation
    ///   fails.
    fn set_subtree(&self, subtree: &Subtree) -> Result<(), StorageError> {
        let subtrees_cf = self.subtree_cf(subtree.root_index());
        let mut batch = WriteBatch::default();

        let key = Self::subtree_db_key(subtree.root_index());
        let value = subtree.to_vec();
        batch.put_cf(subtrees_cf, key, value);

        // Also update level 24 hash cache if this is a level 24 subtree
        if subtree.root_index().depth() == IN_MEMORY_DEPTH {
            let root_hash = subtree
                .get_inner_node(subtree.root_index())
                .ok_or_else(|| StorageError::Unsupported("Subtree root node not found".into()))?
                .hash();

            let depth24_cf = self.cf_handle(DEPTH_24_CF)?;
            let hash_key = Self::index_db_key(subtree.root_index().value());
            batch.put_cf(depth24_cf, hash_key, root_hash.to_bytes());
        }

        // Both writes (subtree bytes + optional depth-24 hash) land atomically.
        self.db.write(batch).map_err(map_rocksdb_err)?;
        Ok(())
    }

    /// Bulk-writes subtrees to storage (bypassing WAL).
    ///
    /// This method writes a vector of serialized `Subtree` objects directly to their
    /// corresponding RocksDB column families based on their root index.
    ///
    /// ⚠️ **Warning:** This function should only be used during **initial SMT construction**.
    /// It disables the WAL, meaning writes are **not crash-safe** and can result in data loss
    /// if the process terminates unexpectedly.
    ///
    /// # Parameters
    /// - `subtrees`: A vector of `Subtree` objects to be serialized and persisted.
    ///
    /// # Errors
    /// - Returns `StorageError::Backend` if any column family lookup or RocksDB write fails.
    // NOTE(review): the implementation calls plain `DB::write` without disabling the WAL,
    // which contradicts the "bypassing WAL" claim above — confirm intent (dedicated
    // write options vs. doc correction).
    fn set_subtrees(&self, subtrees: Vec<Subtree>) -> Result<(), StorageError> {
        let depth24_cf = self.cf_handle(DEPTH_24_CF)?;
        let mut batch = WriteBatch::default();

        for subtree in subtrees {
            let subtrees_cf = self.subtree_cf(subtree.root_index());
            let key = Self::subtree_db_key(subtree.root_index());
            let value = subtree.to_vec();
            batch.put_cf(subtrees_cf, key, value);

            // Keep the depth-24 root-hash cache in sync for level-24 subtrees.
            // (Unlike `set_subtree`, a missing root node is silently skipped here.)
            if subtree.root_index().depth() == IN_MEMORY_DEPTH
                && let Some(root_node) = subtree.get_inner_node(subtree.root_index())
            {
                let hash_key = Self::index_db_key(subtree.root_index().value());
                batch.put_cf(depth24_cf, hash_key, root_node.hash().to_bytes());
            }
        }

        self.db.write(batch).map_err(map_rocksdb_err)?;
        Ok(())
    }

    /// Removes a single SMT Subtree from storage, identified by its root `NodeIndex`.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If the subtrees column family is missing or a RocksDB error
    ///   occurs.
    fn remove_subtree(&self, index: NodeIndex) -> Result<(), StorageError> {
        let subtrees_cf = self.subtree_cf(index);
        let mut batch = WriteBatch::default();

        let key = Self::subtree_db_key(index);
        batch.delete_cf(subtrees_cf, key);

        // Also remove level 24 hash cache if this is a level 24 subtree
        if index.depth() == IN_MEMORY_DEPTH {
            let depth24_cf = self.cf_handle(DEPTH_24_CF)?;
            let hash_key = Self::index_db_key(index.value());
            batch.delete_cf(depth24_cf, hash_key);
        }

        self.db.write(batch).map_err(map_rocksdb_err)?;
        Ok(())
    }

    /// Retrieves a single inner node (non-leaf node) from within a Subtree.
    ///
    /// This method is intended for accessing nodes at depths greater than or equal to
    /// `IN_MEMORY_DEPTH`. It first finds the appropriate Subtree containing the `index`, then
    /// delegates to `Subtree::get_inner_node()`.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If `index.depth() < IN_MEMORY_DEPTH`, or if RocksDB errors occur.
    /// - `StorageError::Value`: If the containing Subtree data is corrupt.
    fn get_inner_node(&self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError> {
        // Nodes above `IN_MEMORY_DEPTH` live in the in-memory upper tree, not in this backend.
        if index.depth() < IN_MEMORY_DEPTH {
            return Err(StorageError::Unsupported(
                "Cannot get inner node from upper part of the tree".into(),
            ));
        }
        let subtree_root_index = Subtree::find_subtree_root(index);
        Ok(self
            .get_subtree(subtree_root_index)?
            .and_then(|subtree| subtree.get_inner_node(index)))
    }

    /// Sets or updates a single inner node (non-leaf node) within a Subtree.
    ///
    /// This method is intended for `index.depth() >= IN_MEMORY_DEPTH`.
    /// If the target Subtree does not exist, it is created. The `node` is then
    /// inserted into the Subtree, and the modified Subtree is written back to storage.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If `index.depth() < IN_MEMORY_DEPTH`, or if RocksDB errors occur.
    /// - `StorageError::Value`: If existing Subtree data is corrupt.
    fn set_inner_node(
        &self,
        index: NodeIndex,
        node: InnerNode,
    ) -> Result<Option<InnerNode>, StorageError> {
        if index.depth() < IN_MEMORY_DEPTH {
            return Err(StorageError::Unsupported(
                "Cannot set inner node in upper part of the tree".into(),
            ));
        }

        let subtree_root_index = Subtree::find_subtree_root(index);
        // Load-modify-store: create the subtree on demand if it does not exist yet.
        let mut subtree = self
            .get_subtree(subtree_root_index)?
            .unwrap_or_else(|| Subtree::new(subtree_root_index));
        let old_node = subtree.insert_inner_node(index, node);
        self.set_subtree(&subtree)?;
        Ok(old_node)
    }

    /// Removes a single inner node (non-leaf node) from within a Subtree.
    ///
    /// This method is intended for `index.depth() >= IN_MEMORY_DEPTH`.
    /// If the Subtree becomes empty after removing the node, the Subtree itself
    /// is removed from storage.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If `index.depth() < IN_MEMORY_DEPTH`, or if RocksDB errors occur.
    /// - `StorageError::Value`: If existing Subtree data is corrupt.
    fn remove_inner_node(&self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError> {
        if index.depth() < IN_MEMORY_DEPTH {
            return Err(StorageError::Unsupported(
                "Cannot remove inner node from upper part of the tree".into(),
            ));
        }

        let subtree_root_index = Subtree::find_subtree_root(index);
        self.get_subtree(subtree_root_index)
            .and_then(|maybe_subtree| match maybe_subtree {
                Some(mut subtree) => {
                    let old_node = subtree.remove_inner_node(index);
                    // Drop the whole subtree once its last node is gone; otherwise
                    // write back the shrunken subtree.
                    let db_operation_result = if subtree.is_empty() {
                        self.remove_subtree(subtree_root_index)
                    } else {
                        self.set_subtree(&subtree)
                    };
                    db_operation_result.map(|_| old_node)
                },
                None => Ok(None),
            })
    }

    /// Applies a batch of `StorageUpdates` atomically to the RocksDB backend.
    ///
    /// This is the primary method for persisting changes to the SMT. It constructs a single
    /// RocksDB `WriteBatch` containing all specified changes:
    /// - Leaf updates/deletions in `LEAVES_CF`.
    /// - Subtree updates/deletions in `SUBTREE_24_CF`, `SUBTREE_32_CF`, `SUBTREE_40_CF`,
    ///   `SUBTREE_48_CF`, `SUBTREE_56_CF`.
    /// - Updates to leaf and entry counts in `METADATA_CF` based on `leaf_count_delta` and
    ///   `entry_count_delta`.
    /// - Sets the new SMT root in `METADATA_CF`.
    ///
    /// All operations in the batch are applied atomically by RocksDB.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If any column family is missing or a RocksDB write error occurs.
    fn apply(&self, updates: StorageUpdates) -> Result<(), StorageError> {
        use rayon::prelude::*;

        let mut batch = WriteBatch::default();

        let leaves_cf = self.cf_handle(LEAVES_CF)?;
        let metadata_cf = self.cf_handle(METADATA_CF)?;
        let depth24_cf = self.cf_handle(DEPTH_24_CF)?;

        let StorageUpdateParts {
            leaf_updates,
            subtree_updates,
            new_root,
            leaf_count_delta,
            entry_count_delta,
        } = updates.into_parts();

        // Process leaf updates: `Some` is an upsert, `None` a deletion.
        for (index, maybe_leaf) in leaf_updates {
            let key = Self::index_db_key(index);
            match maybe_leaf {
                Some(leaf) => batch.put_cf(leaves_cf, key, leaf.to_bytes()),
                None => batch.delete_cf(leaves_cf, key),
            }
        }

        // Helper for depth 24 operations
        let is_depth_24 = |index: NodeIndex| index.depth() == IN_MEMORY_DEPTH;

        // Parallel preparation of subtree operations: serialization happens here, off the
        // single-threaded batch-building path below. Each item carries an optional
        // depth-24 cache operation (put with hash bytes, or delete when `None`).
        let subtree_ops: Result<Vec<_>, StorageError> = subtree_updates
            .into_par_iter()
            .map(|update| -> Result<_, StorageError> {
                let (index, maybe_bytes, depth24_op) = match update {
                    SubtreeUpdate::Store { index, subtree } => {
                        let bytes = subtree.to_vec();
                        let depth24_op = is_depth_24(index)
                            .then(|| subtree.get_inner_node(index))
                            .flatten()
                            .map(|root_node| {
                                let hash_key = Self::index_db_key(index.value());
                                (hash_key, Some(root_node.hash().to_bytes()))
                            });
                        (index, Some(bytes), depth24_op)
                    },
                    SubtreeUpdate::Delete { index } => {
                        let depth24_op = is_depth_24(index).then(|| {
                            let hash_key = Self::index_db_key(index.value());
                            (hash_key, None)
                        });
                        (index, None, depth24_op)
                    },
                };

                let key = Self::subtree_db_key(index);
                let subtrees_cf = self.subtree_cf(index);

                Ok((subtrees_cf, key, maybe_bytes, depth24_op))
            })
            .collect();

        // Sequential batch building
        for (subtrees_cf, key, maybe_bytes, depth24_op) in subtree_ops? {
            match maybe_bytes {
                Some(bytes) => batch.put_cf(subtrees_cf, key, bytes),
                None => batch.delete_cf(subtrees_cf, key),
            }

            if let Some((hash_key, maybe_hash_bytes)) = depth24_op {
                match maybe_hash_bytes {
                    Some(hash_bytes) => batch.put_cf(depth24_cf, hash_key, hash_bytes),
                    None => batch.delete_cf(depth24_cf, hash_key),
                }
            }
        }

        // Only touch the counters when something changed; saturating arithmetic guards
        // against underflow/overflow on inconsistent deltas.
        if leaf_count_delta != 0 || entry_count_delta != 0 {
            let current_leaf_count = self.leaf_count()?;
            let current_entry_count = self.entry_count()?;

            let new_leaf_count = current_leaf_count.saturating_add_signed(leaf_count_delta);
            let new_entry_count = current_entry_count.saturating_add_signed(entry_count_delta);

            batch.put_cf(metadata_cf, LEAF_COUNT_KEY, new_leaf_count.to_be_bytes());
            batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, new_entry_count.to_be_bytes());
        }

        batch.put_cf(metadata_cf, ROOT_KEY, new_root.to_bytes());

        let mut write_opts = rocksdb::WriteOptions::default();
        // Disable immediate WAL sync to disk for better performance
        write_opts.set_sync(false);
        self.db.write_opt(batch, &write_opts).map_err(map_rocksdb_err)?;

        Ok(())
    }

    /// Returns an iterator over all (logical u64 index, `SmtLeaf`) pairs in the `LEAVES_CF`.
    ///
    /// The iterator observes a consistent point-in-time view (RocksDB iterators pin an
    /// implicit snapshot at creation) and iterates in lexicographical order of the keys
    /// (leaf indices). Errors during iteration (e.g., deserialization issues) cause the
    /// iterator to skip the problematic item and attempt to continue.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs
    ///   during iterator creation.
    fn iter_leaves(&self) -> Result<Box<dyn Iterator<Item = (u64, SmtLeaf)> + '_>, StorageError> {
        let cf = self.cf_handle(LEAVES_CF)?;
        let mut read_opts = ReadOptions::default();
        // Total-order seek guarantees full lexicographic traversal regardless of any
        // prefix extractor configured on the column family.
        read_opts.set_total_order_seek(true);
        let db_iter = self.db.iterator_cf_opt(cf, read_opts, IteratorMode::Start);

        Ok(Box::new(RocksDbDirectLeafIterator { iter: db_iter }))
    }

    /// Returns an iterator over all `Subtree` instances across all subtree column families.
    ///
    /// The iterator observes a consistent point-in-time view per column family and iterates
    /// in lexicographical order of keys (subtree root NodeIndex) across all depth column
    /// families (24, 32, 40, 48, 56). Errors during iteration (e.g., deserialization issues)
    /// cause the iterator to skip the problematic item and attempt to continue.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If any subtree column family is missing or a RocksDB error occurs
    ///   during iterator creation.
    fn iter_subtrees(&self) -> Result<Box<dyn Iterator<Item = Subtree> + '_>, StorageError> {
        // All subtree column family names in order
        const SUBTREE_CFS: [&str; 5] =
            [SUBTREE_24_CF, SUBTREE_32_CF, SUBTREE_40_CF, SUBTREE_48_CF, SUBTREE_56_CF];

        let mut cf_handles = Vec::new();
        for cf_name in SUBTREE_CFS {
            cf_handles.push(self.cf_handle(cf_name)?);
        }

        Ok(Box::new(RocksDbSubtreeIterator::new(&self.db, cf_handles)))
    }

    /// Retrieves all depth 24 hashes for fast tree rebuilding.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If the depth24 column family is missing or a RocksDB error
    ///   occurs.
    /// - `StorageError::Value`: If any hash bytes are corrupt.
    fn get_depth24(&self) -> Result<Vec<(u64, Word)>, StorageError> {
        let cf = self.cf_handle(DEPTH_24_CF)?;
        let iter = self.db.iterator_cf(cf, IteratorMode::Start);
        let mut hashes = Vec::new();

        for item in iter {
            let (key_bytes, value_bytes) = item.map_err(map_rocksdb_err)?;

            let index = index_from_key_bytes(&key_bytes)?;
            let hash = Word::read_from_bytes(&value_bytes)?;

            hashes.push((index, hash));
        }

        Ok(hashes)
    }
}

/// Syncs the RocksDB database to disk before dropping the storage.
///
/// This ensures that all data is persisted to disk before the storage is dropped.
///
/// # Panics
/// - If the RocksDB sync operation fails.
impl Drop for RocksDbStorage {
    fn drop(&mut self) {
        // NOTE(review): panicking in `drop` aborts the process if it occurs while another
        // panic is already unwinding — confirm this hard-fail behavior is intended.
        if let Err(e) = self.sync() {
            panic!("failed to flush RocksDB on drop: {e}");
        }
    }
}

// ITERATORS
// --------------------------------------------------------------------------------------------

/// An iterator over leaves directly from RocksDB.
///
/// Wraps a `DBIteratorWithThreadMode` and handles deserialization of keys to `u64` (leaf index)
/// and values to `SmtLeaf`. Skips items that fail to deserialize or if a RocksDB error occurs
/// for an item, attempting to continue iteration.
struct RocksDbDirectLeafIterator<'a> {
    iter: DBIteratorWithThreadMode<'a, DB>,
}

impl Iterator for RocksDbDirectLeafIterator<'_> {
    type Item = (u64, SmtLeaf);

    fn next(&mut self) -> Option<Self::Item> {
        // `find_map` silently skips malformed entries instead of terminating iteration.
        self.iter.find_map(|result| {
            let (key_bytes, value_bytes) = result.ok()?;
            let leaf_idx = index_from_key_bytes(&key_bytes).ok()?;
            let leaf = SmtLeaf::read_from_bytes(&value_bytes).ok()?;
            Some((leaf_idx, leaf))
        })
    }
}

/// An iterator over subtrees from multiple RocksDB column families.
///
/// Iterates through all subtree column families (24, 32, 40, 48, 56) sequentially.
/// When one column family is exhausted, it moves to the next one.
struct RocksDbSubtreeIterator<'a> {
    db: &'a DB,
    // Handles ordered by depth: 24, 32, 40, 48, 56 (depth is derived from the position).
    cf_handles: Vec<&'a rocksdb::ColumnFamily>,
    // Position in `cf_handles` of the column family currently being iterated.
    current_cf_index: usize,
    // `None` once all column families are exhausted.
    current_iter: Option<DBIteratorWithThreadMode<'a, DB>>,
}

impl<'a> RocksDbSubtreeIterator<'a> {
    fn new(db: &'a DB, cf_handles: Vec<&'a rocksdb::ColumnFamily>) -> Self {
        let mut iterator = Self {
            db,
            cf_handles,
            current_cf_index: 0,
            current_iter: None,
        };
        iterator.advance_to_next_cf();
        iterator
    }

    // Opens an iterator over the column family at `current_cf_index`, or clears
    // `current_iter` when all column families are exhausted.
    fn advance_to_next_cf(&mut self) {
        if self.current_cf_index < self.cf_handles.len() {
            let cf = self.cf_handles[self.current_cf_index];
            let mut read_opts = ReadOptions::default();
            read_opts.set_total_order_seek(true);
            self.current_iter = Some(self.db.iterator_cf_opt(cf, read_opts, IteratorMode::Start));
        } else {
            self.current_iter = None;
        }
    }

    // Pulls the next deserializable subtree out of `iter`, skipping malformed entries.
    fn try_next_from_iter(
        iter: &mut DBIteratorWithThreadMode<'_, DB>,
        cf_index: usize,
    ) -> Option<Subtree> {
        iter.find_map(|result| {
            let (key_bytes, value_bytes) = result.ok()?;
            // Column families are ordered 24, 32, 40, 48, 56, so the depth follows
            // directly from the bucket position.
            let depth = 24 + (cf_index * 8) as u8;

            let node_idx = subtree_root_from_key_bytes(&key_bytes, depth).ok()?;
            let value_vec = value_bytes.into_vec();
            Subtree::from_vec(node_idx, &value_vec).ok()
        })
    }
}

impl Iterator for RocksDbSubtreeIterator<'_> {
    type Item = Subtree;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let iter = self.current_iter.as_mut()?;

            // Try to get the next valid subtree from current iterator
            if let Some(subtree) = Self::try_next_from_iter(iter, self.current_cf_index) {
                return Some(subtree);
            }

            // Current CF exhausted, advance to next
            self.current_cf_index += 1;
            self.advance_to_next_cf();

            // If no more CFs, we're done
            self.current_iter.as_ref()?;
        }
    }
}

// ROCKSDB CONFIGURATION
// --------------------------------------------------------------------------------------------

/// Configuration for RocksDB storage used by the Sparse Merkle Tree implementation.
///
/// This struct contains the essential configuration parameters needed to initialize
/// and optimize RocksDB for SMT storage operations. It provides sensible defaults
/// while allowing customization for specific performance requirements.
#[derive(Debug, Clone)]
pub struct RocksDbConfig {
    /// The filesystem path where the RocksDB database will be stored.
    ///
    /// This should be a directory path that the application has read/write permissions for.
    /// The database will create multiple files in this directory to store data, logs, and
    /// metadata.
    pub(crate) path: PathBuf,

    /// The size of the RocksDB block cache in bytes.
    ///
    /// This cache stores frequently accessed data blocks in memory to improve read performance.
    /// Larger cache sizes generally improve read performance but consume more memory.
    /// Default: 1GB (1 << 30 bytes)
    pub(crate) cache_size: usize,

    /// The maximum number of files that RocksDB can have open simultaneously.
    ///
    /// This setting affects both memory usage and the number of file descriptors used by the
    /// process. Higher values may improve performance for databases with many SST files but
    /// increase resource usage. Default: 512 files
    pub(crate) max_open_files: i32,
}

impl RocksDbConfig {
    /// Creates a new RocksDbConfig with the given database path and default settings.
    ///
    /// # Arguments
    /// * `path` - The filesystem path where the RocksDB database will be stored. This can be any
    ///   type that converts into a `PathBuf`.
    ///
    /// # Default Settings
    /// * `cache_size`: 1GB (1,073,741,824 bytes)
    /// * `max_open_files`: 512
    ///
    /// # Examples
    /// ```
    /// use miden_large_smt_backend_rocksdb::RocksDbConfig;
    ///
    /// let config = RocksDbConfig::new("/path/to/database");
    /// ```
    pub fn new<P: Into<PathBuf>>(path: P) -> Self {
        Self {
            path: path.into(),
            cache_size: 1 << 30,
            max_open_files: 512,
        }
    }

    /// Sets the block cache size for RocksDB.
    ///
    /// The block cache stores frequently accessed data blocks in memory to improve read
    /// performance. Larger cache sizes generally improve read performance but consume more
    /// memory.
    ///
    /// # Arguments
    /// * `size` - The cache size in bytes.
    ///
    /// # Examples
    /// ```
    /// use miden_large_smt_backend_rocksdb::RocksDbConfig;
    ///
    /// let config = RocksDbConfig::new("/path/to/database")
    ///     .with_cache_size(2 * 1024 * 1024 * 1024); // 2GB cache
    /// ```
    #[must_use]
    pub fn with_cache_size(mut self, size: usize) -> Self {
        self.cache_size = size;
        self
    }

    /// Sets the maximum number of files that RocksDB can have open simultaneously.
    ///
    /// This setting affects both memory usage and the number of file descriptors used by the
    /// process. Higher values may improve performance for databases with many SST files but
    /// increase resource usage.
    ///
    /// # Arguments
    /// * `count` - The maximum number of open files. Must be positive.
    ///
    /// # Examples
    /// ```
    /// use miden_large_smt_backend_rocksdb::RocksDbConfig;
    ///
    /// let config = RocksDbConfig::new("/path/to/database")
    ///     .with_max_open_files(1024); // Allow up to 1024 open files
    /// ```
    #[must_use]
    pub fn with_max_open_files(mut self, count: i32) -> Self {
        self.max_open_files = count;
        self
    }
}

// SUBTREE DB KEY
// --------------------------------------------------------------------------------------------

/// Compact key wrapper for variable-length subtree prefixes.
///
/// * `bytes` always holds the big-endian 8-byte value.
/// * `len` is how many leading bytes are significant (3-7).
+#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)] +pub(crate) struct KeyBytes { + bytes: [u8; 8], + len: u8, +} + +impl KeyBytes { + #[inline(always)] + pub fn new(value: u64, keep: usize) -> Self { + debug_assert!((3..=7).contains(&keep)); + let bytes = value.to_be_bytes(); + debug_assert!(bytes[..8 - keep].iter().all(|&b| b == 0)); + Self { bytes, len: keep as u8 } + } + + #[inline(always)] + pub fn as_slice(&self) -> &[u8] { + &self.bytes[8 - self.len as usize..] + } +} + +impl AsRef<[u8]> for KeyBytes { + #[inline(always)] + fn as_ref(&self) -> &[u8] { + self.as_slice() + } +} + +// HELPERS +// -------------------------------------------------------------------------------------------- + +/// Deserializes an index (u64) from a RocksDB key byte slice. +/// Expects `key_bytes` to be exactly 8 bytes long. +/// +/// # Errors +/// - `StorageError::BadKeyLen`: If `key_bytes` is not 8 bytes long or conversion fails. +fn index_from_key_bytes(key_bytes: &[u8]) -> Result { + if key_bytes.len() != 8 { + return Err(StorageError::BadKeyLen { expected: 8, found: key_bytes.len() }); + } + let mut arr = [0u8; 8]; + arr.copy_from_slice(key_bytes); + Ok(u64::from_be_bytes(arr)) +} + +/// Reconstructs a `NodeIndex` from the variable-length subtree key stored in `RocksDB`. +/// +/// * `key_bytes` is the big-endian tail of the 64-bit value: +/// - depth 56 → 7 bytes +/// - depth 48 → 6 bytes +/// - depth 40 → 5 bytes +/// - depth 32 → 4 bytes +/// - depth 24 → 3 bytes +/// +/// # Errors +/// * `StorageError::Unsupported` - `depth` is not one of 24/32/40/48/56. +/// * `StorageError::DeserializationError` - `key_bytes.len()` does not match the length required by +/// `depth`. 
+#[inline(always)] +fn subtree_root_from_key_bytes(key_bytes: &[u8], depth: u8) -> Result { + let expected = match depth { + 24 => 3, + 32 => 4, + 40 => 5, + 48 => 6, + 56 => 7, + d => return Err(StorageError::Unsupported(format!("unsupported subtree depth {d}"))), + }; + + if key_bytes.len() != expected { + return Err(StorageError::BadSubtreeKeyLen { depth, expected, found: key_bytes.len() }); + } + let mut buf = [0u8; 8]; + buf[8 - expected..].copy_from_slice(key_bytes); + let value = u64::from_be_bytes(buf); + Ok(NodeIndex::new_unchecked(depth, value)) +} + +/// Helper that maps an SMT depth to its column family. +#[inline(always)] +fn cf_for_depth(depth: u8) -> &'static str { + match depth { + 24 => SUBTREE_24_CF, + 32 => SUBTREE_32_CF, + 40 => SUBTREE_40_CF, + 48 => SUBTREE_48_CF, + 56 => SUBTREE_56_CF, + _ => panic!("unsupported subtree depth: {depth}"), + } +} diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 17dcf9619..a5531d46f 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -15,24 +15,25 @@ version.workspace = true workspace = true [dependencies] -anyhow = { workspace = true } -deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } -deadpool-diesel = { features = ["sqlite"], version = "0.6" } -diesel = { features = ["numeric", "sqlite"], version = "2.3" } -diesel_migrations = { features = ["sqlite"], version = "2.3" } -fs-err = { workspace = true } -futures = { workspace = true } -hex = { version = "0.4" } -indexmap = { workspace = true } -libsqlite3-sys = { workspace = true } -miden-block-prover = { workspace = true } -miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } -miden-node-db = { workspace = true } -miden-node-proto = { workspace = true } -miden-node-proto-build = { features = ["internal"], workspace = true } -miden-node-utils = { workspace = true } -miden-remote-prover-client = { features = ["block-prover"], workspace = true } 
-miden-standards = { workspace = true } +anyhow = { workspace = true } +deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } +deadpool-diesel = { features = ["sqlite"], version = "0.6" } +diesel = { features = ["numeric", "sqlite"], version = "2.3" } +diesel_migrations = { features = ["sqlite"], version = "2.3" } +fs-err = { workspace = true } +futures = { workspace = true } +hex = { version = "0.4" } +indexmap = { workspace = true } +libsqlite3-sys = { workspace = true } +miden-block-prover = { workspace = true } +miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } +miden-large-smt-backend-rocksdb = { optional = true, workspace = true } +miden-node-db = { workspace = true } +miden-node-proto = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-utils = { workspace = true } +miden-remote-prover-client = { features = ["block-prover"], workspace = true } +miden-standards = { workspace = true } # TODO remove `testing` from `miden-protocol`, required for `BlockProof::new_dummy` miden-protocol = { features = ["std", "testing"], workspace = true } pretty_assertions = { workspace = true } @@ -72,7 +73,7 @@ termtree = "1.0" [features] default = ["rocksdb"] -rocksdb = ["miden-crypto/rocksdb"] +rocksdb = ["dep:miden-large-smt-backend-rocksdb"] [[bench]] harness = false diff --git a/crates/store/benches/account_tree.rs b/crates/store/benches/account_tree.rs index 8c3f1009e..e69da7714 100644 --- a/crates/store/benches/account_tree.rs +++ b/crates/store/benches/account_tree.rs @@ -3,7 +3,7 @@ use std::path::Path; use std::sync::atomic::{AtomicUsize, Ordering}; use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; -use miden_crypto::merkle::smt::{RocksDbConfig, RocksDbStorage}; +use miden_large_smt_backend_rocksdb::{RocksDbConfig, RocksDbStorage}; use miden_node_store::AccountTreeWithHistory; use miden_protocol::Word; use 
miden_protocol::account::AccountId; diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index 2508c9d2d..f9815190b 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -2,6 +2,8 @@ use std::collections::{BTreeMap, HashMap}; +#[cfg(feature = "rocksdb")] +use miden_large_smt_backend_rocksdb::RocksDbStorage; use miden_protocol::account::{AccountId, AccountIdPrefix}; use miden_protocol::block::BlockNumber; use miden_protocol::block::account_tree::{AccountMutationSet, AccountTree, AccountWitness}; @@ -32,7 +34,7 @@ pub type InMemoryAccountTree = AccountTree>; #[cfg(feature = "rocksdb")] /// Convenience for a persistent account tree. -pub type PersistentAccountTree = AccountTree>; +pub type PersistentAccountTree = AccountTree>; // HISTORICAL ERROR TYPES // ================================================================================================ diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 77cd9f4f4..af678899e 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -13,6 +13,8 @@ use std::num::NonZeroUsize; use std::path::Path; use miden_crypto::merkle::mmr::Mmr; +#[cfg(feature = "rocksdb")] +use miden_large_smt_backend_rocksdb::{RocksDbConfig, RocksDbStorage}; use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::NullifierTree; use miden_protocol::block::{BlockNumber, Blockchain}; @@ -23,11 +25,6 @@ use miden_protocol::{Felt, FieldElement, Word}; #[cfg(feature = "rocksdb")] use tracing::info; use tracing::instrument; -#[cfg(feature = "rocksdb")] -use { - miden_crypto::merkle::smt::RocksDbStorage, - miden_protocol::crypto::merkle::smt::RocksDbConfig, -}; use crate::COMPONENT; use crate::db::Db; From 965282b7490e3f3ef6dca64545d3a9b21f34bde2 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 4 Mar 2026 09:30:56 +0100 Subject: [PATCH 73/77] feat(store): 
optimize account updates (#1567) --- CHANGELOG.md | 57 +- .../store/src/db/models/queries/accounts.rs | 534 ++++++++++---- .../src/db/models/queries/accounts/delta.rs | 243 ++++++ .../db/models/queries/accounts/delta/tests.rs | 691 ++++++++++++++++++ .../src/db/models/queries/accounts/tests.rs | 303 +++++++- crates/store/src/db/models/queries/mod.rs | 2 + crates/store/src/db/tests.rs | 55 +- crates/store/src/inner_forest/mod.rs | 210 +++--- crates/store/src/inner_forest/tests.rs | 50 ++ 9 files changed, 1814 insertions(+), 331 deletions(-) create mode 100644 crates/store/src/db/models/queries/accounts/delta.rs create mode 100644 crates/store/src/db/models/queries/accounts/delta/tests.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a23eb125..cc1cef52c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -79,34 +79,35 @@ ### Enhancements -- Cleanup old account data from the database on apply block ([#1304](https://github.com/0xMiden/node/issues/1304)). -- Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/node/pull/1381)). -- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/node/pull/1383)). -- Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/node/pull/1392)). -- Added `GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/node/pull/1410)). -- Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/node/pull/1419)). -- Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/node/pull/1420)). -- The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/node/pull/1433)). -- Added pagination to `GetNetworkAccountIds` store endpoint ([#1452](https://github.com/0xMiden/node/pull/1452)). 
-- Integrated NTX Builder with validator via `SubmitProvenTransaction` RPC ([#1453](https://github.com/0xMiden/node/pull/1453)). -- Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/node/pull/1457)). -- Added partial storage map queries to RPC ([#1428](https://github.com/0xMiden/node/pull/1428)). -- Added explorer status to the `miden-network-monitor` binary ([#1450](https://github.com/0xMiden/node/pull/1450)). -- Added validated transactions check to block validation logic in Validator ([#1460](https://github.com/0xMiden/node/pull/1460)). -- Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/node/pull/1484)). -- Added DB schema change check ([#1268](https://github.com/0xMiden/node/pull/1485)). -- Added foreign account support to validator ([#1493](https://github.com/0xMiden/node/pull/1493)). -- Decoupled ntx-builder from block-producer startup by loading network accounts asynchronously via a background task ([#1495](https://github.com/0xMiden/node/pull/1495)). -- Improved DB query performance for account queries ([#1496](https://github.com/0xMiden/node/pull/1496)). -- The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/node/pull/1512)). -- Limited number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/node/pull/1517)). -- Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/node/pull/1520)). -- Pin tool versions in CI ([#1523](https://github.com/0xMiden/node/pull/1523)). -- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/node/pull/1529)). -- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/node/issues/1534)). 
-- Ensure store terminates on nullifier tree or account tree root vs header mismatch (#[#1569](https://github.com/0xMiden/node/pull/1569)). -- Added support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/node/pull/1521)). -- Use paged queries for tree rebuilding to reduce memory usage during startup ([#1536](https://github.com/0xMiden/node/pull/1536)). +- Cleanup old account data from the database on apply block ([#1304](https://github.com/0xMiden/miden-node/issues/1304)). +- Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). +- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). +- Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). +- Added `GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/miden-node/pull/1410)). +- Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). +- Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)). +- The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/miden-node/pull/1433)). +- Added pagination to `GetNetworkAccountIds` store endpoint ([#1452](https://github.com/0xMiden/miden-node/pull/1452)). +- Integrated NTX Builder with validator via `SubmitProvenTransaction` RPC ([#1453](https://github.com/0xMiden/miden-node/pull/1453)). +- Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/miden-node/pull/1457)). +- Added partial storage map queries to RPC ([#1428](https://github.com/0xMiden/miden-node/pull/1428)). 
+- Added explorer status to the `miden-network-monitor` binary ([#1450](https://github.com/0xMiden/miden-node/pull/1450)).
+- Added validated transactions check to block validation logic in Validator ([#1460](https://github.com/0xMiden/miden-node/pull/1460)).
+- Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/miden-node/pull/1484)).
+- Added DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)).
+- Added foreign account support to validator ([#1493](https://github.com/0xMiden/miden-node/pull/1493)).
+- Decoupled ntx-builder from block-producer startup by loading network accounts asynchronously via a background task ([#1495](https://github.com/0xMiden/miden-node/pull/1495)).
+- Improved DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496)).
+- The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)).
+- Limited number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)).
+- Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)).
+- Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)).
+- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)).
+- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)).
+- Improve speed of account updates ([#1567](https://github.com/0xMiden/miden-node/pull/1567)).
+- Ensure store terminates on nullifier tree or account tree root vs header mismatch ([#1569](https://github.com/0xMiden/miden-node/pull/1569)).
+- Added support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/miden-node/pull/1521)). +- Use paged queries for tree rebuilding to reduce memory usage during startup ([#1536](https://github.com/0xMiden/miden-node/pull/1536)). ### Changes diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 1b6445c8e..f859a826a 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -19,12 +19,10 @@ use diesel::{ }; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; use miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES; -use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, AccountCode, - AccountDelta, AccountId, AccountStorage, AccountStorageHeader, @@ -38,6 +36,7 @@ use miden_protocol::account::{ use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; use miden_protocol::block::{BlockAccountUpdate, BlockNumber}; use miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{Felt, Word}; use crate::COMPONENT; use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, raw_sql_to_nonce}; @@ -52,10 +51,21 @@ pub(crate) use at_block::{ select_account_vault_at_block, }; +mod delta; +use delta::{ + AccountStateForInsert, + PartialAccountState, + apply_storage_delta, + select_minimal_account_state_headers, + select_vault_balances_by_faucet_ids, +}; + #[cfg(test)] mod tests; type StorageMapValueRow = (i64, String, Vec, Vec); +type StorageHeaderWithEntries = + (AccountStorageHeader, BTreeMap>); // NETWORK ACCOUNT TYPE // ================================================================================================ @@ -162,7 +172,7 @@ pub(crate) fn select_account( /// `State` which contains an `SmtForest` to serve the latest and most recent /// 
historical data. // TODO: remove eventually once refactoring is complete -fn select_full_account( +pub(crate) fn select_full_account( conn: &mut SqliteConnection, account_id: AccountId, ) -> Result { @@ -738,12 +748,41 @@ pub(crate) fn select_account_storage_map_values( /// Select latest account storage by querying `accounts.storage_header` where `is_latest=true` /// and reconstructing full storage from the header plus map values from /// `account_storage_map_values`. +/// +/// Attention: For large accounts it is prohibitively expensive! pub(crate) fn select_latest_account_storage( conn: &mut SqliteConnection, account_id: AccountId, ) -> Result { - use schema::account_storage_map_values as t; + let (storage_header, map_entries_by_slot) = + select_latest_account_storage_components(conn, account_id)?; + // Reconstruct StorageSlots from header slots + map entries + let slots = + Result::, DatabaseError>::from_iter(storage_header.slots().map(|slot_header| { + let slot = match slot_header.slot_type() { + StorageSlotType::Value => { + // For value slots, the header value IS the slot value + StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) + }, + StorageSlotType::Map => { + // For map slots, reconstruct from map entries + let entries = + map_entries_by_slot.get(slot_header.name()).cloned().unwrap_or_default(); + let storage_map = StorageMap::with_entries(entries.into_iter())?; + StorageSlot::with_map(slot_header.name().clone(), storage_map) + }, + }; + Ok(slot) + }))?; + Ok(AccountStorage::new(slots)?) +} + +/// Fetch account storage header and all storage maps +pub(crate) fn select_latest_account_storage_components( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result { let account_id_bytes = account_id.to_bytes(); // Query storage header blob for this account where is_latest = true @@ -755,51 +794,95 @@ pub(crate) fn select_latest_account_storage( .optional()? 
.flatten(); - let Some(blob) = storage_blob else { - // No storage means empty storage - return Ok(AccountStorage::new(Vec::new())?); + let header = match storage_blob { + Some(blob) => AccountStorageHeader::read_from_bytes(&blob)?, + None => AccountStorageHeader::new(Vec::new())?, }; - // Deserialize the AccountStorageHeader from the blob - let header = AccountStorageHeader::read_from_bytes(&blob)?; + let entries = select_latest_storage_map_entries_all(conn, &account_id)?; + Ok((header, entries)) +} + +// TODO this is expensive and should only be called from tests +fn select_latest_storage_map_entries_all( + conn: &mut SqliteConnection, + account_id: &AccountId, +) -> Result>, DatabaseError> { + use schema::account_storage_map_values as t; - // Query all latest map values for this account let map_values: Vec<(String, Vec, Vec)> = SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) - .filter(t::account_id.eq(&account_id_bytes)) + .filter(t::account_id.eq(&account_id.to_bytes())) .filter(t::is_latest.eq(true)) .load(conn)?; - // Group map values by slot name - let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); + group_storage_map_entries(map_values) +} + +fn select_latest_storage_map_entries_for_slots( + conn: &mut SqliteConnection, + account_id: &AccountId, + slot_names: &[StorageSlotName], +) -> Result>, DatabaseError> { + use schema::account_storage_map_values as t; + + if slot_names.is_empty() { + return Ok(BTreeMap::new()); + } + + if let [slot_name] = slot_names { + let entries = select_latest_storage_map_entries_for_slot(conn, account_id, slot_name)?; + if entries.is_empty() { + return Ok(BTreeMap::new()); + } + + let mut map_entries = BTreeMap::new(); + map_entries.insert(slot_name.clone(), entries); + return Ok(map_entries); + } + + let slot_names = Vec::from_iter(slot_names.iter().cloned().map(StorageSlotName::to_raw_sql)); + let map_values: Vec<(String, Vec, Vec)> = + SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) + 
.filter(t::account_id.eq(&account_id.to_bytes())) + .filter(t::is_latest.eq(true)) + .filter(t::slot_name.eq_any(slot_names)) + .load(conn)?; + + group_storage_map_entries(map_values) +} + +fn select_latest_storage_map_entries_for_slot( + conn: &mut SqliteConnection, + account_id: &AccountId, + slot_name: &StorageSlotName, +) -> Result, DatabaseError> { + use schema::account_storage_map_values as t; + + let map_values: Vec<(String, Vec, Vec)> = + SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id.to_bytes())) + .filter(t::is_latest.eq(true)) + .filter(t::slot_name.eq(slot_name.clone().to_raw_sql())) + .load(conn)?; + + Ok(group_storage_map_entries(map_values)?.remove(slot_name).unwrap_or_default()) +} + +fn group_storage_map_entries( + map_values: Vec<(String, Vec, Vec)>, +) -> Result>, DatabaseError> { + let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); for (slot_name_str, key_bytes, value_bytes) in map_values { let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) })?; let key = Word::read_from_bytes(&key_bytes)?; let value = Word::read_from_bytes(&value_bytes)?; - map_entries_by_slot.entry(slot_name).or_default().push((key, value)); - } - - // Reconstruct StorageSlots from header slots + map entries - let mut slots = Vec::new(); - for slot_header in header.slots() { - let slot = match slot_header.slot_type() { - StorageSlotType::Value => { - // For value slots, the header value IS the slot value - StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) - }, - StorageSlotType::Map => { - // For map slots, reconstruct from map entries - let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); - let storage_map = StorageMap::with_entries(entries)?; - StorageSlot::with_map(slot_header.name().clone(), storage_map) - }, - }; - slots.push(slot); + 
map_entries_by_slot.entry(slot_name).or_default().insert(key, value); } - Ok(AccountStorage::new(slots)?) + Ok(map_entries_by_slot) } // ACCOUNT MUTATION @@ -940,8 +1023,171 @@ pub(crate) fn insert_account_storage_map_value( Ok(update_count + insert_count) } +type PendingStorageInserts = Vec<(AccountId, StorageSlotName, Word, Word)>; +type PendingAssetInserts = Vec<(AccountId, AssetVaultKey, Option)>; + +fn prepare_full_account_update( + update: &BlockAccountUpdate, + account: Account, +) -> Result<(AccountStateForInsert, PendingStorageInserts, PendingAssetInserts), DatabaseError> { + let account_id = account.id(); + + // sanity check the commitment of account matches the final state commitment + if account.to_commitment() != update.final_state_commitment() { + return Err(DatabaseError::AccountCommitmentsMismatch { + calculated: account.to_commitment(), + expected: update.final_state_commitment(), + }); + } + + // collect storage-map inserts to apply after account upsert + let mut storage = Vec::new(); + for slot in account.storage().slots() { + if let StorageSlotContent::Map(storage_map) = slot.content() { + for (key, value) in storage_map.entries() { + storage.push((account_id, slot.name().clone(), *key, *value)); + } + } + } + + // collect vault-asset inserts to apply after account upsert + let mut assets = Vec::new(); + for asset in account.vault().assets() { + // Only insert assets with non-zero values for fungible assets + let should_insert = match asset { + Asset::Fungible(fungible) => fungible.amount() > 0, + Asset::NonFungible(_) => true, + }; + if should_insert { + assets.push((account_id, asset.vault_key(), Some(asset))); + } + } + + Ok((AccountStateForInsert::FullAccount(account), storage, assets)) +} + +/// Prepare partial delta data for account upserts and follow-up storage and vault inserts. 
+fn prepare_partial_account_update( + conn: &mut SqliteConnection, + update: &BlockAccountUpdate, + account_id: AccountId, + delta: &miden_protocol::account::delta::AccountDelta, + block_num: BlockNumber, +) -> Result<(AccountStateForInsert, PendingStorageInserts, PendingAssetInserts), DatabaseError> { + // Build the minimal account state needed for partial delta application. + // Only load the storage map entries and vault balances that will receive updates. + // The next line fetches the header, which will always change unless the delta is empty. + let state_headers = select_minimal_account_state_headers(conn, account_id)?; + + // --- Process asset updates. --------------------------------- + // Only query balances for faucet_ids that are being updated. + let faucet_ids = + Vec::from_iter(delta.vault().fungible().iter().map(|(faucet_id, _)| *faucet_id)); + let prev_balances = select_vault_balances_by_faucet_ids(conn, account_id, &faucet_ids)?; + + // Encode `Some` as update and `None` as removal. + let mut assets = Vec::new(); + + // Update fungible assets. + for (faucet_id, amount_delta) in delta.vault().fungible().iter() { + let prev_amount = prev_balances.get(faucet_id).copied().unwrap_or(0); + let prev_asset = FungibleAsset::new(*faucet_id, prev_amount)?; + let amount_abs = amount_delta.unsigned_abs(); + let delta = FungibleAsset::new(*faucet_id, amount_abs)?; + let new_balance = if *amount_delta < 0 { + prev_asset.sub(delta)? + } else { + prev_asset.add(delta)? + }; + let update_or_remove = if new_balance.amount() == 0 { + None + } else { + Some(Asset::from(new_balance)) + }; + assets.push((account_id, new_balance.vault_key(), update_or_remove)); + } + + // Update non-fungible assets. 
+ for (asset, delta_action) in delta.vault().non_fungible().iter() { + let asset_update = match delta_action { + NonFungibleDeltaAction::Add => Some(Asset::NonFungible(*asset)), + NonFungibleDeltaAction::Remove => None, + }; + assets.push((account_id, asset.vault_key(), asset_update)); + } + + // --- Collect storage map updates. --------------------------- + + let mut storage = Vec::new(); + for (slot_name, map_delta) in delta.storage().maps() { + for (key, value) in map_delta.entries() { + storage.push((account_id, slot_name.clone(), (*key).into(), *value)); + } + } + + // First collect entries that have associated changes. + let slot_names = Vec::from_iter(delta.storage().maps().filter_map(|(slot_name, map_delta)| { + if map_delta.is_empty() { + None + } else { + Some(slot_name.clone()) + } + })); + + let map_entries = select_latest_storage_map_entries_for_slots(conn, &account_id, &slot_names)?; + + // Apply the delta storage to the given storage header. + let new_storage_header = + apply_storage_delta(&state_headers.storage_header, delta.storage(), &map_entries)?; + + // --- Update the vault root by constructing the asset vault from DB. + let new_vault_root = { + let (_last_block, assets) = + select_account_vault_assets(conn, account_id, BlockNumber::GENESIS..=block_num)?; + let assets: Vec = assets.into_iter().filter_map(|entry| entry.asset).collect(); + let mut vault = AssetVault::new(&assets)?; + vault.apply_delta(delta.vault())?; + vault.root() + }; + + // --- Compute updated account state for the accounts row. --- + // Apply nonce delta. + let new_nonce_value = state_headers + .nonce + .as_int() + .checked_add(delta.nonce_delta().as_int()) + .ok_or_else(|| { + DatabaseError::DataCorrupted(format!("Nonce overflow for account {account_id}")) + })?; + let new_nonce = Felt::new(new_nonce_value); + + // Create minimal account state data for the row insert. 
+ let account_state = PartialAccountState { + nonce: new_nonce, + code_commitment: state_headers.code_commitment, + storage_header: new_storage_header, + vault_root: new_vault_root, + }; + + let account_header = miden_protocol::account::AccountHeader::new( + account_id, + account_state.nonce, + account_state.vault_root, + account_state.storage_header.to_commitment(), + account_state.code_commitment, + ); + + if account_header.to_commitment() != update.final_state_commitment() { + return Err(DatabaseError::AccountCommitmentsMismatch { + calculated: account_header.to_commitment(), + expected: update.final_state_commitment(), + }); + } + + Ok((AccountStateForInsert::PartialState(account_state), storage, assets)) +} + /// Attention: Assumes the account details are NOT null! The schema explicitly allows this though! -#[expect(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -965,7 +1211,7 @@ pub(crate) fn upsert_accounts( }; // Preserve the original creation block when updating existing accounts. - let created_at_block = QueryDsl::select( + let created_at_block_raw = QueryDsl::select( schema::accounts::table.filter( schema::accounts::account_id .eq(&account_id_bytes) @@ -977,95 +1223,34 @@ pub(crate) fn upsert_accounts( .optional() .map_err(DatabaseError::Diesel)? .unwrap_or(block_num_raw); + let created_at_block = BlockNumber::from_raw_sql(created_at_block_raw)?; - // NOTE: we collect storage / asset inserts to apply them only after the account row is - // written. The storage and vault tables have FKs pointing to `accounts (account_id, + // NOTE: we collect storage / asset inserts to apply them only after the account row is + // written. The storage and vault tables have FKs pointing to accounts `(account_id, // block_num)`, so inserting them earlier would violate those constraints when inserting a // brand-new account. 
- let (full_account, pending_storage_inserts, pending_asset_inserts) = match update.details() + let (account_state, pending_storage_inserts, pending_asset_inserts) = match update.details() { - AccountUpdateDetails::Private => (None, vec![], vec![]), + AccountUpdateDetails::Private => (AccountStateForInsert::Private, vec![], vec![]), + // New account is always a full account, but also comes as an update AccountUpdateDetails::Delta(delta) if delta.is_full_state() => { - let account = Account::try_from(delta)?; + let account = Account::try_from(delta) + .expect("Delta to full account always works for full state deltas"); debug_assert_eq!(account_id, account.id()); - if account.to_commitment() != update.final_state_commitment() { - return Err(DatabaseError::AccountCommitmentsMismatch { - calculated: account.to_commitment(), - expected: update.final_state_commitment(), - }); - } - - // collect storage-map inserts to apply after account upsert - let mut storage = Vec::new(); - for slot in account.storage().slots() { - if let StorageSlotContent::Map(storage_map) = slot.content() { - for (key, value) in storage_map.entries() { - storage.push((account_id, slot.name().clone(), *key, *value)); - } - } - } - - // collect vault-asset inserts to apply after account upsert - let mut assets = Vec::new(); - for asset in account.vault().assets() { - // Only insert assets with non-zero values for fungible assets - let should_insert = match asset { - Asset::Fungible(fungible) => fungible.amount() > 0, - Asset::NonFungible(_) => true, - }; - if should_insert { - assets.push((account_id, asset.vault_key(), Some(asset))); - } - } - - (Some(account), storage, assets) + prepare_full_account_update(update, account)? 
}, + // Update of an existing account AccountUpdateDetails::Delta(delta) => { - // Reconstruct the full account from database tables - let account = select_full_account(conn, account_id)?; - - // --- collect storage map updates ---------------------------- - - let mut storage = Vec::new(); - for (slot_name, map_delta) in delta.storage().maps() { - for (key, value) in map_delta.entries() { - storage.push((account_id, slot_name.clone(), (*key).into(), *value)); - } - } - - // apply delta to the account; we need to do this before we process asset updates - // because we currently need to get the current value of fungible assets from the - // account - let account_after = apply_delta(account, delta, &update.final_state_commitment())?; - - // --- process asset updates ---------------------------------- - - let mut assets = Vec::new(); - - for (faucet_id, _) in delta.vault().fungible().iter() { - let current_amount = account_after.vault().get_balance(*faucet_id).unwrap(); - let asset: Asset = FungibleAsset::new(*faucet_id, current_amount)?.into(); - let update_or_remove = if current_amount == 0 { None } else { Some(asset) }; - - assets.push((account_id, asset.vault_key(), update_or_remove)); - } - - for (asset, delta_action) in delta.vault().non_fungible().iter() { - let asset_update = match delta_action { - NonFungibleDeltaAction::Add => Some(Asset::NonFungible(*asset)), - NonFungibleDeltaAction::Remove => None, - }; - assets.push((account_id, asset.vault_key(), asset_update)); - } - - (Some(account_after), storage, assets) + prepare_partial_account_update(conn, update, account_id, delta, block_num)? 
}, }; - if let Some(code) = full_account.as_ref().map(Account::code) { + // Insert account _code_ for full accounts (new account creation) + if let AccountStateForInsert::FullAccount(ref account) = account_state { + let code = account.code(); let code_value = AccountCodeRowInsert { code_commitment: code.commitment().to_bytes(), code: code.to_bytes(), @@ -1087,22 +1272,30 @@ pub(crate) fn upsert_accounts( .set(schema::accounts::is_latest.eq(false)) .execute(conn)?; - let account_value = AccountRowInsert { - account_id: account_id_bytes, - network_account_type: network_account_type.to_raw_sql(), - account_commitment: update.final_state_commitment().to_bytes(), - block_num: block_num_raw, - nonce: full_account.as_ref().map(|account| nonce_to_raw_sql(account.nonce())), - code_commitment: full_account - .as_ref() - .map(|account| account.code().commitment().to_bytes()), - // Store only the header (slot metadata + map roots), not full storage with map contents - storage_header: full_account - .as_ref() - .map(|account| account.storage().to_header().to_bytes()), - vault_root: full_account.as_ref().map(|account| account.vault().root().to_bytes()), - is_latest: true, - created_at_block, + let account_value = match &account_state { + AccountStateForInsert::Private => AccountRowInsert::new_private( + account_id, + network_account_type, + update.final_state_commitment(), + block_num, + created_at_block, + ), + AccountStateForInsert::FullAccount(account) => AccountRowInsert::new_from_account( + account_id, + network_account_type, + update.final_state_commitment(), + block_num, + created_at_block, + account, + ), + AccountStateForInsert::PartialState(state) => AccountRowInsert::new_from_partial( + account_id, + network_account_type, + update.final_state_commitment(), + block_num, + created_at_block, + state, + ), }; diesel::insert_into(schema::accounts::table) @@ -1128,25 +1321,6 @@ pub(crate) fn upsert_accounts( Ok(count) } -/// Deserializes account and applies account delta. 
-pub(crate) fn apply_delta( - mut account: Account, - delta: &AccountDelta, - final_state_commitment: &Word, -) -> crate::db::Result { - account.apply_delta(delta)?; - - let actual_commitment = account.to_commitment(); - if &actual_commitment != final_state_commitment { - return Err(DatabaseError::AccountCommitmentsMismatch { - calculated: actual_commitment, - expected: *final_state_commitment, - }); - } - - Ok(account) -} - #[derive(Insertable, Debug, Clone)] #[diesel(table_name = schema::account_codes)] pub(crate) struct AccountCodeRowInsert { @@ -1169,6 +1343,76 @@ pub(crate) struct AccountRowInsert { pub(crate) created_at_block: i64, } +impl AccountRowInsert { + /// Creates an insert row for a private account (no public state). + fn new_private( + account_id: AccountId, + network_account_type: NetworkAccountType, + account_commitment: Word, + block_num: BlockNumber, + created_at_block: BlockNumber, + ) -> Self { + Self { + account_id: account_id.to_bytes(), + network_account_type: network_account_type.to_raw_sql(), + account_commitment: account_commitment.to_bytes(), + block_num: block_num.to_raw_sql(), + nonce: None, + code_commitment: None, + storage_header: None, + vault_root: None, + is_latest: true, + created_at_block: created_at_block.to_raw_sql(), + } + } + + /// Creates an insert row from a full account (new account creation). 
+ fn new_from_account( + account_id: AccountId, + network_account_type: NetworkAccountType, + account_commitment: Word, + block_num: BlockNumber, + created_at_block: BlockNumber, + account: &Account, + ) -> Self { + Self { + account_id: account_id.to_bytes(), + network_account_type: network_account_type.to_raw_sql(), + account_commitment: account_commitment.to_bytes(), + block_num: block_num.to_raw_sql(), + nonce: Some(nonce_to_raw_sql(account.nonce())), + code_commitment: Some(account.code().commitment().to_bytes()), + storage_header: Some(account.storage().to_header().to_bytes()), + vault_root: Some(account.vault().root().to_bytes()), + is_latest: true, + created_at_block: created_at_block.to_raw_sql(), + } + } + + /// Creates an insert row from a partial account state (delta update). + fn new_from_partial( + account_id: AccountId, + network_account_type: NetworkAccountType, + account_commitment: Word, + block_num: BlockNumber, + created_at_block: BlockNumber, + state: &PartialAccountState, + ) -> Self { + Self { + account_id: account_id.to_bytes(), + network_account_type: network_account_type.to_raw_sql(), + account_commitment: account_commitment.to_bytes(), + block_num: block_num.to_raw_sql(), + nonce: Some(nonce_to_raw_sql(state.nonce)), + code_commitment: Some(state.code_commitment.to_bytes()), + storage_header: Some(state.storage_header.to_bytes()), + vault_root: Some(state.vault_root.to_bytes()), + is_latest: true, + created_at_block: created_at_block.to_raw_sql(), + } + } +} + #[derive(Insertable, AsChangeset, Debug, Clone)] #[diesel(table_name = schema::account_vault_assets)] pub(crate) struct AccountAssetRowInsert { diff --git a/crates/store/src/db/models/queries/accounts/delta.rs b/crates/store/src/db/models/queries/accounts/delta.rs new file mode 100644 index 000000000..7a554130c --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/delta.rs @@ -0,0 +1,243 @@ +//! Optimized delta update support for account updates. +//! +//! 
Provides functions and types for applying partial delta updates to accounts +//! without loading the full account state. Avoids loading: +//! - Full account code bytes +//! - All storage map entries +//! - All vault assets +//! +//! Instead, only the minimal data needed for the update is fetched. + +use std::collections::BTreeMap; + +use diesel::query_dsl::methods::SelectDsl; +use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, SqliteConnection}; +use miden_protocol::account::delta::AccountStorageDelta; +use miden_protocol::account::{ + Account, + AccountId, + AccountStorageHeader, + StorageMap, + StorageSlotHeader, + StorageSlotName, +}; +use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{EMPTY_WORD, Felt, Word}; + +use crate::db::models::conv::raw_sql_to_nonce; +use crate::db::schema; +use crate::errors::DatabaseError; + +#[cfg(test)] +mod tests; + +// TYPES +// ================================================================================================ + +/// Raw row type for account state delta queries. +/// +/// Fields: (`nonce`, `code_commitment`, `storage_header`) +#[derive(diesel::prelude::Queryable)] +struct AccountStateDeltaRow { + nonce: Option, + code_commitment: Option>, + storage_header: Option>, +} + +/// Data needed for applying a delta update to an existing account. +/// Fetches only the minimal data required, avoiding loading full code and storage. +#[derive(Debug, Clone)] +pub(super) struct AccountStateHeadersForDelta { + pub nonce: Felt, + pub code_commitment: Word, + pub storage_header: AccountStorageHeader, +} + +/// Minimal account state computed from a partial delta update. +/// Contains only the fields needed for the accounts table row insert. 
+#[derive(Debug, Clone)] +pub(super) struct PartialAccountState { + pub nonce: Felt, + pub code_commitment: Word, + pub storage_header: AccountStorageHeader, + pub vault_root: Word, +} + +/// Represents the account state to be inserted, either from a full account +/// or from a partial delta update. +pub(super) enum AccountStateForInsert { + /// Private account - no public state stored + Private, + /// Full account state (from full-state delta, i.e., new account) + FullAccount(Account), + /// Partial account state (from partial delta, i.e., existing account update) + PartialState(PartialAccountState), +} + +// QUERIES +// ================================================================================================ + +/// Selects the minimal account state needed for applying a delta update. +/// +/// Optimized query that only fetches: +/// - `nonce` (to add `nonce_delta`) +/// - `code_commitment` (unchanged in partial deltas) +/// - `storage_header` (to apply storage delta) +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT nonce, code_commitment, storage_header +/// FROM accounts +/// WHERE account_id = ?1 AND is_latest = 1 +/// ``` +pub(super) fn select_minimal_account_state_headers( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result { + let row: AccountStateDeltaRow = SelectDsl::select( + schema::accounts::table, + ( + schema::accounts::nonce, + schema::accounts::code_commitment, + schema::accounts::storage_header, + ), + ) + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .get_result(conn) + .optional()? + .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; + + let nonce = raw_sql_to_nonce(row.nonce.ok_or_else(|| { + DatabaseError::DataCorrupted(format!("No nonce found for account {account_id}")) + })?); + + let code_commitment = row + .code_commitment + .map(|bytes| Word::read_from_bytes(&bytes)) + .transpose()? 
+ .ok_or_else(|| { + DatabaseError::DataCorrupted(format!( + "No code_commitment found for account {account_id}" + )) + })?; + + let storage_header = match row.storage_header { + Some(bytes) => AccountStorageHeader::read_from_bytes(&bytes)?, + None => AccountStorageHeader::new(Vec::new())?, + }; + + Ok(AccountStateHeadersForDelta { nonce, code_commitment, storage_header }) +} + +/// Selects vault balances for specific faucet IDs. +/// +/// Optimized query that only fetches balances for the faucet IDs +/// that are being updated by a delta, rather than loading all vault assets. +/// +/// Returns a map from `faucet_id` to the current balance (0 if not found). +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT vault_key, asset +/// FROM account_vault_assets +/// WHERE account_id = ?1 AND is_latest = 1 AND vault_key IN (?2, ?3, ...) +/// ``` +pub(super) fn select_vault_balances_by_faucet_ids( + conn: &mut SqliteConnection, + account_id: AccountId, + faucet_ids: &[AccountId], +) -> Result, DatabaseError> { + use schema::account_vault_assets as vault; + + if faucet_ids.is_empty() { + return Ok(BTreeMap::new()); + } + + let account_id_bytes = account_id.to_bytes(); + + // Compute vault keys for each faucet ID + let vault_keys: Vec> = Result::from_iter(faucet_ids.iter().map(|faucet_id| { + let asset = FungibleAsset::new(*faucet_id, 0) + .map_err(|_| DatabaseError::DataCorrupted(format!("Invalid faucet id {faucet_id}")))?; + let key: Word = asset.vault_key().into(); + Ok::<_, DatabaseError>(key.to_bytes()) + }))?; + + let entries: Vec<(Vec, Option>)> = + SelectDsl::select(vault::table, (vault::vault_key, vault::asset)) + .filter(vault::account_id.eq(&account_id_bytes)) + .filter(vault::is_latest.eq(true)) + .filter(vault::vault_key.eq_any(&vault_keys)) + .load(conn)?; + + let mut balances = BTreeMap::from_iter(faucet_ids.iter().map(|faucet_id| (*faucet_id, 0))); + + for (_vault_key_bytes, maybe_asset_bytes) in entries { + if let Some(asset_bytes) = maybe_asset_bytes { + let 
asset = Asset::read_from_bytes(&asset_bytes)?; + if let Asset::Fungible(fungible) = asset { + balances.insert(fungible.faucet_id(), fungible.amount()); + } + } + } + + Ok(balances) +} + +// HELPER FUNCTIONS +// ================================================================================================ + +/// Applies storage delta to an existing storage header using precomputed map roots. +/// +/// For value slots, updates the slot value directly. +/// For map slots, uses the precomputed roots for updated maps. +pub(super) fn apply_storage_delta( + header: &AccountStorageHeader, + delta: &AccountStorageDelta, + map_entries: &BTreeMap>, +) -> Result { + let mut value_updates: BTreeMap<&StorageSlotName, Word> = BTreeMap::new(); + let mut map_updates: BTreeMap<&StorageSlotName, Word> = BTreeMap::new(); + + for (slot_name, new_value) in delta.values() { + value_updates.insert(slot_name, *new_value); + } + + for (slot_name, map_delta) in delta.maps() { + if map_delta.is_empty() { + continue; + } + + let mut entries = map_entries.get(slot_name).cloned().unwrap_or_default(); + for (key, value) in map_delta.entries() { + if *value == EMPTY_WORD { + entries.remove(&(*key).into()); + } else { + entries.insert((*key).into(), *value); + } + } + + let storage_map = StorageMap::with_entries(entries.into_iter()) + .map_err(DatabaseError::StorageMapError)?; + map_updates.insert(slot_name, storage_map.root()); + } + + let slots = Vec::from_iter(header.slots().map(|slot| { + let slot_name = slot.name(); + if let Some(&new_value) = value_updates.get(slot_name) { + StorageSlotHeader::new(slot_name.clone(), slot.slot_type(), new_value) + } else if let Some(&new_root) = map_updates.get(slot_name) { + StorageSlotHeader::new(slot_name.clone(), slot.slot_type(), new_root) + } else { + slot.clone() + } + })); + + AccountStorageHeader::new(slots).map_err(|e| { + DatabaseError::DataCorrupted(format!("Failed to create storage header: {e:?}")) + }) +} diff --git 
a/crates/store/src/db/models/queries/accounts/delta/tests.rs b/crates/store/src/db/models/queries/accounts/delta/tests.rs new file mode 100644 index 000000000..37e4db1f8 --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/delta/tests.rs @@ -0,0 +1,691 @@ +//! +//! Tests for delta update functionality. + +use std::collections::BTreeMap; + +use assert_matches::assert_matches; +use diesel::{Connection, ExpressionMethods, QueryDsl, RunQueryDsl, SqliteConnection}; +use diesel_migrations::MigrationHarness; +use miden_node_utils::fee::test_fee_params; +use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; +use miden_protocol::account::component::AccountComponentMetadata; +use miden_protocol::account::delta::{ + AccountStorageDelta, + AccountUpdateDetails, + AccountVaultDelta, + StorageMapDelta, + StorageSlotDelta, +}; +use miden_protocol::account::{ + AccountBuilder, + AccountComponent, + AccountDelta, + AccountId, + AccountStorageMode, + AccountType, + StorageMap, + StorageSlot, + StorageSlotName, +}; +use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::testing::account_id::{ + ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, + ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET_1, +}; +use miden_protocol::utils::Serializable; +use miden_protocol::{EMPTY_WORD, Felt, Word}; +use miden_standards::account::auth::AuthSingleSig; +use miden_standards::code_builder::CodeBuilder; + +use crate::db::migrations::MIGRATIONS; +use crate::db::models::queries::accounts::{ + select_account_header_with_storage_header_at_block, + select_account_vault_at_block, + select_full_account, + upsert_accounts, +}; +use crate::db::schema::accounts; + +fn setup_test_db() -> SqliteConnection { + let mut conn = + SqliteConnection::establish(":memory:").expect("Failed to create in-memory database"); + + 
conn.run_pending_migrations(MIGRATIONS).expect("Failed to run migrations"); + + conn +} + +fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { + use crate::db::schema::block_headers; + + let secret_key = SecretKey::new(); + let block_header = BlockHeader::new( + 1_u8.into(), + Word::default(), + block_num, + Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + secret_key.public_key(), + test_fee_params(), + 0_u8.into(), + ); + let signature = secret_key.sign(block_header.commitment()); + + diesel::insert_into(block_headers::table) + .values(( + block_headers::block_num.eq(i64::from(block_num.as_u32())), + block_headers::block_header.eq(block_header.to_bytes()), + block_headers::signature.eq(signature.to_bytes()), + block_headers::commitment.eq(block_header.commitment().to_bytes()), + )) + .execute(conn) + .expect("Failed to insert block header"); +} + +/// Tests that the optimized delta update path produces the same results as the old +/// method that loads the full account. +/// +/// Covers partial deltas that update: +/// - Nonce (via `nonce_delta`) +/// - Value storage slots +/// - Vault assets (fungible) starting from empty vault +/// +/// The test ensures the optimized code path in `upsert_accounts` produces correct results +/// by comparing the final account state against a manually constructed expected state. +#[test] +#[expect( + clippy::too_many_lines, + reason = "test exercises multiple storage and vault paths" +)] +fn optimized_delta_matches_full_account_method() { + // Use deterministic account seed to keep account IDs stable. + const ACCOUNT_SEED: [u8; 32] = [10u8; 32]; + // Use fixed block numbers to ensure deterministic ordering. + const BLOCK_NUM_1: u32 = 1; + const BLOCK_NUM_2: u32 = 2; + // Use explicit slot indices to avoid magic numbers. 
+ const SLOT_INDEX_PRIMARY: usize = 0; + const SLOT_INDEX_SECONDARY: usize = 1; + // Use fixed values to verify storage delta updates. + const INITIAL_SLOT_VALUES: [u64; 4] = [100, 200, 300, 400]; + const UPDATED_SLOT_VALUES: [u64; 4] = [111, 222, 333, 444]; + // Use fixed delta values to validate nonce and vault changes. + const NONCE_DELTA: u64 = 5; + const VAULT_AMOUNT: u64 = 500; + + let mut conn = setup_test_db(); + + // Create an account with value slots only (no map slots to avoid SmtForest complexity) + let slot_value_initial = Word::from([ + Felt::new(INITIAL_SLOT_VALUES[0]), + Felt::new(INITIAL_SLOT_VALUES[1]), + Felt::new(INITIAL_SLOT_VALUES[2]), + Felt::new(INITIAL_SLOT_VALUES[3]), + ]); + + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(SLOT_INDEX_PRIMARY), slot_value_initial), + StorageSlot::with_value(StorageSlotName::mock(SLOT_INDEX_SECONDARY), EMPTY_WORD), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + let account = AccountBuilder::new(ACCOUNT_SEED) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap(); + + let block_1 = BlockNumber::from(BLOCK_NUM_1); + let block_2 = BlockNumber::from(BLOCK_NUM_2); + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + + // Insert the initial account at block 1 (full state) - no vault assets + let delta_initial = AccountDelta::try_from(account.clone()).unwrap(); + let account_update_initial = BlockAccountUpdate::new( + 
account.id(), + account.to_commitment(), + AccountUpdateDetails::Delta(delta_initial), + ); + upsert_accounts(&mut conn, &[account_update_initial], block_1).expect("Initial upsert failed"); + + // Verify initial state + let full_account_before = + select_full_account(&mut conn, account.id()).expect("Failed to load full account"); + assert_eq!(full_account_before.nonce(), account.nonce()); + assert!( + full_account_before.vault().assets().next().is_none(), + "Vault should be empty initially" + ); + + // Create a partial delta to apply: + // - Increment nonce by 5 + // - Update the first value slot + // - Add 500 tokens to the vault (starting from empty) + + let new_slot_value = Word::from([ + Felt::new(UPDATED_SLOT_VALUES[0]), + Felt::new(UPDATED_SLOT_VALUES[1]), + Felt::new(UPDATED_SLOT_VALUES[2]), + Felt::new(UPDATED_SLOT_VALUES[3]), + ]); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // Find the slot name from the account's storage + let value_slot_name = + full_account_before.storage().slots().iter().next().unwrap().name().clone(); + + // Build the storage delta (value slot update only) + let storage_delta = { + let deltas = BTreeMap::from_iter([( + value_slot_name.clone(), + StorageSlotDelta::Value(new_slot_value), + )]); + AccountStorageDelta::from_raw(deltas) + }; + + // Build the vault delta (add 500 tokens to empty vault) + let vault_delta = { + let mut delta = AccountVaultDelta::default(); + let asset = Asset::Fungible(FungibleAsset::new(faucet_id, VAULT_AMOUNT).unwrap()); + delta.add_asset(asset).unwrap(); + delta + }; + + // Create a partial delta + let nonce_delta = Felt::new(NONCE_DELTA); + let partial_delta = AccountDelta::new( + full_account_before.id(), + storage_delta.clone(), + vault_delta.clone(), + nonce_delta, + ) + .unwrap(); + assert!(!partial_delta.is_full_state(), "Delta should be partial, not full state"); + + // Construct the expected final account by applying the delta + let expected_nonce = 
Felt::new(full_account_before.nonce().as_int() + nonce_delta.as_int()); + let expected_code_commitment = full_account_before.code().commitment(); + + let mut expected_account = full_account_before.clone(); + expected_account.apply_delta(&partial_delta).unwrap(); + let final_account_for_commitment = expected_account; + + let final_commitment = final_account_for_commitment.to_commitment(); + let expected_storage_commitment = final_account_for_commitment.storage().to_commitment(); + let expected_vault_root = final_account_for_commitment.vault().root(); + + // ----- Apply the partial delta via upsert_accounts (optimized path) ----- + let account_update = BlockAccountUpdate::new( + account.id(), + final_commitment, + AccountUpdateDetails::Delta(partial_delta), + ); + upsert_accounts(&mut conn, &[account_update], block_2).expect("Partial delta upsert failed"); + + // ----- VERIFY: Query the DB and check that optimized path produced correct results ----- + + let (header_after, storage_header_after) = + select_account_header_with_storage_header_at_block(&mut conn, account.id(), block_2) + .expect("Query should succeed") + .expect("Account should exist"); + + // Verify nonce + assert_eq!( + header_after.nonce(), + expected_nonce, + "Nonce mismatch: optimized={:?}, expected={:?}", + header_after.nonce(), + expected_nonce + ); + + // Verify code commitment (should be unchanged) + assert_eq!( + header_after.code_commitment(), + expected_code_commitment, + "Code commitment mismatch" + ); + + // Verify storage header commitment + assert_eq!( + storage_header_after.to_commitment(), + expected_storage_commitment, + "Storage header commitment mismatch" + ); + + // Verify vault assets + let vault_assets_after = select_account_vault_at_block(&mut conn, account.id(), block_2) + .expect("Query vault should succeed"); + + assert_eq!(vault_assets_after.len(), 1, "Should have 1 vault asset"); + assert_matches!(&vault_assets_after[0], Asset::Fungible(f) => { + assert_eq!(f.faucet_id(), 
faucet_id, "Faucet ID should match"); + assert_eq!(f.amount(), VAULT_AMOUNT, "Amount should be 500"); + }); + + // Verify the account commitment matches + assert_eq!( + header_after.to_commitment(), + final_commitment, + "Account commitment should match the expected final state" + ); + + // Also verify we can load the full account and it has correct state + let full_account_after = select_full_account(&mut conn, account.id()) + .expect("Failed to load full account after update"); + + assert_eq!(full_account_after.nonce(), expected_nonce, "Full account nonce mismatch"); + assert_eq!( + full_account_after.storage().to_commitment(), + expected_storage_commitment, + "Full account storage commitment mismatch" + ); + assert_eq!( + full_account_after.vault().root(), + expected_vault_root, + "Full account vault root mismatch" + ); +} + +#[test] +fn optimized_delta_updates_non_empty_vault() { + const ACCOUNT_SEED: [u8; 32] = [40u8; 32]; + const BLOCK_NUM_1: u32 = 1; + const BLOCK_NUM_2: u32 = 2; + const NONCE_DELTA: u64 = 1; + const INITIAL_AMOUNT: u64 = 700; + const ADDED_AMOUNT: u64 = 250; + const SLOT_INDEX: usize = 0; + + let mut conn = setup_test_db(); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let faucet_id_1 = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET_1).unwrap(); + let initial_asset = Asset::Fungible(FungibleAsset::new(faucet_id, INITIAL_AMOUNT).unwrap()); + + let component_storage = + vec![StorageSlot::with_value(StorageSlotName::mock(SLOT_INDEX), EMPTY_WORD)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc vault push.1 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + let account = AccountBuilder::new(ACCOUNT_SEED) + 
.account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .with_assets([initial_asset]) + .build_existing() + .unwrap(); + + let block_1 = BlockNumber::from(BLOCK_NUM_1); + let block_2 = BlockNumber::from(BLOCK_NUM_2); + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + + let delta_initial = AccountDelta::try_from(account.clone()).unwrap(); + let account_update_initial = BlockAccountUpdate::new( + account.id(), + account.to_commitment(), + AccountUpdateDetails::Delta(delta_initial), + ); + upsert_accounts(&mut conn, &[account_update_initial], block_1).expect("Initial upsert failed"); + + let full_account_before = + select_full_account(&mut conn, account.id()).expect("Failed to load full account"); + + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(Asset::Fungible(FungibleAsset::new(faucet_id_1, ADDED_AMOUNT).unwrap())) + .unwrap(); + vault_delta + .remove_asset(Asset::Fungible(FungibleAsset::new(faucet_id, INITIAL_AMOUNT).unwrap())) + .unwrap(); + + let partial_delta = AccountDelta::new( + account.id(), + AccountStorageDelta::new(), + vault_delta, + Felt::new(NONCE_DELTA), + ) + .unwrap(); + + let mut expected_account = full_account_before.clone(); + expected_account.apply_delta(&partial_delta).unwrap(); + let expected_commitment = expected_account.to_commitment(); + let expected_vault_root = expected_account.vault().root(); + + let account_update = BlockAccountUpdate::new( + account.id(), + expected_commitment, + AccountUpdateDetails::Delta(partial_delta), + ); + upsert_accounts(&mut conn, &[account_update], block_2).expect("Partial delta upsert failed"); + + let vault_assets_after = select_account_vault_at_block(&mut conn, account.id(), block_2) + .expect("Query vault should succeed"); + + 
assert_eq!(vault_assets_after.len(), 1, "Should have 1 vault asset"); + assert_matches!(&vault_assets_after[0], Asset::Fungible(f) => { + assert_eq!(f.faucet_id(), faucet_id_1, "Faucet ID should match"); + assert_eq!(f.amount(), ADDED_AMOUNT, "Amount should match"); + }); + + let full_account_after = select_full_account(&mut conn, account.id()) + .expect("Failed to load full account after update"); + + assert_eq!(full_account_after.vault().root(), expected_vault_root); + assert_eq!(full_account_after.to_commitment(), expected_commitment); +} + +#[test] +fn optimized_delta_updates_storage_map_header() { + // Use deterministic account seed to keep account IDs stable. + const ACCOUNT_SEED: [u8; 32] = [30u8; 32]; + // Use fixed block numbers to ensure deterministic ordering. + const BLOCK_NUM_1: u32 = 1; + const BLOCK_NUM_2: u32 = 2; + // Use explicit slot index to avoid magic numbers. + const SLOT_INDEX_MAP: usize = 3; + // Use fixed map values to validate root updates. + const MAP_KEY_VALUES: [u64; 4] = [7, 0, 0, 0]; + const MAP_VALUE_INITIAL: [u64; 4] = [10, 20, 30, 40]; + const MAP_VALUE_UPDATED: [u64; 4] = [50, 60, 70, 80]; + // Use nonzero nonce delta (required when storage/vault changes). 
+ const NONCE_DELTA: u64 = 1; + + let mut conn = setup_test_db(); + + let map_key = Word::from([ + Felt::new(MAP_KEY_VALUES[0]), + Felt::new(MAP_KEY_VALUES[1]), + Felt::new(MAP_KEY_VALUES[2]), + Felt::new(MAP_KEY_VALUES[3]), + ]); + let map_value_initial = Word::from([ + Felt::new(MAP_VALUE_INITIAL[0]), + Felt::new(MAP_VALUE_INITIAL[1]), + Felt::new(MAP_VALUE_INITIAL[2]), + Felt::new(MAP_VALUE_INITIAL[3]), + ]); + let map_value_updated = Word::from([ + Felt::new(MAP_VALUE_UPDATED[0]), + Felt::new(MAP_VALUE_UPDATED[1]), + Felt::new(MAP_VALUE_UPDATED[2]), + Felt::new(MAP_VALUE_UPDATED[3]), + ]); + + let storage_map = StorageMap::with_entries(vec![(map_key, map_value_initial)]).unwrap(); + let component_storage = + vec![StorageSlot::with_map(StorageSlotName::mock(SLOT_INDEX_MAP), storage_map)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc map push.1 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + let account = AccountBuilder::new(ACCOUNT_SEED) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap(); + + let block_1 = BlockNumber::from(BLOCK_NUM_1); + let block_2 = BlockNumber::from(BLOCK_NUM_2); + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + + let delta_initial = AccountDelta::try_from(account.clone()).unwrap(); + let account_update_initial = BlockAccountUpdate::new( + account.id(), + account.to_commitment(), + AccountUpdateDetails::Delta(delta_initial), + ); + upsert_accounts(&mut conn, &[account_update_initial], block_1).expect("Initial upsert failed"); + + 
let full_account_before = + select_full_account(&mut conn, account.id()).expect("Failed to load full account"); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(map_key, map_value_updated); + let storage_delta = AccountStorageDelta::from_raw(BTreeMap::from_iter([( + StorageSlotName::mock(SLOT_INDEX_MAP), + StorageSlotDelta::Map(map_delta), + )])); + + let partial_delta = AccountDelta::new( + account.id(), + storage_delta, + AccountVaultDelta::default(), + Felt::new(NONCE_DELTA), + ) + .unwrap(); + + let mut expected_account = full_account_before.clone(); + expected_account.apply_delta(&partial_delta).unwrap(); + let expected_commitment = expected_account.to_commitment(); + let expected_storage_commitment = expected_account.storage().to_commitment(); + + let account_update = BlockAccountUpdate::new( + account.id(), + expected_commitment, + AccountUpdateDetails::Delta(partial_delta), + ); + upsert_accounts(&mut conn, &[account_update], block_2).expect("Partial delta upsert failed"); + + let (header_after, storage_header_after) = + select_account_header_with_storage_header_at_block(&mut conn, account.id(), block_2) + .expect("Query should succeed") + .expect("Account should exist"); + + assert_eq!( + storage_header_after.to_commitment(), + expected_storage_commitment, + "Storage commitment should match after map delta" + ); + assert_eq!( + header_after.to_commitment(), + expected_commitment, + "Account commitment should match after map delta" + ); +} + +/// Tests that a private account update (no public state) is handled correctly. +/// +/// Private accounts store only the account commitment, not the full state. +#[test] +fn upsert_private_account() { + use miden_protocol::account::{AccountIdVersion, AccountStorageMode, AccountType}; + + // Use deterministic account seed to keep account IDs stable. + const ACCOUNT_ID_SEED: [u8; 15] = [20u8; 15]; + // Use fixed block number to keep test ordering deterministic. 
+ const BLOCK_NUM: u32 = 1; + // Use fixed commitment values to validate storage behavior. + const COMMITMENT_WORDS: [u64; 4] = [1, 2, 3, 4]; + + let mut conn = setup_test_db(); + + let block_num = BlockNumber::from(BLOCK_NUM); + insert_block_header(&mut conn, block_num); + + // Create a private account ID + let account_id = AccountId::dummy( + ACCOUNT_ID_SEED, + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Private, + ); + + let account_commitment = Word::from([ + Felt::new(COMMITMENT_WORDS[0]), + Felt::new(COMMITMENT_WORDS[1]), + Felt::new(COMMITMENT_WORDS[2]), + Felt::new(COMMITMENT_WORDS[3]), + ]); + + // Insert as private account + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Private); + + upsert_accounts(&mut conn, &[account_update], block_num) + .expect("Private account upsert failed"); + + // Verify the account exists and commitment matches + + let (stored_commitment, stored_nonce, stored_code): (Vec, Option, Option>) = + accounts::table + .filter(accounts::account_id.eq(account_id.to_bytes())) + .filter(accounts::is_latest.eq(true)) + .select((accounts::account_commitment, accounts::nonce, accounts::code_commitment)) + .first(&mut conn) + .expect("Account should exist in DB"); + + assert_eq!( + stored_commitment, + account_commitment.to_bytes(), + "Stored commitment should match" + ); + + // Private accounts have NULL for nonce, code_commitment, storage_header, vault_root + assert!(stored_nonce.is_none(), "Private account should have NULL nonce"); + assert!(stored_code.is_none(), "Private account should have NULL code_commitment"); +} + +/// Tests that a full-state delta (new account creation) is handled correctly. +/// +/// Full-state deltas contain the complete account state including code. +#[test] +fn upsert_full_state_delta() { + // Use deterministic account seed to keep account IDs stable. 
+ const ACCOUNT_SEED: [u8; 32] = [20u8; 32]; + // Use fixed block number to keep test ordering deterministic. + const BLOCK_NUM: u32 = 1; + // Use fixed slot values to validate storage behavior. + const SLOT_VALUES: [u64; 4] = [10, 20, 30, 40]; + // Use explicit slot index to avoid magic numbers. + const SLOT_INDEX: usize = 0; + + let mut conn = setup_test_db(); + + let block_num = BlockNumber::from(BLOCK_NUM); + insert_block_header(&mut conn, block_num); + + // Create an account with storage + let slot_value = Word::from([ + Felt::new(SLOT_VALUES[0]), + Felt::new(SLOT_VALUES[1]), + Felt::new(SLOT_VALUES[2]), + Felt::new(SLOT_VALUES[3]), + ]); + let component_storage = + vec![StorageSlot::with_value(StorageSlotName::mock(SLOT_INDEX), slot_value)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc bar push.2 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + let account = AccountBuilder::new(ACCOUNT_SEED) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap(); + + // Create a full-state delta from the account + let delta = AccountDelta::try_from(account.clone()).unwrap(); + assert!(delta.is_full_state(), "Delta should be full state"); + + let account_update = BlockAccountUpdate::new( + account.id(), + account.to_commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num) + .expect("Full-state delta upsert failed"); + + // Verify the account state was stored correctly + let (header, storage_header) = + 
select_account_header_with_storage_header_at_block(&mut conn, account.id(), block_num) + .expect("Query should succeed") + .expect("Account should exist"); + + assert_eq!(header.nonce(), account.nonce(), "Nonce should match"); + assert_eq!( + header.code_commitment(), + account.code().commitment(), + "Code commitment should match" + ); + assert_eq!( + storage_header.to_commitment(), + account.storage().to_commitment(), + "Storage commitment should match" + ); + + // Verify we can load the full account back + let loaded_account = + select_full_account(&mut conn, account.id()).expect("Should load full account"); + + assert_eq!(loaded_account.nonce(), account.nonce()); + assert_eq!(loaded_account.code().commitment(), account.code().commitment()); + assert_eq!(loaded_account.storage().to_commitment(), account.storage().to_commitment()); +} diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index 0065a1e2c..46fad7649 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -24,18 +24,23 @@ use miden_protocol::account::{ AccountId, AccountIdVersion, AccountStorage, + AccountStorageDelta, AccountStorageHeader, AccountStorageMode, AccountType, + AccountVaultDelta, StorageMap, + StorageMapDelta, StorageSlot, + StorageSlotContent, + StorageSlotDelta, StorageSlotName, StorageSlotType, }; use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::{Deserializable, Serializable}; -use miden_protocol::{EMPTY_WORD, Felt, Word}; +use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word}; use miden_standards::account::auth::AuthSingleSig; use miden_standards::code_builder::CodeBuilder; @@ -197,6 +202,56 @@ fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { .expect("Failed to insert block header"); } +fn 
create_account_with_map_storage( + slot_name: StorageSlotName, + entries: Vec<(Word, Word)>, +) -> Account { + let storage_map = StorageMap::with_entries(entries).unwrap(); + let component_storage = vec![StorageSlot::with_map(slot_name, storage_map)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc map push.1 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + AccountBuilder::new([9u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap() +} + +fn assert_storage_map_slot_entries( + storage: &AccountStorage, + slot_name: &StorageSlotName, + expected: &BTreeMap, +) { + let slot = storage + .slots() + .iter() + .find(|slot| slot.name() == slot_name) + .expect("expected storage slot"); + + let StorageSlotContent::Map(storage_map) = slot.content() else { + panic!("expected map slot"); + }; + + let entries = BTreeMap::from_iter(storage_map.entries().map(|(key, value)| (*key, *value))); + assert_eq!(&entries, expected, "map entries mismatch"); +} + // ACCOUNT HEADER AT BLOCK TESTS // ================================================================================================ @@ -647,6 +702,174 @@ fn test_upsert_accounts_with_empty_storage() { ); } +// STORAGE MAP LATEST ACCOUNT QUERY TESTS +// ================================================================================================ + +#[test] +fn test_select_latest_account_storage_ordering_semantics() { + let mut conn = setup_test_db(); + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let 
slot_name = StorageSlotName::mock(0); + let key_1 = Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let key_2 = Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let key_3 = Word::from([Felt::new(3), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + + let value_1 = Word::from([Felt::new(10), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let value_2 = Word::from([Felt::new(20), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let value_3 = Word::from([Felt::new(30), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + + let mut entries = vec![(key_2, value_2), (key_1, value_1), (key_3, value_3)]; + entries.reverse(); + + let account = create_account_with_map_storage(slot_name.clone(), entries.clone()); + let account_id = account.id(); + let account_commitment = account.to_commitment(); + + let mut reversed_entries = entries.clone(); + reversed_entries.reverse(); + let reordered_account = create_account_with_map_storage(slot_name.clone(), reversed_entries); + assert_eq!( + account.storage().to_commitment(), + reordered_account.storage().to_commitment(), + "storage commitments should be order-independent" + ); + + let delta = AccountDelta::try_from(account).unwrap(); + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + let storage = + select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + let expected = BTreeMap::from_iter(entries); + assert_storage_map_slot_entries(&storage, &slot_name, &expected); +} + +#[test] +fn test_select_latest_account_storage_multiple_slots() { + let mut conn = setup_test_db(); + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let slot_name_1 = StorageSlotName::mock(0); + let slot_name_2 = StorageSlotName::mock(1); + + let key_a = Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let key_b 
= Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + + let value_a = Word::from([Felt::new(11), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let value_b = Word::from([Felt::new(22), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + + let map_a = StorageMap::with_entries(vec![(key_a, value_a)]).unwrap(); + let map_b = StorageMap::with_entries(vec![(key_b, value_b)]).unwrap(); + + let component_storage = vec![ + StorageSlot::with_map(slot_name_2.clone(), map_b), + StorageSlot::with_map(slot_name_1.clone(), map_a), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc map push.1 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + let account = AccountBuilder::new([9u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap(); + + let account_id = account.id(); + let account_commitment = account.to_commitment(); + let delta = AccountDelta::try_from(account).unwrap(); + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + let storage = + select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + let expected_slot_1 = BTreeMap::from_iter([(key_a, value_a)]); + let expected_slot_2 = BTreeMap::from_iter([(key_b, value_b)]); + + assert_storage_map_slot_entries(&storage, &slot_name_1, &expected_slot_1); + assert_storage_map_slot_entries(&storage, &slot_name_2, &expected_slot_2); +} + +#[test] +fn 
test_select_latest_account_storage_slot_updates() { + let mut conn = setup_test_db(); + let block_1 = BlockNumber::from_epoch(0); + let block_2 = BlockNumber::from_epoch(1); + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + + let slot_name = StorageSlotName::mock(0); + let key_1 = Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let key_2 = Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + + let value_1 = Word::from([Felt::new(10), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let value_2 = Word::from([Felt::new(20), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let value_3 = Word::from([Felt::new(30), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + + let account = create_account_with_map_storage(slot_name.clone(), vec![(key_1, value_1)]); + let account_id = account.id(); + let account_commitment = account.to_commitment(); + + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key_1, value_2); + map_delta.insert(key_2, value_3); + let storage_delta = AccountStorageDelta::from_raw(BTreeMap::from_iter([( + slot_name.clone(), + StorageSlotDelta::Map(map_delta), + )])); + + let partial_delta = + AccountDelta::new(account_id, storage_delta, AccountVaultDelta::default(), Felt::new(1)) + .unwrap(); + + let mut expected_account = account.clone(); + expected_account.apply_delta(&partial_delta).unwrap(); + let expected_commitment = expected_account.to_commitment(); + + let account_update = BlockAccountUpdate::new( + account_id, + expected_commitment, + AccountUpdateDetails::Delta(partial_delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_2).expect("upsert_accounts failed"); + + let storage = + 
select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + let expected = BTreeMap::from_iter([(key_1, value_2), (key_2, value_3)]); + assert_storage_map_slot_entries(&storage, &slot_name, &expected); +} + // VAULT AT BLOCK HISTORICAL QUERY TESTS // ================================================================================================ @@ -692,9 +915,9 @@ fn test_select_account_vault_at_block_historical_with_updates() { // Insert vault asset at block 1: vault_key_1 = 1000 tokens let vault_key_1 = AssetVaultKey::new_unchecked(Word::from([ Felt::new(1), - Felt::new(0), - Felt::new(0), - Felt::new(0), + Felt::ZERO, + Felt::ZERO, + Felt::ZERO, ])); let asset_v1 = Asset::Fungible(FungibleAsset::new(faucet_id, 1000).unwrap()); @@ -709,9 +932,9 @@ fn test_select_account_vault_at_block_historical_with_updates() { // Add a second vault_key at block 2 let vault_key_2 = AssetVaultKey::new_unchecked(Word::from([ Felt::new(2), - Felt::new(0), - Felt::new(0), - Felt::new(0), + Felt::ZERO, + Felt::ZERO, + Felt::ZERO, ])); let asset_key2 = Asset::Fungible(FungibleAsset::new(faucet_id, 500).unwrap()); insert_account_vault_asset(&mut conn, account_id, block_2, vault_key_2, Some(asset_key2)) @@ -759,6 +982,66 @@ fn test_select_account_vault_at_block_historical_with_updates() { assert!(amounts.contains(&500), "Block 3 should have vault_key_2 with 500 tokens"); } +/// Tests that a 5-block history returns the correct asset per block. 
+#[test] +fn test_select_account_vault_at_block_exponential_updates() { + const BLOCK_COUNT: u32 = 5; + + use assert_matches::assert_matches; + use miden_protocol::asset::{AssetVaultKey, FungibleAsset}; + use miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET; + + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let blocks: Vec = (0..BLOCK_COUNT).map(BlockNumber::from).collect(); + + for block in &blocks { + insert_block_header(&mut conn, *block); + } + + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.to_commitment(), + AccountUpdateDetails::Delta(delta), + ); + + for block in &blocks { + upsert_accounts(&mut conn, std::slice::from_ref(&account_update), *block) + .expect("upsert_accounts failed"); + } + + let vault_key = AssetVaultKey::new_unchecked(Word::from([ + Felt::new(3), + Felt::ZERO, + Felt::ZERO, + Felt::ZERO, + ])); + + for (index, block) in blocks.iter().enumerate() { + let amount = 1u64 << index; + let asset = Asset::Fungible(FungibleAsset::new(faucet_id, amount).unwrap()); + insert_account_vault_asset(&mut conn, account_id, *block, vault_key, Some(asset)) + .expect("insert vault asset failed"); + } + + for (index, block) in blocks.iter().enumerate() { + let assets_at_block = select_account_vault_at_block(&mut conn, account_id, *block) + .expect("Query at block should succeed"); + + assert_eq!(assets_at_block.len(), 1, "Should have 1 asset at block"); + let expected_amount = 1u64 << index; + assert_matches!( + &assets_at_block[0], + Asset::Fungible(f) if f.amount() == expected_amount + ); + } +} + /// Tests that deleted vault assets (asset = None) are correctly excluded from results, /// and that the deduplication handles deletion entries properly. 
#[test] @@ -798,9 +1081,9 @@ fn test_select_account_vault_at_block_with_deletion() { // Insert vault asset at block 1 let vault_key = AssetVaultKey::new_unchecked(Word::from([ Felt::new(1), - Felt::new(0), - Felt::new(0), - Felt::new(0), + Felt::ZERO, + Felt::ZERO, + Felt::ZERO, ])); let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 1000).unwrap()); diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs index 35c38c5ad..ad2010f84 100644 --- a/crates/store/src/db/models/queries/mod.rs +++ b/crates/store/src/db/models/queries/mod.rs @@ -48,6 +48,8 @@ pub(crate) use notes::*; /// Apply a new block to the state /// +/// # Arguments +/// /// # Returns /// /// Number of records inserted and/or updated. diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 679186079..80ca27674 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -2456,13 +2456,12 @@ fn db_roundtrip_transactions() { let block_num = BlockNumber::from(1); create_block(&mut conn, block_num); - let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); - queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num) - .unwrap(); + let bob = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(bob, 0)], block_num).unwrap(); // Build two transaction headers with distinct data - let tx1 = mock_block_transaction(account_id, 1); - let tx2 = mock_block_transaction(account_id, 2); + let tx1 = mock_block_transaction(bob, 1); + let tx2 = mock_block_transaction(bob, 2); let ordered = OrderedTransactionHeaders::new_unchecked(vec![tx1.clone(), tx2.clone()]); // Insert @@ -2470,12 +2469,9 @@ fn db_roundtrip_transactions() { assert_eq!(count, 2, "Should insert 2 transactions"); // Retrieve - let (last_block, records) = queries::select_transactions_records( - &mut conn, - &[account_id], - 
BlockNumber::GENESIS..=block_num, - ) - .unwrap(); + let (last_block, records) = + queries::select_transactions_records(&mut conn, &[bob], BlockNumber::GENESIS..=block_num) + .unwrap(); assert_eq!(last_block, block_num, "Last block should match"); assert_eq!(records.len(), 2, "Should retrieve 2 transactions"); @@ -2487,42 +2483,21 @@ .iter() .find(|tx| tx.id() == record.transaction_id) .expect("Retrieved transaction should match one of the originals"); - assert_eq!( - record.transaction_id, - original.id(), - "TransactionId DB roundtrip must be symmetric" - ); - assert_eq!( - record.account_id, - original.account_id(), - "AccountId DB roundtrip must be symmetric" - ); - assert_eq!(record.block_num, block_num, "Block number must match"); - assert_eq!( - record.initial_state_commitment, - original.initial_state_commitment(), - "Initial state commitment DB roundtrip must be symmetric" - ); - assert_eq!( - record.final_state_commitment, - original.final_state_commitment(), - "Final state commitment DB roundtrip must be symmetric" - ); + // Assert roundtrip symmetry + assert_eq!(record.transaction_id, original.id()); + assert_eq!(record.account_id, original.account_id()); + assert_eq!(record.block_num, block_num); + assert_eq!(record.initial_state_commitment, original.initial_state_commitment()); + assert_eq!(record.final_state_commitment, original.final_state_commitment()); // Input notes are stored as nullifiers only let expected_nullifiers: Vec = original.input_notes().iter().map(InputNoteCommitment::nullifier).collect(); - assert_eq!( - record.nullifiers, expected_nullifiers, - "Nullifiers (from input notes) DB roundtrip must be symmetric" - ); + assert_eq!(record.nullifiers, expected_nullifiers); // Output notes are stored as note IDs only let expected_note_ids: Vec = original.output_notes().iter().map(NoteHeader::id).collect(); - assert_eq!( - record.output_notes, expected_note_ids, - "Output note IDs DB roundtrip must be symmetric" - );
+ assert_eq!(record.output_notes, expected_note_ids,); } } diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index dbff597c6..e70bdf0d3 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -230,7 +230,7 @@ impl InnerForest { // PUBLIC INTERFACE // -------------------------------------------------------------------------------------------- - /// Applies account updates from a block to the forest. + /// Updates the forest with account vault and storage changes from a delta. /// /// Iterates through account updates and applies each delta to the forest. /// Private accounts should be filtered out before calling this method. @@ -282,6 +282,7 @@ impl InnerForest { let account_id = delta.id(); let is_full_state = delta.is_full_state(); + // Validate full-state invariants in debug builds. #[cfg(debug_assertions)] if is_full_state { let has_vault_root = self.vault_roots.keys().any(|(id, _)| *id == account_id); @@ -294,16 +295,18 @@ impl InnerForest { ); } + // Apply vault changes. if is_full_state { self.insert_account_vault(block_num, account_id, delta.vault())?; } else if !delta.vault().is_empty() { self.update_account_vault(block_num, account_id, delta.vault())?; } + // Apply storage map changes. if is_full_state { - self.insert_account_storage(block_num, account_id, delta.storage()); + self.insert_account_storage(block_num, account_id, delta.storage())?; } else if !delta.storage().is_empty() { - self.update_account_storage(block_num, account_id, delta.storage()); + self.update_account_storage(block_num, account_id, delta.storage())?; } Ok(()) @@ -353,12 +356,78 @@ impl InnerForest { Ok(()) } - /// Updates the forest with vault changes from a delta. The vault delta is assumed to be - /// non-empty. 
+ fn insert_account_storage( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + storage_delta: &AccountStorageDelta, + ) -> Result<(), InnerForestError> { + for (slot_name, map_delta) in storage_delta.maps() { + let prev_root = self.get_latest_storage_map_root(account_id, slot_name); + assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); + + let raw_map_entries: Vec<(Word, Word)> = + Vec::from_iter(map_delta.entries().iter().filter_map(|(&key, &value)| { + if value == EMPTY_WORD { + None + } else { + Some((Word::from(key), value)) + } + })); + + if raw_map_entries.is_empty() { + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), prev_root); + self.storage_entries + .insert((account_id, slot_name.clone(), block_num), BTreeMap::new()); + + continue; + } + + let hashed_entries: Vec<(Word, Word)> = Vec::from_iter( + raw_map_entries.iter().map(|(key, value)| (StorageMap::hash_key(*key), *value)), + ); + + let new_root = self.forest.batch_insert(prev_root, hashed_entries.iter().copied())?; + + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), new_root); + + let num_entries = raw_map_entries.len(); + + let map_entries = BTreeMap::from_iter(raw_map_entries); + self.storage_entries + .insert((account_id, slot_name.clone(), block_num), map_entries); + + tracing::debug!( + target: crate::COMPONENT, + %account_id, + %block_num, + ?slot_name, + delta_entries = num_entries, + "Inserted storage map into forest" + ); + } + Ok(()) + } + + // ASSET VAULT DELTA PROCESSING + // -------------------------------------------------------------------------------------------- + + /// Updates the forest with vault changes from a delta and returns the new root. /// /// Processes both fungible and non-fungible asset changes, building entries for the vault SMT /// and tracking the new root. 
/// + /// # Arguments + /// + /// * `is_full_state` - If `true`, delta values are absolute (new account or DB reconstruction). + /// If `false`, delta values are relative changes applied to previous state. + /// + /// # Returns + /// + /// The new vault root after applying the delta. + /// /// # Errors /// /// Returns an error if applying a delta results in a negative balance. @@ -471,124 +540,47 @@ .unwrap_or_default() } - /// Inserts all storage maps from the provided storage delta into the forest. + /// Updates the forest with storage map changes from a delta and returns updated roots. /// - /// Assumes that storage maps for the provided account are not in the forest already. - fn insert_account_storage( + /// Processes storage map slot deltas, building SMTs for each modified slot + /// and tracking the new roots and accumulated entries. + /// + /// # Arguments + /// + /// * `storage_delta` - Relative storage map changes merged on top of the previous state; + /// keys of non-empty map deltas are hashed before insertion into the forest. + /// + /// # Returns + /// + /// A map from slot name to the new storage map root for that slot. 
+ fn update_account_storage( &mut self, block_num: BlockNumber, account_id: AccountId, - delta: &AccountStorageDelta, - ) { - for (slot_name, map_delta) in delta.maps() { - // get the latest root for this map, and make sure the root is for an empty tree - let prev_root = self.get_latest_storage_map_root(account_id, slot_name); - assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); - - // build a vector of raw entries and filter out any empty values; such values - // shouldn't be present in full-state deltas, but it is good to exclude them - // explicitly - let raw_map_entries: Vec<(Word, Word)> = map_delta - .entries() - .iter() - .filter_map(|(&key, &value)| { - if value == EMPTY_WORD { - None - } else { - Some((Word::from(key), value)) - } - }) - .collect(); - - // if the delta is empty, make sure we create an entry in the storage map roots map - // and storage entries map (so storage_map_entries() queries work) - if raw_map_entries.is_empty() { - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), prev_root); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), BTreeMap::new()); - - continue; - } - - // hash the keys before inserting into the forest, matching how `StorageMap` - // hashes keys before inserting into the SMT. 
- let hashed_entries: Vec<(Word, Word)> = raw_map_entries - .iter() - .map(|(key, value)| (StorageMap::hash_key(*key), *value)) - .collect(); - - // insert the updates into the forest and update storage map roots map - let new_root = self - .forest - .batch_insert(prev_root, hashed_entries.iter().copied()) - .expect("forest insertion should succeed"); - - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), new_root); - - assert!(!raw_map_entries.is_empty(), "a non-empty delta should have entries"); - let num_entries = raw_map_entries.len(); + storage_delta: &AccountStorageDelta, + ) -> Result, InnerForestError> { + let mut updated_roots = BTreeMap::new(); - // keep track of the state of storage map entries (using raw keys for delta merging) - // TODO: this is a temporary solution until the LargeSmtForest is implemented as - // tracking multiple versions of all storage maps will be prohibitively expensive - let map_entries = BTreeMap::from_iter(raw_map_entries); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), map_entries); + for (slot_name, map_delta) in storage_delta.maps() { + let prev_root = self.get_latest_storage_map_root(account_id, slot_name); - tracing::debug!( - target: crate::COMPONENT, - %account_id, - %block_num, - ?slot_name, - delta_entries = num_entries, - "Inserted storage map into forest" + let delta_entries = Vec::from_iter( + map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)), ); - } - } - /// Updates the forest with storage map changes from a delta. - /// - /// Processes storage map slot deltas, building SMTs for each modified slot and tracking the - /// new roots and accumulated entries. 
- fn update_account_storage( - &mut self, - block_num: BlockNumber, - account_id: AccountId, - delta: &AccountStorageDelta, - ) { - assert!(!delta.is_empty(), "expected the delta not to be empty"); - - for (slot_name, map_delta) in delta.maps() { - // map delta shouldn't be empty, but if it is for some reason, there is nothing to do - if map_delta.is_empty() { + if delta_entries.is_empty() { continue; } - // update the storage map tree in the forest and add an entry to the storage map roots - let prev_root = self.get_latest_storage_map_root(account_id, slot_name); - let delta_entries: Vec<(Word, Word)> = - map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); - - // Hash the keys before inserting into the forest, matching how StorageMap - // hashes keys before inserting into the SMT. - let hashed_entries: Vec<(Word, Word)> = delta_entries - .iter() - .map(|(key, value)| (StorageMap::hash_key(*key), *value)) - .collect(); + let hashed_entries = + delta_entries.iter().map(|(key, value)| (StorageMap::hash_key(*key), *value)); - let new_root = self - .forest - .batch_insert(prev_root, hashed_entries.iter().copied()) - .expect("forest insertion should succeed"); + let updated_root = self.forest.batch_insert(prev_root, hashed_entries)?; self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), new_root); + .insert((account_id, slot_name.clone(), block_num), updated_root); + updated_roots.insert(slot_name.clone(), updated_root); - // merge the delta with the latest entries in the map (using raw keys) - // TODO: this is a temporary solution until the LargeSmtForest is implemented as - // tracking multiple versions of all storage maps will be prohibitively expensive let mut latest_entries = self.get_latest_storage_map_entries(account_id, slot_name); for (key, value) in &delta_entries { if *value == EMPTY_WORD { @@ -610,6 +602,8 @@ impl InnerForest { "Updated storage map in forest" ); } + + Ok(updated_roots) } // TODO: tie 
in-memory forest retention to DB pruning policy once forest queries rely on it. diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 79bdbd0c8..1c043a2d6 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -445,6 +445,56 @@ fn test_storage_map_incremental_updates() { assert_ne!(root_1, root_3); } +#[test] +fn test_storage_map_removals() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + const SLOT_INDEX: usize = 3; + const KEY_1: [u32; 4] = [1, 0, 0, 0]; + const KEY_2: [u32; 4] = [2, 0, 0, 0]; + const VALUE_1: [u32; 4] = [10, 0, 0, 0]; + const VALUE_2: [u32; 4] = [20, 0, 0, 0]; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(SLOT_INDEX); + let key_1 = Word::from(KEY_1); + let key_2 = Word::from(KEY_2); + let value_1 = Word::from(VALUE_1); + let value_2 = Word::from(VALUE_2); + + let block_1 = BlockNumber::GENESIS.child(); + let mut map_delta_1 = StorageMapDelta::default(); + map_delta_1.insert(key_1, value_1); + map_delta_1.insert(key_2, value_2); + let raw_1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_1))]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw_1); + let delta_1 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); + + let block_2 = block_1.child(); + let map_delta_2 = StorageMapDelta::from_iters([key_1], []); + let raw_2 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_2))]); + let storage_delta_2 = AccountStorageDelta::from_raw(raw_2); + let delta_2 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_2); + forest.update_account(block_2, &delta_2).unwrap(); + + let entries = forest + .storage_map_entries(account_id, slot_name, block_2) + .expect("storage 
entries should be available"); + + let StorageMapEntries::AllEntries(entries) = entries.entries else { + panic!("expected entries without proofs"); + }; + + let entries_by_key = BTreeMap::from_iter(entries); + assert_eq!(entries_by_key.len(), 1); + assert_eq!(entries_by_key.get(&key_2), Some(&value_2)); + assert!(!entries_by_key.contains_key(&key_1)); +} + #[test] fn test_empty_storage_map_entries_query() { use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; From 4a4bfa507fa4b23aa98c10f3addfba7eccb71da0 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 4 Mar 2026 11:12:21 +0200 Subject: [PATCH 74/77] fix(store): network notes must be public (#1738) --- CHANGELOG.md | 1 + Cargo.lock | 21 ++- .../src/mempool/subscription.rs | 10 +- crates/ntx-builder/Cargo.toml | 2 +- crates/ntx-builder/src/actor/inflight_note.rs | 22 ++- crates/ntx-builder/src/actor/mod.rs | 8 +- crates/ntx-builder/src/builder.rs | 8 - crates/ntx-builder/src/coordinator.rs | 26 ++- crates/ntx-builder/src/db/mod.rs | 6 +- crates/ntx-builder/src/db/models/conv.rs | 21 +-- .../ntx-builder/src/db/models/queries/mod.rs | 15 +- .../src/db/models/queries/notes.rs | 23 ++- .../src/db/models/queries/tests.rs | 55 +++--- crates/ntx-builder/src/store.rs | 6 +- crates/proto/src/domain/mempool.rs | 4 +- crates/proto/src/domain/note.rs | 170 +++--------------- crates/proto/src/errors/mod.rs | 5 +- 17 files changed, 135 insertions(+), 268 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cc1cef52c..63ec1514b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ - Fixed `bundled start` panicking due to duplicate `data_directory` clap argument name between `BundledCommand::Start` and `NtxBuilderConfig` ([#1732](https://github.com/0xMiden/node/pull/1732)). 
- Fixed `bundled bootstrap` requiring `--validator.key.hex` or `--validator.key.kms-id` despite a default key being configured ([#1732](https://github.com/0xMiden/node/pull/1732)). +- Fixed incorrectly classifying private notes with the network attachment as network notes ([#1378](https://github.com/0xMiden/node/pull/1738)). ## v0.13.7 (2026-02-25) diff --git a/Cargo.lock b/Cargo.lock index 75fd39949..bb93114ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2629,7 +2629,7 @@ checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "miden-agglayer" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" dependencies = [ "fs-err", "miden-assembly", @@ -2700,7 +2700,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" dependencies = [ "miden-protocol", "thiserror 2.0.18", @@ -3006,7 +3006,6 @@ dependencies = [ "miden-remote-prover-client", "miden-standards", "miden-tx", - "prost", "rand_chacha", "rstest", "tempfile", @@ -3245,7 +3244,7 @@ dependencies = [ [[package]] name = "miden-protocol" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" dependencies = [ "bech32", "fs-err", @@ -3275,7 +3274,7 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = 
"git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" dependencies = [ "proc-macro2", "quote", @@ -3358,7 +3357,7 @@ dependencies = [ [[package]] name = "miden-standards" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" dependencies = [ "fs-err", "miden-assembly", @@ -3375,7 +3374,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3398,7 +3397,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" dependencies = [ "miden-processor", "miden-protocol", @@ -3411,7 +3410,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#3154a371939125e5cc3faf39a7c42447db67584f" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" dependencies = [ "miden-protocol", "miden-tx", @@ -4111,7 +4110,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ "heck", - "itertools 0.14.0", + "itertools 0.13.0", "log", "multimap", "once_cell", @@ -4133,7 +4132,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.114", diff --git a/crates/block-producer/src/mempool/subscription.rs b/crates/block-producer/src/mempool/subscription.rs index 6bfbf7eaa..8d0eb9094 100644 --- a/crates/block-producer/src/mempool/subscription.rs +++ b/crates/block-producer/src/mempool/subscription.rs @@ -2,9 +2,9 @@ use std::collections::{BTreeMap, HashSet}; use std::ops::Mul; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::NetworkNote; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::transaction::{OutputNote, TransactionId}; +use miden_standards::note::NetworkNoteExt; use tokio::sync::mpsc; use crate::domain::transaction::AuthenticatedTransaction; @@ -83,7 +83,13 @@ impl SubscriptionProvider { let network_notes = tx .output_notes() .filter_map(|note| match note { - OutputNote::Full(inner) => NetworkNote::try_from(inner.clone()).ok(), + // We check first to avoid cloning non-network notes. 
+ OutputNote::Full(inner) => inner.is_network_note().then_some( + inner + .clone() + .into_account_target_network_note() + .expect("we just checked that this is a network note"), + ), _ => None, }) .collect(); diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 9ef143cae..6110a7a6d 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -24,8 +24,8 @@ miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["tx-prover"], workspace = true } +miden-standards = { workspace = true } miden-tx = { default-features = true, workspace = true } -prost = { workspace = true } thiserror = { workspace = true } tokio = { features = ["rt-multi-thread"], workspace = true } tokio-stream = { workspace = true } diff --git a/crates/ntx-builder/src/actor/inflight_note.rs b/crates/ntx-builder/src/actor/inflight_note.rs index 4cc080862..401cc7d00 100644 --- a/crates/ntx-builder/src/actor/inflight_note.rs +++ b/crates/ntx-builder/src/actor/inflight_note.rs @@ -1,6 +1,6 @@ -use miden_node_proto::domain::note::SingleTargetNetworkNote; use miden_protocol::block::BlockNumber; -use miden_protocol::note::Note; +use miden_protocol::note::{Note, Nullifier}; +use miden_standards::note::AccountTargetNetworkNote; use crate::actor::has_backoff_passed; @@ -14,14 +14,14 @@ use crate::actor::has_backoff_passed; /// will likely be soon after the number that is recorded here. #[derive(Debug, Clone)] pub struct InflightNetworkNote { - note: SingleTargetNetworkNote, + note: AccountTargetNetworkNote, attempt_count: usize, last_attempt: Option, } impl InflightNetworkNote { /// Creates a new inflight network note. 
- pub fn new(note: SingleTargetNetworkNote) -> Self { + pub fn new(note: AccountTargetNetworkNote) -> Self { Self { note, attempt_count: 0, @@ -31,7 +31,7 @@ impl InflightNetworkNote { /// Reconstructs an inflight network note from its constituent parts (e.g., from DB rows). pub fn from_parts( - note: SingleTargetNetworkNote, + note: AccountTargetNetworkNote, attempt_count: usize, last_attempt: Option, ) -> Self { @@ -39,12 +39,12 @@ impl InflightNetworkNote { } /// Consumes the inflight network note and returns the inner network note. - pub fn into_inner(self) -> SingleTargetNetworkNote { + pub fn into_inner(self) -> AccountTargetNetworkNote { self.note } /// Returns a reference to the inner network note. - pub fn to_inner(&self) -> &SingleTargetNetworkNote { + pub fn to_inner(&self) -> &AccountTargetNetworkNote { &self.note } @@ -57,7 +57,7 @@ impl InflightNetworkNote { /// /// The note is available if the backoff period has passed. pub fn is_available(&self, block_num: BlockNumber) -> bool { - self.note.can_be_consumed(block_num).unwrap_or(true) + self.note.execution_hint().can_be_consumed(block_num).unwrap_or(true) && has_backoff_passed(block_num, self.last_attempt, self.attempt_count) } @@ -66,10 +66,14 @@ impl InflightNetworkNote { self.last_attempt = Some(block_num); self.attempt_count += 1; } + + pub fn nullifier(&self) -> Nullifier { + self.note.as_note().nullifier() + } } impl From for Note { fn from(value: InflightNetworkNote) -> Self { - value.into_inner().into() + value.into_inner().into_note() } } diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index ecb72552b..4533b6259 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -17,13 +17,14 @@ use miden_node_utils::lru_cache::LruCache; use miden_protocol::Word; use miden_protocol::account::{Account, AccountDelta}; use miden_protocol::block::BlockNumber; -use miden_protocol::note::{Note, NoteScript, Nullifier}; +use 
miden_protocol::note::{NoteScript, Nullifier}; use miden_protocol::transaction::TransactionId; use miden_remote_prover_client::RemoteTransactionProver; use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; use tokio_util::sync::CancellationToken; use url::Url; +use crate::actor::inflight_note::InflightNetworkNote; use crate::block_producer::BlockProducerClient; use crate::builder::ChainState; use crate::db::Db; @@ -386,10 +387,7 @@ impl AccountActor { Err(err) => { tracing::error!(err = err.as_report(), "network transaction failed"); self.mode = ActorMode::NoViableNotes; - let nullifiers: Vec<_> = notes - .into_iter() - .map(|note| Note::from(note.into_inner()).nullifier()) - .collect(); + let nullifiers: Vec<_> = notes.iter().map(InflightNetworkNote::nullifier).collect(); self.mark_notes_failed(&nullifiers, block_num).await; }, } diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 20090c5b9..adaee152a 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -212,14 +212,6 @@ impl NetworkTransactionBuilder { .await .context("failed to load notes from store")?; - let notes: Vec<_> = notes - .into_iter() - .map(|n| { - let miden_node_proto::domain::note::NetworkNote::SingleTarget(note) = n; - note - }) - .collect(); - // Write account and notes to DB. 
self.db .sync_account_from_store(account_id, account.clone(), notes.clone()) diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index a857bdc64..af2b840e4 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -5,7 +5,6 @@ use anyhow::Context; use miden_node_db::DatabaseError; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; use miden_protocol::account::delta::AccountUpdateDetails; use tokio::sync::mpsc::error::SendError; use tokio::sync::{Semaphore, mpsc}; @@ -239,10 +238,12 @@ impl Coordinator { // Determine target actors for each note. for note in network_notes { - let NetworkNote::SingleTarget(note) = note; - let network_account_id = note.account_id(); - if let Some(actor) = self.actor_registry.get(&network_account_id) { - target_actors.insert(network_account_id, actor); + let account = note.target_account_id(); + let account = NetworkAccountId::try_from(account) + .expect("network note target account should be a network account"); + + if let Some(actor) = self.actor_registry.get(&account) { + target_actors.insert(account, actor); } } } @@ -268,16 +269,13 @@ impl Coordinator { network_notes, account_delta, } => { - let notes: Vec = network_notes - .iter() - .map(|n| { - let NetworkNote::SingleTarget(note) = n; - note.clone() - }) - .collect(); - self.db - .handle_transaction_added(*id, account_delta.clone(), notes, nullifiers.clone()) + .handle_transaction_added( + *id, + account_delta.clone(), + network_notes.clone(), + nullifiers.clone(), + ) .await?; Ok(Vec::new()) }, diff --git a/crates/ntx-builder/src/db/mod.rs b/crates/ntx-builder/src/db/mod.rs index 47352e29e..37a31159b 100644 --- a/crates/ntx-builder/src/db/mod.rs +++ b/crates/ntx-builder/src/db/mod.rs @@ -3,13 +3,13 @@ use std::path::PathBuf; use anyhow::Context; use 
miden_node_db::DatabaseError; use miden_node_proto::domain::account::NetworkAccountId; -use miden_node_proto::domain::note::SingleTargetNetworkNote; use miden_protocol::Word; use miden_protocol::account::Account; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::note::{NoteScript, Nullifier}; use miden_protocol::transaction::TransactionId; +use miden_standards::note::AccountTargetNetworkNote; use tracing::{info, instrument}; use crate::COMPONENT; @@ -112,7 +112,7 @@ impl Db { &self, tx_id: TransactionId, account_delta: Option, - notes: Vec, + notes: Vec, nullifiers: Vec, ) -> Result<()> { self.inner @@ -173,7 +173,7 @@ impl Db { &self, account_id: NetworkAccountId, account: Account, - notes: Vec, + notes: Vec, ) -> Result<()> { self.inner .transact("sync_account_from_store", move |conn| { diff --git a/crates/ntx-builder/src/db/models/conv.rs b/crates/ntx-builder/src/db/models/conv.rs index 26bb99868..b32a29253 100644 --- a/crates/ntx-builder/src/db/models/conv.rs +++ b/crates/ntx-builder/src/db/models/conv.rs @@ -2,15 +2,12 @@ use miden_node_db::DatabaseError; use miden_node_proto::domain::account::NetworkAccountId; -use miden_node_proto::domain::note::SingleTargetNetworkNote; -use miden_node_proto::generated as proto; use miden_protocol::Word; use miden_protocol::account::{Account, AccountId}; use miden_protocol::block::{BlockHeader, BlockNumber}; -use miden_protocol::note::{Note, NoteScript, Nullifier}; +use miden_protocol::note::{NoteScript, Nullifier}; use miden_protocol::transaction::TransactionId; use miden_tx::utils::{Deserializable, Serializable}; -use prost::Message; // SERIALIZATION (domain → DB) // ================================================================================================ @@ -44,12 +41,6 @@ pub fn block_num_from_i64(val: i64) -> BlockNumber { BlockNumber::from(val as u32) } -/// Serializes a `SingleTargetNetworkNote` to bytes using its protobuf 
representation. -pub fn single_target_note_to_bytes(note: &SingleTargetNetworkNote) -> Vec { - let proto_note: proto::note::NetworkNote = Note::from(note.clone()).into(); - proto_note.encode_to_vec() -} - // DESERIALIZATION (DB → domain) // ================================================================================================ @@ -67,16 +58,6 @@ pub fn network_account_id_from_bytes(bytes: &[u8]) -> Result Result { - let proto_note = proto::note::NetworkNote::decode(bytes) - .map_err(|e| DatabaseError::deserialization("network note proto", e))?; - SingleTargetNetworkNote::try_from(proto_note) - .map_err(|e| DatabaseError::deserialization("network note conversion", e)) -} - pub fn word_to_bytes(word: &Word) -> Vec { word.to_bytes() } diff --git a/crates/ntx-builder/src/db/models/queries/mod.rs b/crates/ntx-builder/src/db/models/queries/mod.rs index 2ee11ee28..9018c7eb3 100644 --- a/crates/ntx-builder/src/db/models/queries/mod.rs +++ b/crates/ntx-builder/src/db/models/queries/mod.rs @@ -3,11 +3,12 @@ use diesel::prelude::*; use miden_node_db::DatabaseError; use miden_node_proto::domain::account::NetworkAccountId; -use miden_node_proto::domain::note::SingleTargetNetworkNote; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::note::Nullifier; use miden_protocol::transaction::TransactionId; +use miden_standards::note::AccountTargetNetworkNote; +use miden_tx::utils::Serializable; use crate::actor::account_effect::NetworkAccountEffect; use crate::db::models::conv as conversions; @@ -97,7 +98,7 @@ pub fn add_transaction( conn: &mut SqliteConnection, tx_id: &TransactionId, account_delta: Option<&AccountUpdateDetails>, - notes: &[SingleTargetNetworkNote], + notes: &[AccountTargetNetworkNote], nullifiers: &[Nullifier], ) -> Result<(), DatabaseError> { let tx_id_bytes = conversions::transaction_id_to_bytes(tx_id); @@ -138,9 +139,13 @@ pub fn add_transaction( // (the nullifier PK would 
otherwise cause a constraint violation). for note in notes { let insert = NoteInsert { - nullifier: conversions::nullifier_to_bytes(¬e.nullifier()), - account_id: conversions::network_account_id_to_bytes(note.account_id()), - note_data: conversions::single_target_note_to_bytes(note), + nullifier: conversions::nullifier_to_bytes(¬e.as_note().nullifier()), + account_id: conversions::network_account_id_to_bytes( + note.target_account_id() + .try_into() + .expect("network note's target account must be a network account"), + ), + note_data: note.as_note().to_bytes(), attempt_count: 0, last_attempt: None, created_by: Some(tx_id_bytes.clone()), diff --git a/crates/ntx-builder/src/db/models/queries/notes.rs b/crates/ntx-builder/src/db/models/queries/notes.rs index 1c0145a9b..b512e57bc 100644 --- a/crates/ntx-builder/src/db/models/queries/notes.rs +++ b/crates/ntx-builder/src/db/models/queries/notes.rs @@ -3,9 +3,10 @@ use diesel::prelude::*; use miden_node_db::DatabaseError; use miden_node_proto::domain::account::NetworkAccountId; -use miden_node_proto::domain::note::SingleTargetNetworkNote; use miden_protocol::block::BlockNumber; -use miden_protocol::note::Nullifier; +use miden_protocol::note::{Note, Nullifier}; +use miden_standards::note::AccountTargetNetworkNote; +use miden_tx::utils::{Deserializable, Serializable}; use crate::actor::inflight_note::InflightNetworkNote; use crate::db::models::conv as conversions; @@ -54,13 +55,16 @@ pub struct NoteInsert { /// ``` pub fn insert_committed_notes( conn: &mut SqliteConnection, - notes: &[SingleTargetNetworkNote], + notes: &[AccountTargetNetworkNote], ) -> Result<(), DatabaseError> { for note in notes { let row = NoteInsert { - nullifier: conversions::nullifier_to_bytes(¬e.nullifier()), - account_id: conversions::network_account_id_to_bytes(note.account_id()), - note_data: conversions::single_target_note_to_bytes(note), + nullifier: conversions::nullifier_to_bytes(¬e.as_note().nullifier()), + account_id: 
conversions::network_account_id_to_bytes( + NetworkAccountId::try_from(note.target_account_id()) + .expect("account ID of a network note should be a network account"), + ), + note_data: note.as_note().to_bytes(), attempt_count: 0, last_attempt: None, created_by: None, @@ -161,6 +165,11 @@ fn note_row_to_inflight( attempt_count: usize, last_attempt: Option, ) -> Result { - let note = conversions::single_target_note_from_bytes(note_data)?; + let note = Note::read_from_bytes(note_data) + .map_err(|source| DatabaseError::deserialization("failed to parse note", source))?; + let note = AccountTargetNetworkNote::new(note).map_err(|source| { + DatabaseError::deserialization("failed to convert to network note", source) + })?; + Ok(InflightNetworkNote::from_parts(note, attempt_count, last_attempt)) } diff --git a/crates/ntx-builder/src/db/models/queries/tests.rs b/crates/ntx-builder/src/db/models/queries/tests.rs index 2b558a49d..83c62426b 100644 --- a/crates/ntx-builder/src/db/models/queries/tests.rs +++ b/crates/ntx-builder/src/db/models/queries/tests.rs @@ -2,7 +2,6 @@ use diesel::prelude::*; use miden_node_proto::domain::account::NetworkAccountId; -use miden_node_proto::domain::note::SingleTargetNetworkNote; use miden_protocol::Word; use miden_protocol::account::{ AccountComponentMetadata, @@ -59,7 +58,7 @@ fn mock_tx_id(seed: u64) -> TransactionId { fn mock_single_target_note( network_account_id: NetworkAccountId, seed: u8, -) -> SingleTargetNetworkNote { +) -> AccountTargetNetworkNote { let mut rng = ChaCha20Rng::from_seed([seed; 32]); let sender = AccountIdBuilder::new() .account_type(AccountType::RegularAccountImmutableCode) @@ -71,7 +70,7 @@ fn mock_single_target_note( let note = NoteBuilder::new(sender, rng).attachment(target).build().unwrap(); - SingleTargetNetworkNote::try_from(note).expect("note should be single-target network note") + AccountTargetNetworkNote::new(note).expect("note should be single-target network note") } /// Counts the total number of rows in 
the `notes` table. @@ -124,7 +123,7 @@ fn purge_inflight_clears_all_inflight_state() { // Mark note as consumed by another tx. let tx_id2 = mock_tx_id(2); - add_transaction(conn, &tx_id2, None, &[], &[note.nullifier()]).unwrap(); + add_transaction(conn, &tx_id2, None, &[], &[note.as_note().nullifier()]).unwrap(); // Verify consumed_by is set. let consumed_count: i64 = schema::notes::table @@ -162,15 +161,21 @@ fn transaction_added_inserts_notes_and_marks_consumed() { assert_eq!(count_notes(conn), 1); // Add transaction that creates note2 and consumes note1. - add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e2), &[note1.nullifier()]) - .unwrap(); + add_transaction( + conn, + &tx_id, + None, + std::slice::from_ref(¬e2), + &[note1.as_note().nullifier()], + ) + .unwrap(); // Should now have 2 notes total. assert_eq!(count_notes(conn), 2); // note1 should be consumed. let consumed: Option> = schema::notes::table - .find(conversions::nullifier_to_bytes(¬e1.nullifier())) + .find(conversions::nullifier_to_bytes(¬e1.as_note().nullifier())) .select(schema::notes::consumed_by) .first(conn) .unwrap(); @@ -178,7 +183,7 @@ fn transaction_added_inserts_notes_and_marks_consumed() { // note2 should have created_by set. let created: Option> = schema::notes::table - .find(conversions::nullifier_to_bytes(¬e2.nullifier())) + .find(conversions::nullifier_to_bytes(¬e2.as_note().nullifier())) .select(schema::notes::created_by) .first(conn) .unwrap(); @@ -219,7 +224,7 @@ fn block_committed_promotes_inflight_notes_to_committed() { // Verify created_by is set. let created: Option> = schema::notes::table - .find(conversions::nullifier_to_bytes(¬e.nullifier())) + .find(conversions::nullifier_to_bytes(¬e.as_note().nullifier())) .select(schema::notes::created_by) .first(conn) .unwrap(); @@ -230,7 +235,7 @@ fn block_committed_promotes_inflight_notes_to_committed() { // created_by should now be NULL (promoted to committed). 
let created: Option> = schema::notes::table - .find(conversions::nullifier_to_bytes(¬e.nullifier())) + .find(conversions::nullifier_to_bytes(¬e.as_note().nullifier())) .select(schema::notes::created_by) .first(conn) .unwrap(); @@ -250,7 +255,7 @@ fn block_committed_deletes_consumed_notes() { // Consume it via a transaction. let tx_id = mock_tx_id(1); - add_transaction(conn, &tx_id, None, &[], &[note.nullifier()]).unwrap(); + add_transaction(conn, &tx_id, None, &[], &[note.as_note().nullifier()]).unwrap(); // Commit the block. let block_num = BlockNumber::from(1u32); @@ -309,11 +314,11 @@ fn transactions_reverted_restores_consumed_notes() { // Consume it via a transaction. let tx_id = mock_tx_id(1); - add_transaction(conn, &tx_id, None, &[], &[note.nullifier()]).unwrap(); + add_transaction(conn, &tx_id, None, &[], &[note.as_note().nullifier()]).unwrap(); // Verify consumed. let consumed: Option> = schema::notes::table - .find(conversions::nullifier_to_bytes(¬e.nullifier())) + .find(conversions::nullifier_to_bytes(¬e.as_note().nullifier())) .select(schema::notes::consumed_by) .first(conn) .unwrap(); @@ -325,7 +330,7 @@ fn transactions_reverted_restores_consumed_notes() { // Note should be un-consumed. let consumed: Option> = schema::notes::table - .find(conversions::nullifier_to_bytes(¬e.nullifier())) + .find(conversions::nullifier_to_bytes(¬e.as_note().nullifier())) .select(schema::notes::consumed_by) .first(conn) .unwrap(); @@ -394,13 +399,13 @@ fn available_notes_filters_consumed_and_exceeded_attempts() { // Consume one note. let tx_id = mock_tx_id(1); - add_transaction(conn, &tx_id, None, &[], &[note_consumed.nullifier()]).unwrap(); + add_transaction(conn, &tx_id, None, &[], &[note_consumed.as_note().nullifier()]).unwrap(); // Mark one note as failed many times (exceed max_attempts=3). 
let block_num = BlockNumber::from(100u32); - notes_failed(conn, &[note_failed.nullifier()], block_num).unwrap(); - notes_failed(conn, &[note_failed.nullifier()], block_num).unwrap(); - notes_failed(conn, &[note_failed.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_failed.as_note().nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_failed.as_note().nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_failed.as_note().nullifier()], block_num).unwrap(); // Query available notes with max_attempts=3. let result = available_notes(conn, account_id, block_num, 3).unwrap(); @@ -408,7 +413,7 @@ fn available_notes_filters_consumed_and_exceeded_attempts() { // Only note_good should be available (note_consumed is consumed, note_failed exceeded // attempts). assert_eq!(result.len(), 1); - assert_eq!(result[0].to_inner().nullifier(), note_good.nullifier()); + assert_eq!(result[0].nullifier(), note_good.as_note().nullifier()); } #[test] @@ -427,7 +432,7 @@ fn available_notes_only_returns_notes_for_specified_account() { let result = available_notes(conn, account_id_1, block_num, 30).unwrap(); assert_eq!(result.len(), 1); - assert_eq!(result[0].to_inner().nullifier(), note_acct1.nullifier()); + assert_eq!(result[0].nullifier(), note_acct1.as_note().nullifier()); } // NOTES FAILED TESTS @@ -443,11 +448,11 @@ fn notes_failed_increments_attempt_count() { insert_committed_notes(conn, std::slice::from_ref(¬e)).unwrap(); let block_num = BlockNumber::from(5u32); - notes_failed(conn, &[note.nullifier()], block_num).unwrap(); - notes_failed(conn, &[note.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note.as_note().nullifier()], block_num).unwrap(); + notes_failed(conn, &[note.as_note().nullifier()], block_num).unwrap(); let (attempt_count, last_attempt): (i32, Option) = schema::notes::table - .find(conversions::nullifier_to_bytes(¬e.nullifier())) + .find(conversions::nullifier_to_bytes(¬e.as_note().nullifier())) 
.select((schema::notes::attempt_count, schema::notes::last_attempt)) .first(conn) .unwrap(); @@ -493,7 +498,7 @@ fn note_script_insert_and_lookup() { // Extract a NoteScript from a mock note. let account_id = mock_network_account_id(); - let note: miden_protocol::note::Note = mock_single_target_note(account_id, 10).into(); + let note: miden_protocol::note::Note = mock_single_target_note(account_id, 10).into_note(); let script = note.script().clone(); let root = script.root(); @@ -520,7 +525,7 @@ fn note_script_insert_is_idempotent() { let (conn, _dir) = &mut test_conn(); let account_id = mock_network_account_id(); - let note: miden_protocol::note::Note = mock_single_target_note(account_id, 10).into(); + let note: miden_protocol::note::Note = mock_single_target_note(account_id, 10).into_note(); let script = note.script().clone(); let root = script.root(); diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index b04a9d75f..f62746f88 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -4,7 +4,6 @@ use std::time::Duration; use miden_node_proto::clients::{Builder, StoreNtxBuilderClient}; use miden_node_proto::domain::account::{AccountDetails, AccountResponse, NetworkAccountId}; -use miden_node_proto::domain::note::NetworkNote; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::rpc::BlockRange; use miden_node_proto::generated::{self as proto}; @@ -26,6 +25,7 @@ use miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks, PartialMmr}; use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::note::NoteScript; use miden_protocol::transaction::AccountInputs; +use miden_standards::note::AccountTargetNetworkNote; use miden_tx::utils::{Deserializable, Serializable}; use thiserror::Error; use tracing::{info, instrument}; @@ -196,7 +196,7 @@ impl StoreClient { &self, network_account_id: NetworkAccountId, block_num: u32, - ) -> Result, StoreError> { + ) -> Result, StoreError> { 
// Upper bound of each note is ~10KB. Limit page size to ~10MB. const PAGE_SIZE: u64 = 1024; @@ -215,7 +215,7 @@ impl StoreClient { all_notes.reserve(resp.notes.len()); for note in resp.notes { - all_notes.push(NetworkNote::try_from(note)?); + all_notes.push(AccountTargetNetworkNote::try_from(note)?); } match resp.next_token { diff --git a/crates/proto/src/domain/mempool.rs b/crates/proto/src/domain/mempool.rs index 332cd6772..c9bf76bfc 100644 --- a/crates/proto/src/domain/mempool.rs +++ b/crates/proto/src/domain/mempool.rs @@ -5,8 +5,8 @@ use miden_protocol::block::BlockHeader; use miden_protocol::note::Nullifier; use miden_protocol::transaction::TransactionId; use miden_protocol::utils::{Deserializable, Serializable}; +use miden_standards::note::AccountTargetNetworkNote; -use super::note::NetworkNote; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated as proto; @@ -15,7 +15,7 @@ pub enum MempoolEvent { TransactionAdded { id: TransactionId, nullifiers: Vec, - network_notes: Vec, + network_notes: Vec, account_delta: Option, }, BlockCommitted { diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index fa8425d98..f92ac7517 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -1,6 +1,5 @@ use std::sync::Arc; -use miden_protocol::block::BlockNumber; use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::note::{ Note, @@ -12,14 +11,11 @@ use miden_protocol::note::{ NoteScript, NoteTag, NoteType, - Nullifier, }; use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{MastForest, MastNodeId, Word}; -use miden_standards::note::{NetworkAccountTarget, NetworkAccountTargetError}; -use thiserror::Error; +use miden_standards::note::AccountTargetNetworkNote; -use super::account::NetworkAccountId; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated as proto; @@ -93,9 +89,9 @@ impl From for proto::note::Note { } } -impl From 
for proto::note::NetworkNote { - fn from(note: NetworkNote) -> Self { - let note = Note::from(note); +impl From for proto::note::NetworkNote { + fn from(note: AccountTargetNetworkNote) -> Self { + let note = note.into_note(); Self { metadata: Some(proto::note::NoteMetadata::from(note.metadata().clone())), details: NoteDetails::from(note).to_bytes(), @@ -103,6 +99,22 @@ impl From for proto::note::NetworkNote { } } +impl TryFrom for AccountTargetNetworkNote { + type Error = ConversionError; + + fn try_from(value: proto::note::NetworkNote) -> Result { + let details = NoteDetails::read_from_bytes(&value.details) + .map_err(|err| ConversionError::deserialization_error("NoteDetails", err))?; + let (assets, recipient) = details.into_parts(); + let metadata: NoteMetadata = value + .metadata + .ok_or_else(|| proto::note::NetworkNote::missing_field(stringify!(metadata)))? + .try_into()?; + let note = Note::new(assets, metadata, recipient); + AccountTargetNetworkNote::new(note).map_err(ConversionError::NetworkNoteError) + } +} + impl From for proto::note::NoteMetadata { fn from(val: NoteMetadata) -> Self { let sender = Some(val.sender().into()); @@ -207,148 +219,6 @@ impl TryFrom for Note { } } -// NETWORK NOTE -// ================================================================================================ - -/// An enum that wraps around notes used in a network mode. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub enum NetworkNote { - SingleTarget(SingleTargetNetworkNote), -} - -impl NetworkNote { - pub fn inner(&self) -> &Note { - match self { - NetworkNote::SingleTarget(note) => note.inner(), - } - } - - pub fn metadata(&self) -> &NoteMetadata { - self.inner().metadata() - } - - pub fn nullifier(&self) -> Nullifier { - self.inner().nullifier() - } - - pub fn id(&self) -> NoteId { - self.inner().id() - } -} - -impl From for Note { - fn from(value: NetworkNote) -> Self { - match value { - NetworkNote::SingleTarget(note) => note.into(), - } - } -} - -impl TryFrom for NetworkNote { - type Error = NetworkNoteError; - - fn try_from(note: Note) -> Result { - SingleTargetNetworkNote::try_from(note).map(NetworkNote::SingleTarget) - } -} - -impl TryFrom for NetworkNote { - type Error = ConversionError; - - fn try_from(proto_note: proto::note::NetworkNote) -> Result { - from_proto(proto_note) - } -} - -// SINGLE TARGET NETWORK NOTE -// ================================================================================================ - -/// A newtype that wraps around notes targeting a single network account. -/// -/// A note is considered a single-target network note if its attachment -/// is a valid `NetworkAccountTarget`. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct SingleTargetNetworkNote { - note: Note, - account_target: NetworkAccountTarget, -} - -impl SingleTargetNetworkNote { - pub fn inner(&self) -> &Note { - &self.note - } - - pub fn metadata(&self) -> &NoteMetadata { - self.inner().metadata() - } - - pub fn nullifier(&self) -> Nullifier { - self.inner().nullifier() - } - - pub fn id(&self) -> NoteId { - self.inner().id() - } - - /// The network account ID that this note targets. 
- pub fn account_id(&self) -> NetworkAccountId { - self.account_target.target_id().try_into().expect("always a network account ID") - } - - pub fn can_be_consumed(&self, block_num: BlockNumber) -> Option { - self.account_target.execution_hint().can_be_consumed(block_num) - } -} - -impl From for Note { - fn from(value: SingleTargetNetworkNote) -> Self { - value.note - } -} - -impl TryFrom for SingleTargetNetworkNote { - type Error = NetworkNoteError; - - fn try_from(note: Note) -> Result { - // Single-target network notes are identified by having a NetworkAccountTarget attachment - let attachment = note.metadata().attachment(); - let account_target = NetworkAccountTarget::try_from(attachment) - .map_err(NetworkNoteError::InvalidAttachment)?; - Ok(Self { note, account_target }) - } -} - -impl TryFrom for SingleTargetNetworkNote { - type Error = ConversionError; - - fn try_from(proto_note: proto::note::NetworkNote) -> Result { - from_proto(proto_note) - } -} - -/// Helper function to deduplicate implementations `TryFrom`. -fn from_proto(proto_note: proto::note::NetworkNote) -> Result -where - T: TryFrom, - T::Error: Into, -{ - let details = NoteDetails::read_from_bytes(&proto_note.details) - .map_err(|err| ConversionError::deserialization_error("NoteDetails", err))?; - let (assets, recipient) = details.into_parts(); - let metadata: NoteMetadata = proto_note - .metadata - .ok_or_else(|| proto::note::NetworkNote::missing_field(stringify!(metadata)))? 
- .try_into()?; - let note = Note::new(assets, metadata, recipient); - T::try_from(note).map_err(Into::into) -} - -#[derive(Debug, Error)] -pub enum NetworkNoteError { - #[error("note does not have a valid NetworkAccountTarget attachment: {0}")] - InvalidAttachment(#[source] NetworkAccountTargetError), -} - // NOTE SCRIPT // ================================================================================================ diff --git a/crates/proto/src/errors/mod.rs b/crates/proto/src/errors/mod.rs index d2fc93616..04493e696 100644 --- a/crates/proto/src/errors/mod.rs +++ b/crates/proto/src/errors/mod.rs @@ -6,10 +6,9 @@ pub use miden_node_grpc_error_macro::GrpcError; use miden_protocol::crypto::merkle::smt::{SmtLeafError, SmtProofError}; use miden_protocol::errors::{AccountError, AssetError, FeeError, NoteError, StorageSlotNameError}; use miden_protocol::utils::DeserializationError; +use miden_standards::note::NetworkAccountTargetError; use thiserror::Error; -use crate::domain::note::NetworkNoteError; - #[cfg(test)] mod test_macro; @@ -28,7 +27,7 @@ pub enum ConversionError { #[error("note error")] NoteError(#[from] NoteError), #[error("network note error")] - NetworkNoteError(#[from] NetworkNoteError), + NetworkNoteError(#[source] NetworkAccountTargetError), #[error("SMT leaf error")] SmtLeafError(#[from] SmtLeafError), #[error("SMT proof error")] From f6afa69e684ee12eccf8c8c9f7332c9ab9e17291 Mon Sep 17 00:00:00 2001 From: Marti Date: Wed, 4 Mar 2026 16:25:20 +0100 Subject: [PATCH 75/77] chore: prepare release `v0.14.0-alpha.1` (#1743) * chore: bump workspace version to 0.14.0-alpha.1 Pin protocol dependencies to published crate versions and update all workspace crate versions to =0.14.0-alpha.1. https://claude.ai/code/session_017j1F8cwRXFnWyjzzwN9d4k * ci: support publishing from non-main branches Use github.event.release.target_commitish to dynamically determine which branch to checkout and verify against, instead of hardcoding main. 
This enables releasing alpha versions from the next branch. https://claude.ai/code/session_017j1F8cwRXFnWyjzzwN9d4k * feat: use StorageMapEntry instead of Word * feat: use StorageMapEntry in tests * chore: simplify Digest<>StorageMapEntry conv * chore: rename key -> key_hash * Apply suggestions from code review Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> * chore: simplify crate publishing step * chore: bring msrv check from main --------- Co-authored-by: Claude Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> --- .../{publish-main.yml => publish-crates.yml} | 22 +-- Cargo.lock | 74 +++++---- Cargo.toml | 41 +++-- bin/network-monitor/src/deploy/mod.rs | 4 +- crates/ntx-builder/src/actor/execute.rs | 3 +- crates/ntx-builder/src/store.rs | 3 +- crates/proto/src/domain/account.rs | 13 +- crates/proto/src/domain/account/tests.rs | 11 +- crates/proto/src/domain/digest.rs | 21 +++ crates/store/Cargo.toml | 2 +- .../store/src/db/models/queries/accounts.rs | 26 +-- .../src/db/models/queries/accounts/delta.rs | 7 +- .../db/models/queries/accounts/delta/tests.rs | 5 +- .../src/db/models/queries/accounts/tests.rs | 26 +-- crates/store/src/db/tests.rs | 31 ++-- crates/store/src/inner_forest/mod.rs | 33 ++-- crates/store/src/inner_forest/tests.rs | 14 +- crates/store/src/server/ntx_builder.rs | 7 +- crates/store/src/state/mod.rs | 4 +- .../validator/src/tx_validation/data_store.rs | 4 +- scripts/check-msrv.sh | 153 ++++++++++++++++++ 21 files changed, 347 insertions(+), 157 deletions(-) rename .github/workflows/{publish-main.yml => publish-crates.yml} (63%) create mode 100755 scripts/check-msrv.sh diff --git a/.github/workflows/publish-main.yml b/.github/workflows/publish-crates.yml index f53033f74..7fd35b712 100644 --- a/.github/workflows/publish-main.yml +++ b/.github/workflows/publish-crates.yml @@ -1,4 
+1,4 @@ -name: Publish (main) +name: Publish crates permissions: contents: read @@ -17,25 +17,13 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - ref: main + ref: ${{ github.event.release.tag_name }} - uses: ./.github/actions/install-rocksdb - uses: ./.github/actions/install-protobuf-compiler - # Ensure the release tag refers to the latest commit on main. - # Compare the commit SHA that triggered the workflow with the HEAD of the branch we just - # checked out (main). - - name: Verify release was triggered from main HEAD + - name: Log release info run: | - tag_sha="${{ github.sha }}" - main_sha="$(git rev-parse HEAD)" - - echo "Tag points to: $tag_sha" - echo "Current main HEAD is: $main_sha" - - if [ "$tag_sha" != "$main_sha" ]; then - echo "::error::The release tag was not created from the latest commit on main. Aborting." - exit 1 - fi - echo "Release tag matches main HEAD — continuing." + echo "Publishing release ${{ github.event.release.tag_name }}" + echo "Commit: $(git rev-parse HEAD)" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - name: Install dependencies diff --git a/Cargo.lock b/Cargo.lock index bb93114ac..6bc2f415f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2628,8 +2628,9 @@ checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "miden-agglayer" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" +version = "0.14.0-alpha.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e492a6044cf8875a64d7eec130d260f2eda1c783795261f00d5d52837ed027bd" dependencies = [ "fs-err", "miden-assembly", @@ -2699,8 +2700,9 @@ dependencies = [ [[package]] name = "miden-block-prover" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" +version = "0.14.0-alpha.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9564dfb23c529aad68369845b6897a6f62bacdeab7c00db432a5f16670764d4" dependencies = [ "miden-protocol", "thiserror 2.0.18", @@ -2818,7 +2820,7 @@ dependencies = [ [[package]] name = "miden-large-smt-backend-rocksdb" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "miden-crypto", "miden-protocol", @@ -2883,7 +2885,7 @@ dependencies = [ [[package]] name = "miden-network-monitor" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "axum", @@ -2912,7 +2914,7 @@ dependencies = [ [[package]] name = "miden-node" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "clap", @@ -2933,7 +2935,7 @@ dependencies = [ [[package]] name = "miden-node-block-producer" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "assert_matches", @@ -2969,7 +2971,7 @@ dependencies = [ [[package]] name = "miden-node-db" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "deadpool", "deadpool-diesel", @@ -2982,7 +2984,7 @@ dependencies = [ [[package]] name = "miden-node-grpc-error-macro" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "quote", "syn 2.0.114", @@ -2990,7 +2992,7 @@ dependencies = [ [[package]] name = "miden-node-ntx-builder" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "build-rs", @@ -3020,7 +3022,7 @@ dependencies = [ [[package]] name = "miden-node-proto" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "assert_matches", @@ -3046,7 +3048,7 @@ dependencies = [ [[package]] name = "miden-node-proto-build" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "build-rs", "fs-err", @@ -3057,11 +3059,11 @@ dependencies = [ [[package]] name = "miden-node-rocksdb-cxx-linkage-fix" -version = "0.14.0" +version = "0.14.0-alpha.1" [[package]] name = "miden-node-rpc" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", 
"futures", @@ -3093,7 +3095,7 @@ dependencies = [ [[package]] name = "miden-node-store" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "assert_matches", @@ -3141,7 +3143,7 @@ dependencies = [ [[package]] name = "miden-node-stress-test" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "clap", "fs-err", @@ -3171,7 +3173,7 @@ dependencies = [ [[package]] name = "miden-node-utils" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "bytes", @@ -3198,7 +3200,7 @@ dependencies = [ [[package]] name = "miden-node-validator" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "aws-config", @@ -3243,8 +3245,9 @@ dependencies = [ [[package]] name = "miden-protocol" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" +version = "0.14.0-alpha.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a88effeac994eb55b8dc4f93fbfd71a5d916dfaba1099896e27a0ee42c488c1" dependencies = [ "bech32", "fs-err", @@ -3273,8 +3276,9 @@ dependencies = [ [[package]] name = "miden-protocol-macros" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" +version = "0.14.0-alpha.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bb28b730005e5f8b08d615ea9216f8cab77b3a7439fa54d5e39d2ec43ef53a3" dependencies = [ "proc-macro2", "quote", @@ -3297,7 +3301,7 @@ dependencies = [ [[package]] name = "miden-remote-prover" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "assert_matches", @@ -3335,7 +3339,7 @@ dependencies = [ [[package]] name = "miden-remote-prover-client" -version = "0.14.0" +version = "0.14.0-alpha.1" dependencies = [ "build-rs", "fs-err", @@ -3356,8 +3360,9 @@ dependencies = [ [[package]] name = "miden-standards" -version = "0.14.0" -source = 
"git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" +version = "0.14.0-alpha.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cef036bbfec29acba92751a13d05844bbcf080140201097b419c9ad1927e367" dependencies = [ "fs-err", "miden-assembly", @@ -3373,8 +3378,9 @@ dependencies = [ [[package]] name = "miden-testing" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" +version = "0.14.0-alpha.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e980777d0f7e6069942b14d4e7cb3d4d137b323ddfa15722a3bd21e9d13fdd2e" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3396,8 +3402,9 @@ dependencies = [ [[package]] name = "miden-tx" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" +version = "0.14.0-alpha.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c67e0df9adcf29c9111df65acf408ae05952b8bc6569f571963676f97668d83f" dependencies = [ "miden-processor", "miden-protocol", @@ -3409,8 +3416,9 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#592fcbe09ee1281da8e973d99e1b38e3e461e8a3" +version = "0.14.0-alpha.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba29f8f6ecae671eff8b52b4c19eca8db5964c0b45b5d68c3ce38a57a8367931" dependencies = [ "miden-protocol", "miden-tx", diff --git a/Cargo.toml b/Cargo.toml index a6cd8d68f..9da92a6db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,7 +31,7 @@ license = "MIT" readme = "README.md" repository = "https://github.com/0xMiden/node" rust-version = "1.91" -version = "0.14.0" +version = "0.14.0-alpha.1" # Optimize the cryptography for faster tests involving account creation. 
[profile.test.package.miden-crypto] @@ -46,31 +46,30 @@ debug = true [workspace.dependencies] # Workspace crates. -miden-large-smt-backend-rocksdb = { path = "crates/large-smt-backend-rocksdb", version = "0.14" } -miden-node-block-producer = { path = "crates/block-producer", version = "0.14" } -miden-node-db = { path = "crates/db", version = "0.14" } -miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.14" } -miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.14" } -miden-node-proto = { path = "crates/proto", version = "0.14" } -miden-node-proto-build = { path = "proto", version = "0.14" } -miden-node-rpc = { path = "crates/rpc", version = "0.14" } -miden-node-store = { path = "crates/store", version = "0.14" } +miden-large-smt-backend-rocksdb = { path = "crates/large-smt-backend-rocksdb", version = "=0.14.0-alpha.1" } +miden-node-block-producer = { path = "crates/block-producer", version = "=0.14.0-alpha.1" } +miden-node-db = { path = "crates/db", version = "=0.14.0-alpha.1" } +miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "=0.14.0-alpha.1" } +miden-node-ntx-builder = { path = "crates/ntx-builder", version = "=0.14.0-alpha.1" } +miden-node-proto = { path = "crates/proto", version = "=0.14.0-alpha.1" } +miden-node-proto-build = { path = "proto", version = "=0.14.0-alpha.1" } +miden-node-rpc = { path = "crates/rpc", version = "=0.14.0-alpha.1" } +miden-node-store = { path = "crates/store", version = "=0.14.0-alpha.1" } miden-node-test-macro = { path = "crates/test-macro" } -miden-node-utils = { path = "crates/utils", version = "0.14" } -miden-node-validator = { path = "crates/validator", version = "0.14" } -miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.14" } - +miden-node-utils = { path = "crates/utils", version = "=0.14.0-alpha.1" } +miden-node-validator = { path = "crates/validator", version = "=0.14.0-alpha.1" } +miden-remote-prover-client = { path = 
"crates/remote-prover-client", version = "=0.14.0-alpha.1" } # Temporary workaround until # is part of `rocksdb-rust` release -miden-node-rocksdb-cxx-linkage-fix = { path = "crates/rocksdb-cxx-linkage-fix", version = "0.14" } +miden-node-rocksdb-cxx-linkage-fix = { path = "crates/rocksdb-cxx-linkage-fix", version = "=0.14.0-alpha.1" } # miden-base aka protocol dependencies. These should be updated in sync. -miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } -miden-protocol = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base.git" } -miden-standards = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } -miden-testing = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } -miden-tx = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base.git" } -miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-block-prover = { version = "=0.14.0-alpha.1" } +miden-protocol = { default-features = false, version = "=0.14.0-alpha.1" } +miden-standards = { version = "=0.14.0-alpha.1" } +miden-testing = { version = "=0.14.0-alpha.1" } +miden-tx = { default-features = false, version = "=0.14.0-alpha.1" } +miden-tx-batch-prover = { version = "=0.14.0-alpha.1" } # Other miden dependencies. These should align with those expected by miden-base. 
miden-air = { features = ["std", "testing"], version = "0.20" } diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index 235905f13..b89c09aa0 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -11,7 +11,7 @@ use anyhow::{Context, Result}; use miden_node_proto::clients::{Builder, RpcClient}; use miden_node_proto::generated::rpc::BlockHeaderByNumberRequest; use miden_node_proto::generated::transaction::ProvenTransaction; -use miden_protocol::account::{Account, AccountId, PartialAccount, PartialStorage}; +use miden_protocol::account::{Account, AccountId, PartialAccount, PartialStorage, StorageMapKey}; use miden_protocol::assembly::{ DefaultSourceManager, Library, @@ -308,7 +308,7 @@ impl DataStore for MonitorDataStore { &self, _account_id: AccountId, _map_root: Word, - _map_key: Word, + _map_key: StorageMapKey, ) -> Result { unimplemented!("Not needed") } diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index 09658cd23..9b45a48c2 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -11,6 +11,7 @@ use miden_protocol::account::{ AccountId, AccountStorageHeader, PartialAccount, + StorageMapKey, StorageMapWitness, StorageSlotName, StorageSlotType, @@ -508,7 +509,7 @@ impl DataStore for NtxDataStore { &self, account_id: AccountId, map_root: Word, - map_key: Word, + map_key: StorageMapKey, ) -> impl FutureMaybeSend> { async move { // The slot name that corresponds to the given account ID and map root must have been diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index f62746f88..1f8c7b5f7 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -16,6 +16,7 @@ use miden_protocol::account::{ AccountId, PartialAccount, PartialStorage, + StorageMapKey, StorageMapWitness, StorageSlotName, }; @@ -421,7 +422,7 @@ impl StoreClient { &self, 
account_id: AccountId, slot_name: StorageSlotName, - map_key: Word, + map_key: StorageMapKey, block_num: Option, ) -> Result { // Construct proto request. diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 8e06d3369..aeec88832 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -9,6 +9,7 @@ use miden_protocol::account::{ AccountId, AccountStorageHeader, StorageMap, + StorageMapKey, StorageSlotHeader, StorageSlotName, StorageSlotType, @@ -223,7 +224,7 @@ impl TryFrom), + MapKeys(Vec), } impl @@ -426,7 +427,7 @@ pub enum StorageMapEntries { /// All storage map entries (key-value pairs) without proofs. /// Used when all entries are requested for small maps. - AllEntries(Vec<(Word, Word)>), + AllEntries(Vec<(StorageMapKey, Word)>), /// Specific entries with their SMT proofs for client-side verification. /// Used when specific keys are requested from the storage map. @@ -468,7 +469,10 @@ impl AccountStorageMapDetails { /// Creates storage map details from forest-queried entries. /// /// Returns `LimitExceeded` if too many entries. - pub fn from_forest_entries(slot_name: StorageSlotName, entries: Vec<(Word, Word)>) -> Self { + pub fn from_forest_entries( + slot_name: StorageSlotName, + entries: Vec<(StorageMapKey, Word)>, + ) -> Self { if entries.len() > Self::MAX_RETURN_ENTRIES { Self { slot_name, @@ -551,7 +555,8 @@ impl TryFrom let key = entry .key .ok_or(StorageMapEntry::missing_field(stringify!(key)))? - .try_into()?; + .try_into() + .map(StorageMapKey::new)?; let value = entry .value .ok_or(StorageMapEntry::missing_field(stringify!(value)))? 
diff --git a/crates/proto/src/domain/account/tests.rs b/crates/proto/src/domain/account/tests.rs index 695813d99..c25511d60 100644 --- a/crates/proto/src/domain/account/tests.rs +++ b/crates/proto/src/domain/account/tests.rs @@ -1,3 +1,5 @@ +use miden_protocol::account::StorageMapKey; + use super::*; fn word_from_u32(arr: [u32; 4]) -> Word { @@ -12,8 +14,11 @@ fn test_slot_name() -> StorageSlotName { fn account_storage_map_details_from_forest_entries() { let slot_name = test_slot_name(); let entries = vec![ - (word_from_u32([1, 2, 3, 4]), word_from_u32([5, 6, 7, 8])), - (word_from_u32([9, 10, 11, 12]), word_from_u32([13, 14, 15, 16])), + (StorageMapKey::new(word_from_u32([1, 2, 3, 4])), word_from_u32([5, 6, 7, 8])), + ( + StorageMapKey::new(word_from_u32([9, 10, 11, 12])), + word_from_u32([13, 14, 15, 16]), + ), ]; let details = AccountStorageMapDetails::from_forest_entries(slot_name.clone(), entries.clone()); @@ -28,7 +33,7 @@ fn account_storage_map_details_from_forest_entries_limit_exceeded() { // Create more entries than MAX_RETURN_ENTRIES let entries: Vec<_> = (0..=AccountStorageMapDetails::MAX_RETURN_ENTRIES) .map(|i| { - let key = word_from_u32([i as u32, 0, 0, 0]); + let key = StorageMapKey::from_index(i as u32); let value = word_from_u32([0, 0, 0, i as u32]); (key, value) }) diff --git a/crates/proto/src/domain/digest.rs b/crates/proto/src/domain/digest.rs index 7be94e530..08d8c3f9a 100644 --- a/crates/proto/src/domain/digest.rs +++ b/crates/proto/src/domain/digest.rs @@ -1,6 +1,7 @@ use std::fmt::{Debug, Display, Formatter}; use hex::{FromHex, ToHex}; +use miden_protocol::account::StorageMapKey; use miden_protocol::note::NoteId; use miden_protocol::{Felt, StarkField, Word}; @@ -136,6 +137,18 @@ impl From<&Word> for proto::primitives::Digest { } } +impl From for proto::primitives::Digest { + fn from(value: StorageMapKey) -> Self { + Into::::into(value).into() + } +} + +impl From<&StorageMapKey> for proto::primitives::Digest { + fn from(value: 
&StorageMapKey) -> Self { + (*value).into() + } +} + impl From<&NoteId> for proto::primitives::Digest { fn from(value: &NoteId) -> Self { value.as_word().into() @@ -185,6 +198,14 @@ impl TryFrom for Word { } } +impl TryFrom for StorageMapKey { + type Error = ConversionError; + + fn try_from(value: proto::primitives::Digest) -> Result { + Ok(StorageMapKey::new(value.try_into()?)) + } +} + impl TryFrom<&proto::primitives::Digest> for [Felt; 4] { type Error = ConversionError; diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index a5531d46f..d0642a819 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -53,7 +53,7 @@ url = { workspace = true } [build-dependencies] build-rs = { workspace = true } fs-err = { workspace = true } -miden-agglayer = { branch = "next", features = ["testing"], git = "https://github.com/0xMiden/miden-base" } +miden-agglayer = { features = ["testing"], version = "=0.14.0-alpha.1" } miden-node-rocksdb-cxx-linkage-fix = { workspace = true } miden-protocol = { features = ["std"], workspace = true } miden-standards = { workspace = true } diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index f859a826a..c74f83401 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -28,6 +28,7 @@ use miden_protocol::account::{ AccountStorageHeader, NonFungibleDeltaAction, StorageMap, + StorageMapKey, StorageSlot, StorageSlotContent, StorageSlotName, @@ -65,7 +66,7 @@ mod tests; type StorageMapValueRow = (i64, String, Vec, Vec); type StorageHeaderWithEntries = - (AccountStorageHeader, BTreeMap>); + (AccountStorageHeader, BTreeMap>); // NETWORK ACCOUNT TYPE // ================================================================================================ @@ -618,7 +619,7 @@ pub(crate) fn select_all_network_account_ids( pub struct StorageMapValue { pub block_num: BlockNumber, pub slot_name: StorageSlotName, - pub key: 
Word, + pub key: StorageMapKey, pub value: Word, } @@ -636,7 +637,7 @@ impl StorageMapValue { Ok(Self { block_num: BlockNumber::from_raw_sql(block_num)?, slot_name: StorageSlotName::from_raw_sql(slot_name)?, - key: Word::read_from_bytes(&key)?, + key: StorageMapKey::read_from_bytes(&key)?, value: Word::read_from_bytes(&value)?, }) } @@ -807,7 +808,7 @@ pub(crate) fn select_latest_account_storage_components( fn select_latest_storage_map_entries_all( conn: &mut SqliteConnection, account_id: &AccountId, -) -> Result>, DatabaseError> { +) -> Result>, DatabaseError> { use schema::account_storage_map_values as t; let map_values: Vec<(String, Vec, Vec)> = @@ -823,7 +824,7 @@ fn select_latest_storage_map_entries_for_slots( conn: &mut SqliteConnection, account_id: &AccountId, slot_names: &[StorageSlotName], -) -> Result>, DatabaseError> { +) -> Result>, DatabaseError> { use schema::account_storage_map_values as t; if slot_names.is_empty() { @@ -856,7 +857,7 @@ fn select_latest_storage_map_entries_for_slot( conn: &mut SqliteConnection, account_id: &AccountId, slot_name: &StorageSlotName, -) -> Result, DatabaseError> { +) -> Result, DatabaseError> { use schema::account_storage_map_values as t; let map_values: Vec<(String, Vec, Vec)> = @@ -871,13 +872,14 @@ fn select_latest_storage_map_entries_for_slot( fn group_storage_map_entries( map_values: Vec<(String, Vec, Vec)>, -) -> Result>, DatabaseError> { - let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); +) -> Result>, DatabaseError> { + let mut map_entries_by_slot: BTreeMap> = + BTreeMap::new(); for (slot_name_str, key_bytes, value_bytes) in map_values { let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) })?; - let key = Word::read_from_bytes(&key_bytes)?; + let key = StorageMapKey::read_from_bytes(&key_bytes)?; let value = Word::read_from_bytes(&value_bytes)?; map_entries_by_slot.entry(slot_name).or_default().insert(key, 
value); } @@ -988,7 +990,7 @@ pub(crate) fn insert_account_storage_map_value( account_id: AccountId, block_num: BlockNumber, slot_name: StorageSlotName, - key: Word, + key: StorageMapKey, value: Word, ) -> Result { let account_id = account_id.to_bytes(); @@ -1023,7 +1025,7 @@ pub(crate) fn insert_account_storage_map_value( Ok(update_count + insert_count) } -type PendingStorageInserts = Vec<(AccountId, StorageSlotName, Word, Word)>; +type PendingStorageInserts = Vec<(AccountId, StorageSlotName, StorageMapKey, Word)>; type PendingAssetInserts = Vec<(AccountId, AssetVaultKey, Option)>; fn prepare_full_account_update( @@ -1121,7 +1123,7 @@ fn prepare_partial_account_update( let mut storage = Vec::new(); for (slot_name, map_delta) in delta.storage().maps() { for (key, value) in map_delta.entries() { - storage.push((account_id, slot_name.clone(), (*key).into(), *value)); + storage.push((account_id, slot_name.clone(), (*key).into_inner(), *value)); } } diff --git a/crates/store/src/db/models/queries/accounts/delta.rs b/crates/store/src/db/models/queries/accounts/delta.rs index 7a554130c..8bab2b122 100644 --- a/crates/store/src/db/models/queries/accounts/delta.rs +++ b/crates/store/src/db/models/queries/accounts/delta.rs @@ -18,6 +18,7 @@ use miden_protocol::account::{ AccountId, AccountStorageHeader, StorageMap, + StorageMapKey, StorageSlotHeader, StorageSlotName, }; @@ -198,7 +199,7 @@ pub(super) fn select_vault_balances_by_faucet_ids( pub(super) fn apply_storage_delta( header: &AccountStorageHeader, delta: &AccountStorageDelta, - map_entries: &BTreeMap>, + map_entries: &BTreeMap>, ) -> Result { let mut value_updates: BTreeMap<&StorageSlotName, Word> = BTreeMap::new(); let mut map_updates: BTreeMap<&StorageSlotName, Word> = BTreeMap::new(); @@ -215,9 +216,9 @@ pub(super) fn apply_storage_delta( let mut entries = map_entries.get(slot_name).cloned().unwrap_or_default(); for (key, value) in map_delta.entries() { if *value == EMPTY_WORD { - entries.remove(&(*key).into()); + 
entries.remove(&(*key).into_inner()); } else { - entries.insert((*key).into(), *value); + entries.insert((*key).into_inner(), *value); } } diff --git a/crates/store/src/db/models/queries/accounts/delta/tests.rs b/crates/store/src/db/models/queries/accounts/delta/tests.rs index 37e4db1f8..7f3100325 100644 --- a/crates/store/src/db/models/queries/accounts/delta/tests.rs +++ b/crates/store/src/db/models/queries/accounts/delta/tests.rs @@ -24,6 +24,7 @@ use miden_protocol::account::{ AccountStorageMode, AccountType, StorageMap, + StorageMapKey, StorageSlot, StorageSlotName, }; @@ -430,12 +431,12 @@ fn optimized_delta_updates_storage_map_header() { let mut conn = setup_test_db(); - let map_key = Word::from([ + let map_key = StorageMapKey::new(Word::from([ Felt::new(MAP_KEY_VALUES[0]), Felt::new(MAP_KEY_VALUES[1]), Felt::new(MAP_KEY_VALUES[2]), Felt::new(MAP_KEY_VALUES[3]), - ]); + ])); let map_value_initial = Word::from([ Felt::new(MAP_VALUE_INITIAL[0]), Felt::new(MAP_VALUE_INITIAL[1]), diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index 46fad7649..572cab258 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -31,6 +31,7 @@ use miden_protocol::account::{ AccountVaultDelta, StorageMap, StorageMapDelta, + StorageMapKey, StorageSlot, StorageSlotContent, StorageSlotDelta, @@ -98,18 +99,19 @@ fn reconstruct_account_storage_at_block( .load(conn)?; // For each (slot_name, key) pair, keep only the latest entry - let mut latest_map_entries: BTreeMap<(StorageSlotName, Word), Word> = BTreeMap::new(); + let mut latest_map_entries: BTreeMap<(StorageSlotName, StorageMapKey), Word> = BTreeMap::new(); for (_, slot_name_str, key_bytes, value_bytes) in map_values { let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) })?; - let key = 
Word::read_from_bytes(&key_bytes)?; + let key = StorageMapKey::read_from_bytes(&key_bytes)?; let value = Word::read_from_bytes(&value_bytes)?; latest_map_entries.entry((slot_name, key)).or_insert(value); } // Group entries by slot name - let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); + let mut map_entries_by_slot: BTreeMap> = + BTreeMap::new(); for ((slot_name, key), value) in latest_map_entries { map_entries_by_slot.entry(slot_name).or_default().push((key, value)); } @@ -204,7 +206,7 @@ fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { fn create_account_with_map_storage( slot_name: StorageSlotName, - entries: Vec<(Word, Word)>, + entries: Vec<(StorageMapKey, Word)>, ) -> Account { let storage_map = StorageMap::with_entries(entries).unwrap(); let component_storage = vec![StorageSlot::with_map(slot_name, storage_map)]; @@ -236,7 +238,7 @@ fn create_account_with_map_storage( fn assert_storage_map_slot_entries( storage: &AccountStorage, slot_name: &StorageSlotName, - expected: &BTreeMap, + expected: &BTreeMap, ) { let slot = storage .slots() @@ -712,9 +714,9 @@ fn test_select_latest_account_storage_ordering_semantics() { insert_block_header(&mut conn, block_num); let slot_name = StorageSlotName::mock(0); - let key_1 = Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]); - let key_2 = Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]); - let key_3 = Word::from([Felt::new(3), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let key_1 = StorageMapKey::from_index(1); + let key_2 = StorageMapKey::from_index(2); + let key_3 = StorageMapKey::from_index(3); let value_1 = Word::from([Felt::new(10), Felt::ZERO, Felt::ZERO, Felt::ZERO]); let value_2 = Word::from([Felt::new(20), Felt::ZERO, Felt::ZERO, Felt::ZERO]); @@ -758,8 +760,8 @@ fn test_select_latest_account_storage_multiple_slots() { let slot_name_1 = StorageSlotName::mock(0); let slot_name_2 = StorageSlotName::mock(1); - let key_a = Word::from([Felt::new(1), 
Felt::ZERO, Felt::ZERO, Felt::ZERO]); - let key_b = Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let key_a = StorageMapKey::from_index(1); + let key_b = StorageMapKey::from_index(2); let value_a = Word::from([Felt::new(11), Felt::ZERO, Felt::ZERO, Felt::ZERO]); let value_b = Word::from([Felt::new(22), Felt::ZERO, Felt::ZERO, Felt::ZERO]); @@ -822,8 +824,8 @@ fn test_select_latest_account_storage_slot_updates() { insert_block_header(&mut conn, block_2); let slot_name = StorageSlotName::mock(0); - let key_1 = Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]); - let key_2 = Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let key_1 = StorageMapKey::from_index(1); + let key_2 = StorageMapKey::from_index(2); let value_1 = Word::from([Felt::new(10), Felt::ZERO, Felt::ZERO, Felt::ZERO]); let value_2 = Word::from([Felt::new(20), Felt::ZERO, Felt::ZERO, Felt::ZERO]); diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 80ca27674..77fb083af 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -19,6 +19,7 @@ use miden_protocol::account::{ AccountStorageMode, AccountType, AccountVaultDelta, + StorageMapKey, StorageSlot, StorageSlotContent, StorageSlotDelta, @@ -950,8 +951,8 @@ fn sql_account_storage_map_values_insertion() { queries::upsert_accounts(conn, &[mock_block_account_update(account_id, 0)], block2).unwrap(); let slot_name = StorageSlotName::mock(3); - let key1 = Word::from([1u32, 2, 3, 4]); - let key2 = Word::from([5u32, 6, 7, 8]); + let key1 = StorageMapKey::new(Word::from([1u32, 2, 3, 4])); + let key2 = StorageMapKey::new(Word::from([5u32, 6, 7, 8])); let value1 = Word::from([10u32, 11, 12, 13]); let value2 = Word::from([20u32, 21, 22, 23]); let value3 = Word::from([30u32, 31, 32, 33]); @@ -1009,9 +1010,9 @@ fn select_storage_map_sync_values() { let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); let slot_name = 
StorageSlotName::mock(5); - let key1 = num_to_word(1); - let key2 = num_to_word(2); - let key3 = num_to_word(3); + let key1 = StorageMapKey::from_index(1u32); + let key2 = StorageMapKey::from_index(2u32); + let key3 = StorageMapKey::from_index(3u32); let value1 = num_to_word(10); let value2 = num_to_word(20); let value3 = num_to_word(30); @@ -1440,11 +1441,11 @@ async fn genesis_with_account_storage_map() { let storage_map = StorageMap::with_entries(vec![ ( - Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(1u32), Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), ), ( - Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(2u32), Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), ), ]) @@ -1497,7 +1498,7 @@ async fn genesis_with_account_assets_and_storage() { let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap(); let storage_map = StorageMap::with_entries(vec![( - Word::from([Felt::new(100), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(100u32), Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]), )]) .unwrap(); @@ -1594,7 +1595,7 @@ async fn genesis_with_multiple_accounts() { .unwrap(); let storage_map = StorageMap::with_entries(vec![( - Word::from([Felt::new(5), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(5u32), Word::from([Felt::new(15), Felt::new(25), Felt::new(35), Felt::new(45)]), )]) .unwrap(); @@ -2005,7 +2006,7 @@ fn db_roundtrip_storage_map_values() { queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num) .unwrap(); let slot_name = StorageSlotName::mock(5); - let key = num_to_word(12345); + let key = StorageMapKey::from_index(12345u32); let value = num_to_word(67890); queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block_num) @@ -2051,11 +2052,11 @@ fn 
db_roundtrip_account_storage_with_maps() { // Create storage with both value slots and map slots let storage_map = StorageMap::with_entries(vec![ ( - Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(1u32), Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), ), ( - Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(2u32), Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), ), ]) @@ -2289,9 +2290,9 @@ fn test_prune_history() { // Insert storage map values at different blocks let slot_name = StorageSlotName::mock(5); - let map_key_old = num_to_word(10); - let map_key_cutoff = num_to_word(20); - let map_key_recent = num_to_word(30); + let map_key_old = StorageMapKey::from_index(10u32); + let map_key_cutoff = StorageMapKey::from_index(20u32); + let map_key_recent = StorageMapKey::from_index(30u32); let value_1 = num_to_word(111); let value_2 = num_to_word(222); let value_3 = num_to_word(333); diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index e70bdf0d3..2d0a17242 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -5,7 +5,7 @@ use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountV use miden_protocol::account::{ AccountId, NonFungibleDeltaAction, - StorageMap, + StorageMapKey, StorageMapWitness, StorageSlotName, }; @@ -70,7 +70,8 @@ pub(crate) struct InnerForest { /// Maps (`account_id`, `slot_name`, `block_num`) to all key-value entries in that storage map. /// Accumulated from deltas - each block's entries include all entries up to that point. - storage_entries: BTreeMap<(AccountId, StorageSlotName, BlockNumber), BTreeMap>, + storage_entries: + BTreeMap<(AccountId, StorageSlotName, BlockNumber), BTreeMap>, /// Maps (`account_id`, `block_num`) to vault SMT root. 
/// Tracks asset vault versions across all blocks with structural sharing. @@ -142,13 +143,13 @@ impl InnerForest { account_id: AccountId, slot_name: &StorageSlotName, block_num: BlockNumber, - raw_key: Word, + raw_key: StorageMapKey, ) -> Result { - let key = StorageMap::hash_key(raw_key); + let key_hash = raw_key.hash(); let root = self .get_storage_map_root(account_id, slot_name, block_num) .ok_or(WitnessError::RootNotFound)?; - let proof = self.forest.open(root, key)?; + let proof = self.forest.open(root, key_hash.into())?; Ok(StorageMapWitness::new(proof, vec![raw_key])?) } @@ -182,14 +183,14 @@ impl InnerForest { account_id: AccountId, slot_name: StorageSlotName, block_num: BlockNumber, - raw_keys: &[Word], + raw_keys: &[StorageMapKey], ) -> Option> { let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; // Collect SMT proofs for each key let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { - let key = StorageMap::hash_key(*raw_key); - self.forest.open(root, key) + let key_hash = raw_key.hash(); + self.forest.open(root, key_hash.into()) })); Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) @@ -366,12 +367,12 @@ impl InnerForest { let prev_root = self.get_latest_storage_map_root(account_id, slot_name); assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); - let raw_map_entries: Vec<(Word, Word)> = + let raw_map_entries: Vec<(StorageMapKey, Word)> = Vec::from_iter(map_delta.entries().iter().filter_map(|(&key, &value)| { if value == EMPTY_WORD { None } else { - Some((Word::from(key), value)) + Some((key.into_inner(), value)) } })); @@ -385,7 +386,7 @@ impl InnerForest { } let hashed_entries: Vec<(Word, Word)> = Vec::from_iter( - raw_map_entries.iter().map(|(key, value)| (StorageMap::hash_key(*key), *value)), + raw_map_entries.iter().map(|(key, value)| (key.hash().into(), *value)), ); let new_root = self.forest.batch_insert(prev_root, hashed_entries.iter().copied())?; 
@@ -529,7 +530,7 @@ impl InnerForest { &self, account_id: AccountId, slot_name: &StorageSlotName, - ) -> BTreeMap { + ) -> BTreeMap { self.storage_entries .range( (account_id, slot_name.clone(), BlockNumber::GENESIS) @@ -565,15 +566,17 @@ impl InnerForest { let prev_root = self.get_latest_storage_map_root(account_id, slot_name); let delta_entries = Vec::from_iter( - map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)), + map_delta.entries().iter().map(|(key, value)| ((*key).into_inner(), *value)), ); if delta_entries.is_empty() { continue; } - let hashed_entries = - delta_entries.iter().map(|(key, value)| (StorageMap::hash_key(*key), *value)); + let hashed_entries: Vec<(Word, Word)> = delta_entries + .iter() + .map(|(key, value): &(StorageMapKey, Word)| (key.hash().into(), *value)) + .collect(); let updated_root = self.forest.batch_insert(prev_root, hashed_entries)?; diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 1c043a2d6..3045822c7 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -1,4 +1,4 @@ -use miden_protocol::account::AccountCode; +use miden_protocol::account::{AccountCode, StorageMapKey}; use miden_protocol::asset::{Asset, AssetVault, FungibleAsset}; use miden_protocol::testing::account_id::{ ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, @@ -323,7 +323,7 @@ fn test_update_storage_map() { let block_num = BlockNumber::GENESIS.child(); let slot_name = StorageSlotName::mock(3); - let key = Word::from([1u32, 2, 3, 4]); + let key = StorageMapKey::new(Word::from([1u32, 2, 3, 4])); let value = Word::from([5u32, 6, 7, 8]); let mut map_delta = StorageMapDelta::default(); @@ -403,8 +403,8 @@ fn test_storage_map_incremental_updates() { let account_id = dummy_account(); let slot_name = StorageSlotName::mock(3); - let key1 = Word::from([1u32, 0, 0, 0]); - let key2 = Word::from([2u32, 0, 0, 0]); + let key1 = StorageMapKey::from_index(1u32); + let key2 = 
StorageMapKey::from_index(2u32); let value1 = Word::from([10u32, 0, 0, 0]); let value2 = Word::from([20u32, 0, 0, 0]); let value3 = Word::from([30u32, 0, 0, 0]); @@ -452,16 +452,14 @@ fn test_storage_map_removals() { use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; const SLOT_INDEX: usize = 3; - const KEY_1: [u32; 4] = [1, 0, 0, 0]; - const KEY_2: [u32; 4] = [2, 0, 0, 0]; const VALUE_1: [u32; 4] = [10, 0, 0, 0]; const VALUE_2: [u32; 4] = [20, 0, 0, 0]; let mut forest = InnerForest::new(); let account_id = dummy_account(); let slot_name = StorageSlotName::mock(SLOT_INDEX); - let key_1 = Word::from(KEY_1); - let key_2 = Word::from(KEY_2); + let key_1 = StorageMapKey::from_index(1); + let key_2 = StorageMapKey::from_index(2); let value_1 = Word::from(VALUE_1); let value_2 = Word::from(VALUE_2); diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 495bda834..f6e8d4a7a 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -7,7 +7,7 @@ use miden_node_proto::generated as proto; use miden_node_proto::generated::rpc::BlockRange; use miden_node_proto::generated::store::ntx_builder_server; use miden_node_utils::ErrorReport; -use miden_protocol::account::StorageSlotName; +use miden_protocol::account::{StorageMapKey, StorageSlotName}; use miden_protocol::asset::AssetVaultKey; use miden_protocol::block::BlockNumber; use miden_protocol::note::Note; @@ -262,8 +262,9 @@ impl ntx_builder_server::NtxBuilder for StoreApi { read_account_id::(request.account_id).map_err(invalid_argument)?; // Read the map key. - let map_key = - read_root::(request.map_key, "MapKey").map_err(invalid_argument)?; + let map_key = read_root::(request.map_key, "MapKey") + .map(StorageMapKey::new) + .map_err(invalid_argument)?; // Read the slot name. 
let slot_name = StorageSlotName::new(request.slot_name).map_err(|err| { diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 40f6f29e6..c19699d00 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -23,7 +23,7 @@ use miden_node_proto::domain::account::{ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::formatting::format_array; use miden_protocol::Word; -use miden_protocol::account::{AccountId, StorageMapWitness, StorageSlotName}; +use miden_protocol::account::{AccountId, StorageMapKey, StorageMapWitness, StorageSlotName}; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::account_tree::AccountWitness; use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; @@ -830,7 +830,7 @@ impl State { account_id: AccountId, slot_name: &StorageSlotName, block_num: BlockNumber, - raw_key: Word, + raw_key: StorageMapKey, ) -> Result { let witness = self .forest diff --git a/crates/validator/src/tx_validation/data_store.rs b/crates/validator/src/tx_validation/data_store.rs index ebd382e44..ac143ef3b 100644 --- a/crates/validator/src/tx_validation/data_store.rs +++ b/crates/validator/src/tx_validation/data_store.rs @@ -3,7 +3,7 @@ use std::collections::BTreeSet; use miden_protocol::Word; -use miden_protocol::account::{AccountId, PartialAccount, StorageMapWitness}; +use miden_protocol::account::{AccountId, PartialAccount, StorageMapKey, StorageMapWitness}; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::note::NoteScript; @@ -83,7 +83,7 @@ impl DataStore for TransactionInputsDataStore { &self, _account_id: AccountId, _map_root: Word, - _map_key: Word, + _map_key: StorageMapKey, ) -> impl FutureMaybeSend> { async move { unimplemented!( diff --git a/scripts/check-msrv.sh b/scripts/check-msrv.sh new file mode 100755 index 000000000..6058a0ace --- /dev/null +++ 
b/scripts/check-msrv.sh @@ -0,0 +1,153 @@ +#!/bin/bash +set -e +set -o pipefail + +# Enhanced MSRV checking script for workspace repository +# Checks MSRV for each workspace member and provides helpful error messages + +# ---- utilities -------------------------------------------------------------- + +check_command() { + if ! command -v "$1" >/dev/null 2>&1; then + echo "ERROR: Required command '$1' is not installed or not in PATH" + exit 1 + fi +} + +# Check required commands +check_command "cargo" +check_command "jq" +check_command "rustup" +check_command "sed" +check_command "grep" +check_command "awk" + +# Portable in-place sed (GNU/macOS); usage: sed_i 's/foo/bar/' file +# shellcheck disable=SC2329 # used quoted +sed_i() { + if sed --version >/dev/null 2>&1; then + sed -i "$@" + else + sed -i '' "$@" + fi +} + +# ---- repo root -------------------------------------------------------------- + +# Get the directory where this script is located and change to the parent directory +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd "$DIR/.." + +echo "Checking MSRV for workspace members..." + +# ---- metadata -------------------------------------------------------------- + +metadata_json="$(cargo metadata --no-deps --format-version 1)" +workspace_root="$(printf '%s' "$metadata_json" | jq -r '.workspace_root')" + +failed_packages="" + +# Iterate actual workspace packages with manifest paths and (maybe) rust_version +# Fields per line (TSV): id name manifest_path rust_version_or_empty +while IFS=$'\t' read -r pkg_id package_name manifest_path rust_version; do + # Derive package directory (avoid external dirname for portability) + package_dir="${manifest_path%/*}" + if [[ -z "$package_dir" || "$package_dir" == "$manifest_path" ]]; then + package_dir="." + fi + + echo "Checking $package_name ($pkg_id) in $package_dir" + + if [[ ! -f "$package_dir/Cargo.toml" ]]; then + echo "WARNING: No Cargo.toml found in $package_dir, skipping..." 
+ continue + fi + + # Prefer cargo metadata's effective rust_version if present + current_msrv="$rust_version" + if [[ -z "$current_msrv" ]]; then + # If the crate inherits: rust-version.workspace = true + if grep -Eq '^\s*rust-version\.workspace\s*=\s*true\b' "$package_dir/Cargo.toml"; then + # Read from workspace root [workspace.package] + current_msrv="$(grep -Eo '^\s*rust-version\s*=\s*"[^"]+"' "$workspace_root/Cargo.toml" | head -n1 | sed -E 's/.*"([^"]+)".*/\1/')" + if [[ -n "$current_msrv" ]]; then + echo " Using workspace MSRV: $current_msrv" + fi + fi + fi + + if [[ -z "$current_msrv" ]]; then + echo "WARNING: No rust-version found (package or workspace) for $package_name" + continue + fi + + echo " Current MSRV: $current_msrv" + + # Try to verify the MSRV + if ! cargo msrv verify --manifest-path "$package_dir/Cargo.toml" >/dev/null 2>&1; then + echo "ERROR: MSRV check failed for $package_name" + failed_packages="$failed_packages $package_name" + + echo "Searching for correct MSRV for $package_name..." 
+ + # Determine the currently-installed stable toolchain version (e.g., "1.91.1") + latest_stable="$(rustup run stable rustc --version 2>/dev/null | awk '{print $2}')" + if [[ -z "$latest_stable" ]]; then latest_stable="1.91.1"; fi + + # Search for the actual MSRV starting from the current one + if actual_msrv=$(cargo msrv find \ + --manifest-path "$package_dir/Cargo.toml" \ + --min "$current_msrv" \ + --max "$latest_stable" \ + --output-format minimal 2>/dev/null); then + echo " Found actual MSRV: $actual_msrv" + echo "" + echo "ERROR SUMMARY for $package_name:" + echo " Package: $package_name" + echo " Directory: $package_dir" + echo " Current (incorrect) MSRV: $current_msrv" + echo " Correct MSRV: $actual_msrv" + echo "" + echo "TO FIX:" + echo " Update rust-version in $package_dir/Cargo.toml from \"$current_msrv\" to \"$actual_msrv\"" + echo "" + echo " Or run this command (portable in-place edit):" + echo " sed_i 's/^\\s*rust-version\\s*=\\s*\"$current_msrv\"/rust-version = \"$actual_msrv\"/' \"$package_dir/Cargo.toml\"" + else + echo " Could not determine correct MSRV automatically" + echo "" + echo "ERROR SUMMARY for $package_name:" + echo " Package: $package_name" + echo " Directory: $package_dir" + echo " Current (incorrect) MSRV: $current_msrv" + echo " Could not automatically determine correct MSRV" + echo "" + echo "TO FIX:" + echo " Run manually: cargo msrv find --manifest-path \"$package_dir/Cargo.toml\"" + fi + echo "-------------------------------------------------------------------------------" + else + echo "OK: MSRV check passed for $package_name" + fi + echo "" + +done < <( + printf '%s' "$metadata_json" \ + | jq -r '. as $m + | $m.workspace_members[] + | . 
as $id + | ($m.packages[] | select(.id == $id) + | [ .id, .name, .manifest_path, (.rust_version // "") ] | @tsv)' +) + +if [[ -n "$failed_packages" ]]; then + echo "MSRV CHECK FAILED" + echo "" + echo "The following packages have incorrect MSRV settings:$failed_packages" + echo "" + echo "Please fix the rust-version fields in the affected Cargo.toml files as shown above." + exit 1 +else + echo "ALL WORKSPACE MEMBERS PASSED MSRV CHECKS!" + exit 0 +fi From 0adb26923ec768a07f14ff9291301cdeeab8eb65 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Thu, 5 Mar 2026 12:29:55 +1300 Subject: [PATCH 76/77] chore: Refactor validator tx schema (#1735) --- .../db/migrations/2025062000000_setup/up.sql | 13 +++-- crates/validator/src/db/models.rs | 14 +++++- crates/validator/src/db/schema.rs | 9 +++- .../src/tx_validation/validated_tx.rs | 48 +++++++++++++++---- 4 files changed, 68 insertions(+), 16 deletions(-) diff --git a/crates/validator/src/db/migrations/2025062000000_setup/up.sql b/crates/validator/src/db/migrations/2025062000000_setup/up.sql index 06297a970..85753d87f 100644 --- a/crates/validator/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/validator/src/db/migrations/2025062000000_setup/up.sql @@ -1,8 +1,13 @@ CREATE TABLE validated_transactions ( - id BLOB NOT NULL, - block_num INTEGER NOT NULL, - account_id BLOB NOT NULL, - "transaction" BLOB NOT NULL, -- Binary encoded ExecutedTransaction. 
+ id BLOB NOT NULL, + block_num INTEGER NOT NULL, + account_id BLOB NOT NULL, + account_delta BLOB, + input_notes BLOB, + output_notes BLOB, + initial_account_hash BLOB NOT NULL, + final_account_hash BLOB NOT NULL, + fee BLOB NOT NULL, PRIMARY KEY (id) ) WITHOUT ROWID; diff --git a/crates/validator/src/db/models.rs b/crates/validator/src/db/models.rs index 9a50b7a39..cb41197b7 100644 --- a/crates/validator/src/db/models.rs +++ b/crates/validator/src/db/models.rs @@ -12,7 +12,12 @@ pub struct ValidatedTransactionRowInsert { pub id: Vec, pub block_num: i64, pub account_id: Vec, - pub transaction: Vec, + pub account_delta: Vec, + pub input_notes: Vec, + pub output_notes: Vec, + pub initial_account_hash: Vec, + pub final_account_hash: Vec, + pub fee: Vec, } impl ValidatedTransactionRowInsert { @@ -21,7 +26,12 @@ impl ValidatedTransactionRowInsert { id: tx.tx_id().to_bytes(), block_num: tx.block_num().to_raw_sql(), account_id: tx.account_id().to_bytes(), - transaction: tx.to_bytes(), + account_delta: tx.account_delta().to_bytes(), + input_notes: tx.input_notes().to_bytes(), + output_notes: tx.output_notes().to_bytes(), + initial_account_hash: tx.initial_account_hash().to_bytes(), + final_account_hash: tx.final_account_hash().to_bytes(), + fee: tx.fee().amount().to_le_bytes().to_vec(), } } } diff --git a/crates/validator/src/db/schema.rs b/crates/validator/src/db/schema.rs index 0d299dbfd..380c68b9d 100644 --- a/crates/validator/src/db/schema.rs +++ b/crates/validator/src/db/schema.rs @@ -1,8 +1,13 @@ diesel::table! 
{ - validated_transactions (id, block_num, account_id, transaction) { + validated_transactions (id) { id -> Binary, block_num -> BigInt, account_id -> Binary, - transaction -> Binary, + account_delta -> Binary, + input_notes -> Binary, + output_notes -> Binary, + initial_account_hash -> Binary, + final_account_hash -> Binary, + fee -> Binary, } } diff --git a/crates/validator/src/tx_validation/validated_tx.rs b/crates/validator/src/tx_validation/validated_tx.rs index 3ee7dfa45..0233c7e8f 100644 --- a/crates/validator/src/tx_validation/validated_tx.rs +++ b/crates/validator/src/tx_validation/validated_tx.rs @@ -1,7 +1,14 @@ -use miden_protocol::account::AccountId; +use miden_protocol::Word; +use miden_protocol::account::{AccountDelta, AccountId}; +use miden_protocol::asset::FungibleAsset; use miden_protocol::block::BlockNumber; -use miden_protocol::transaction::{ExecutedTransaction, TransactionId}; -use miden_tx::utils::Serializable; +use miden_protocol::transaction::{ + ExecutedTransaction, + InputNote, + InputNotes, + OutputNotes, + TransactionId, +}; /// Re-executed and validated transaction that the Validator, or some ad-hoc /// auditing procedure, might need to analyze. @@ -11,7 +18,7 @@ use miden_tx::utils::Serializable; pub struct ValidatedTransaction(ExecutedTransaction); impl ValidatedTransaction { - /// Creates a new instance of [`ValidatedTransactionInfo`]. + /// Creates a new instance of [`ValidatedTransaction`]. pub fn new(tx: ExecutedTransaction) -> Self { Self(tx) } @@ -28,11 +35,36 @@ impl ValidatedTransaction { /// Returns ID of the account against which this transaction was executed. pub fn account_id(&self) -> AccountId { - self.0.account_delta().id() + self.0.account_id() } - /// Returns the binary representation of the transaction info. - pub fn to_bytes(&self) -> Vec { - self.0.to_bytes() + /// Returns a description of changes between the initial and final account states. 
+ pub fn account_delta(&self) -> &AccountDelta { + self.0.account_delta() + } + + /// Returns the notes consumed in this transaction. + pub fn input_notes(&self) -> &InputNotes { + self.0.input_notes() + } + + /// Returns the notes created in this transaction. + pub fn output_notes(&self) -> &OutputNotes { + self.0.output_notes() + } + + /// Returns the commitment of the initial account state. + pub fn initial_account_hash(&self) -> Word { + self.0.initial_account().initial_commitment() + } + + /// Returns the commitment of the final account state. + pub fn final_account_hash(&self) -> Word { + self.0.final_account().to_commitment() + } + + /// Returns the fee of the transaction. + pub fn fee(&self) -> FungibleAsset { + self.0.fee() } } From 2479faf50c0676602b131a648348331a56b5c82d Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Fri, 6 Mar 2026 10:08:03 -0300 Subject: [PATCH 77/77] feat(monitor): add self-healing in network monitor (#1748) --- CHANGELOG.md | 1 + bin/network-monitor/src/counter.rs | 51 ++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 63ec1514b..7f4644a3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ ### Fixes +- Fixed network monitor looping on stale wallet nonce after node restarts by re-syncing wallet state from RPC after repeated failures ([#1748](https://github.com/0xMiden/node/pull/1748)). - Fixed `bundled start` panicking due to duplicate `data_directory` clap argument name between `BundledCommand::Start` and `NtxBuilderConfig` ([#1732](https://github.com/0xMiden/node/pull/1732)). - Fixed `bundled bootstrap` requiring `--validator.key.hex` or `--validator.key.kms-id` despite a default key being configured ([#1732](https://github.com/0xMiden/node/pull/1732)). - Fixed incorrectly classifying private notes with the network attachment as network notes ([#1378](https://github.com/0xMiden/node/pull/1738)). 
diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index b1633b218..dd004ff0d 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -41,6 +41,9 @@ use rand_chacha::ChaCha20Rng; use tokio::sync::{Mutex, watch}; use tracing::{error, info, instrument, warn}; +/// Number of consecutive increment failures before re-syncing the wallet account from the RPC. +const RESYNC_FAILURE_THRESHOLD: usize = 3; + use crate::COMPONENT; use crate::config::MonitorConfig; use crate::deploy::counter::COUNTER_SLOT_NAME; @@ -395,6 +398,7 @@ pub async fn run_increment_task( let mut rng = ChaCha20Rng::from_os_rng(); let mut interval = tokio::time::interval(config.counter_increment_interval); + let mut consecutive_failures: usize = 0; loop { interval.tick().await; @@ -414,6 +418,8 @@ pub async fn run_increment_task( .await { Ok((tx_id, final_account, block_height)) => { + consecutive_failures = 0; + let target_value = handle_increment_success( &mut wallet_account, &final_account, @@ -433,7 +439,21 @@ pub async fn run_increment_task( } }, Err(e) => { + consecutive_failures += 1; last_error = Some(handle_increment_failure(&mut details, &e)); + + if consecutive_failures >= RESYNC_FAILURE_THRESHOLD { + if try_resync_wallet_account( + &mut rpc_client, + &mut wallet_account, + &mut data_store, + ) + .await + .is_ok() + { + consecutive_failures = 0; + } + } }, } @@ -478,6 +498,37 @@ fn handle_increment_success( Ok(new_expected) } +/// Re-sync the wallet account from the RPC after repeated failures. 
+#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.counter.try_resync_wallet_account", + skip_all, + fields(account.id = %wallet_account.id()), + level = "warn", + err, +)] +async fn try_resync_wallet_account( + rpc_client: &mut RpcClient, + wallet_account: &mut Account, + data_store: &mut MonitorDataStore, +) -> Result<()> { + let fresh_account = fetch_wallet_account(rpc_client, wallet_account.id()) + .await + .inspect_err(|e| { + error!(account.id = %wallet_account.id(), err = ?e, "failed to re-sync wallet account from RPC"); + })? + .context("wallet account not found on-chain during re-sync") + .inspect_err(|e| { + error!(account.id = %wallet_account.id(), err = ?e, "wallet account not found on-chain during re-sync"); + })?; + + info!(account.id = %wallet_account.id(), "wallet account re-synced from RPC"); + *wallet_account = fresh_account; + data_store.update_account(wallet_account.clone()); + Ok(()) +} + /// Handle the failure path when creating/submitting the network note fails. fn handle_increment_failure(details: &mut IncrementDetails, error: &anyhow::Error) -> String { error!("Failed to create and submit network note: {:?}", error);