diff --git a/Cargo.lock b/Cargo.lock index d213815148..a95fd2d597 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1627,6 +1627,7 @@ dependencies = [ "substrate-runner", "subxt", "subxt-codegen", + "subxt-metadata", "syn 1.0.109", "test-runtime", "tokio", @@ -3653,7 +3654,6 @@ version = "0.28.0" dependencies = [ "impl-serde", "jsonrpsee", - "parity-scale-codec", "serde", "substrate-runner", "subxt", diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 2b3d77ad57..8416005c37 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -32,7 +32,7 @@ serde_json = "1.0.96" # hex encoded metadata to bytes hex = "0.4.3" # actual metadata types -frame-metadata = { version = "15.0.0", features = ["v14", "std"] } +frame-metadata = { version = "15.1.0", features = ["v14", "v15-unstable", "std"] } # decode bytes into the metadata types scale = { package = "parity-scale-codec", version = "3.0.0", default-features = false } # generate the item mod for codegen diff --git a/cli/src/commands/compatibility.rs b/cli/src/commands/compatibility.rs index a3860487be..2e0c66dfbd 100644 --- a/cli/src/commands/compatibility.rs +++ b/cli/src/commands/compatibility.rs @@ -4,12 +4,14 @@ use clap::Parser as ClapParser; use color_eyre::eyre::{self, WrapErr}; -use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed, RuntimeMetadataV14, META_RESERVED}; +use frame_metadata::{ + v15::RuntimeMetadataV15, RuntimeMetadata, RuntimeMetadataPrefixed, META_RESERVED, +}; use jsonrpsee::client_transport::ws::Uri; use scale::Decode; use serde::{Deserialize, Serialize}; use std::collections::HashMap; -use subxt_metadata::{get_metadata_hash, get_pallet_hash}; +use subxt_metadata::{get_metadata_hash, get_pallet_hash, metadata_v14_to_latest}; /// Verify metadata compatibility between substrate nodes. 
#[derive(Debug, ClapParser)] @@ -94,7 +96,7 @@ async fn handle_full_metadata(nodes: &[Uri]) -> color_eyre::Result<()> { Ok(()) } -async fn fetch_runtime_metadata(url: &Uri) -> color_eyre::Result { +async fn fetch_runtime_metadata(url: &Uri) -> color_eyre::Result { let bytes = subxt_codegen::utils::fetch_metadata_bytes(url).await?; let metadata = ::decode(&mut &bytes[..])?; @@ -108,7 +110,8 @@ async fn fetch_runtime_metadata(url: &Uri) -> color_eyre::Result Ok(v14), + RuntimeMetadata::V14(v14) => Ok(metadata_v14_to_latest(v14)), + RuntimeMetadata::V15(v15) => Ok(v15), _ => Err(eyre::eyre!( "Node {:?} with unsupported metadata version: {:?}", url, diff --git a/cli/src/commands/metadata.rs b/cli/src/commands/metadata.rs index f8d3aec83a..7b987edc74 100644 --- a/cli/src/commands/metadata.rs +++ b/cli/src/commands/metadata.rs @@ -8,7 +8,7 @@ use color_eyre::eyre; use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed}; use scale::{Decode, Encode}; use std::io::{self, Write}; -use subxt_metadata::retain_metadata_pallets; +use subxt_metadata::{metadata_v14_to_latest, retain_metadata_pallets}; /// Download metadata from a substrate node, for use with `subxt` codegen. #[derive(Debug, ClapParser)] @@ -20,6 +20,9 @@ pub struct Opts { format: String, /// Generate a subset of the metadata that contains only the /// types needed to represent the provided pallets. + /// + /// The returned metadata is updated to the latest available version + /// when using the option. 
#[clap(long, use_value_delimiter = true, value_parser)] pallets: Option>, } @@ -29,8 +32,9 @@ pub async fn run(opts: Opts) -> color_eyre::Result<()> { let mut metadata = ::decode(&mut &bytes[..])?; if let Some(pallets) = opts.pallets { - let metadata_v14 = match &mut metadata.1 { - RuntimeMetadata::V14(metadata_v14) => metadata_v14, + let mut metadata_v15 = match metadata.1 { + RuntimeMetadata::V14(metadata_v14) => metadata_v14_to_latest(metadata_v14), + RuntimeMetadata::V15(metadata_v15) => metadata_v15, _ => { return Err(eyre::eyre!( "Unsupported metadata version {:?}, expected V14.", @@ -39,9 +43,10 @@ pub async fn run(opts: Opts) -> color_eyre::Result<()> { } }; - retain_metadata_pallets(metadata_v14, |pallet_name| { + retain_metadata_pallets(&mut metadata_v15, |pallet_name| { pallets.iter().any(|p| &**p == pallet_name) }); + metadata = metadata_v15.into(); } match opts.format.as_str() { diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml index 8f65e8197d..97a298c41e 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -15,7 +15,7 @@ description = "Generate an API for interacting with a substrate node from FRAME [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "full"] } darling = "0.14.4" -frame-metadata = "15.0.0" +frame-metadata = { version = "15.1.0", features = ["v14", "v15-unstable", "std"] } heck = "0.4.1" proc-macro2 = "1.0.55" quote = "1.0.8" diff --git a/codegen/src/api/calls.rs b/codegen/src/api/calls.rs index dae3f124b3..37fe1e0e66 100644 --- a/codegen/src/api/calls.rs +++ b/codegen/src/api/calls.rs @@ -7,7 +7,7 @@ use crate::{ types::{CompositeDefFields, TypeGenerator}, CratePath, }; -use frame_metadata::{v14::RuntimeMetadataV14, PalletMetadata}; +use frame_metadata::v15::{PalletMetadata, RuntimeMetadataV15}; use heck::{ToSnakeCase as _, ToUpperCamelCase as _}; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; @@ -23,7 +23,7 @@ use 
scale_info::form::PortableForm; /// - `pallet` - Pallet metadata from which the calls are generated. /// - `types_mod_ident` - The ident of the base module that we can use to access the generated types from. pub fn generate_calls( - metadata: &RuntimeMetadataV14, + metadata: &RuntimeMetadataV15, type_gen: &TypeGenerator, pallet: &PalletMetadata, types_mod_ident: &syn::Ident, diff --git a/codegen/src/api/constants.rs b/codegen/src/api/constants.rs index 58dca550f9..c08745e826 100644 --- a/codegen/src/api/constants.rs +++ b/codegen/src/api/constants.rs @@ -3,7 +3,7 @@ // see LICENSE for license details. use crate::{types::TypeGenerator, CratePath}; -use frame_metadata::{v14::RuntimeMetadataV14, PalletMetadata}; +use frame_metadata::v15::{PalletMetadata, RuntimeMetadataV15}; use heck::ToSnakeCase as _; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; @@ -35,7 +35,7 @@ use super::CodegenError; /// - `pallet` - Pallet metadata from which the calls are generated. /// - `types_mod_ident` - The ident of the base module that we can use to access the generated types from. pub fn generate_constants( - metadata: &RuntimeMetadataV14, + metadata: &RuntimeMetadataV15, type_gen: &TypeGenerator, pallet: &PalletMetadata, types_mod_ident: &syn::Ident, diff --git a/codegen/src/api/events.rs b/codegen/src/api/events.rs index fce8b77f7d..01f46e989d 100644 --- a/codegen/src/api/events.rs +++ b/codegen/src/api/events.rs @@ -3,7 +3,7 @@ // see LICENSE for license details. 
use crate::{types::TypeGenerator, CratePath}; -use frame_metadata::PalletMetadata; +use frame_metadata::v15::PalletMetadata; use proc_macro2::TokenStream as TokenStream2; use quote::quote; use scale_info::form::PortableForm; diff --git a/codegen/src/api/mod.rs b/codegen/src/api/mod.rs index 308f2a03ce..ae52fb3f2a 100644 --- a/codegen/src/api/mod.rs +++ b/codegen/src/api/mod.rs @@ -9,7 +9,8 @@ mod constants; mod events; mod storage; -use subxt_metadata::get_metadata_per_pallet_hash; +use frame_metadata::v15::RuntimeMetadataV15; +use subxt_metadata::{get_metadata_per_pallet_hash, metadata_v14_to_latest}; use super::DerivesRegistry; use crate::error::CodegenError; @@ -20,7 +21,7 @@ use crate::{ CratePath, }; use codec::Decode; -use frame_metadata::{v14::RuntimeMetadataV14, RuntimeMetadata, RuntimeMetadataPrefixed}; +use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed}; use heck::ToSnakeCase as _; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; @@ -152,7 +153,7 @@ pub fn generate_runtime_api_from_bytes( /// Create the API for interacting with a Substrate runtime. pub struct RuntimeGenerator { - metadata: RuntimeMetadataV14, + metadata: RuntimeMetadataV15, } impl RuntimeGenerator { @@ -161,11 +162,20 @@ impl RuntimeGenerator { /// **Note:** If you have the metadata path, URL or bytes to hand, prefer to use /// one of the `generate_runtime_api_from_*` functions for generating the runtime API /// from that. + /// + /// # Panics + /// + /// Panics if the runtime metadata version is not supported. + /// + /// Supported versions: v14 and v15. 
pub fn new(metadata: RuntimeMetadataPrefixed) -> Self { - match metadata.1 { - RuntimeMetadata::V14(v14) => Self { metadata: v14 }, + let metadata = match metadata.1 { + RuntimeMetadata::V14(v14) => metadata_v14_to_latest(v14), + RuntimeMetadata::V15(v15) => v15, _ => panic!("Unsupported metadata version {:?}", metadata.1), - } + }; + + RuntimeGenerator { metadata } } /// Generate the API for interacting with a Substrate runtime. diff --git a/codegen/src/api/storage.rs b/codegen/src/api/storage.rs index a4d3a7f214..058730db43 100644 --- a/codegen/src/api/storage.rs +++ b/codegen/src/api/storage.rs @@ -3,8 +3,8 @@ // see LICENSE for license details. use crate::{types::TypeGenerator, CratePath}; -use frame_metadata::{ - v14::RuntimeMetadataV14, PalletMetadata, StorageEntryMetadata, StorageEntryModifier, +use frame_metadata::v15::{ + PalletMetadata, RuntimeMetadataV15, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, }; use heck::ToSnakeCase as _; @@ -24,7 +24,7 @@ use super::CodegenError; /// - `pallet` - Pallet metadata from which the storages are generated. /// - `types_mod_ident` - The ident of the base module that we can use to access the generated types from. pub fn generate_storage( - metadata: &RuntimeMetadataV14, + metadata: &RuntimeMetadataV15, type_gen: &TypeGenerator, pallet: &PalletMetadata, types_mod_ident: &syn::Ident, @@ -64,7 +64,7 @@ pub fn generate_storage( } fn generate_storage_entry_fns( - metadata: &RuntimeMetadataV14, + metadata: &RuntimeMetadataV15, type_gen: &TypeGenerator, pallet: &PalletMetadata, storage_entry: &StorageEntryMetadata, diff --git a/codegen/src/error.rs b/codegen/src/error.rs index 8d38bbfdba..411225e34a 100644 --- a/codegen/src/error.rs +++ b/codegen/src/error.rs @@ -13,10 +13,10 @@ pub enum CodegenError { #[error("Could not find type with ID {0} in the type registry; please raise a support issue.")] TypeNotFound(u32), /// Cannot fetch the metadata bytes. 
- #[error("Failed to fetch metadata, make sure that you're pointing at a node which is providing V14 metadata: {0}")] + #[error("Failed to fetch metadata, make sure that you're pointing at a node which is providing substrate-based metadata: {0}")] Fetch(#[from] FetchMetadataError), /// Failed IO for the metadata file. - #[error("Failed IO for {0}, make sure that you are providing the correct file path for metadata V14: {1}")] + #[error("Failed IO for {0}, make sure that you are providing the correct file path for metadata: {1}")] Io(String, std::io::Error), /// Cannot decode the metadata bytes. #[error("Could not decode metadata, only V14 metadata is supported: {0}")] @@ -25,7 +25,7 @@ pub enum CodegenError { #[error("Out-of-line subxt modules are not supported, make sure you are providing a body to your module: pub mod polkadot {{ ... }}")] InvalidModule(Span), /// Expected named or unnamed fields. - #[error("Fields should either be all named or all unnamed, make sure you are providing a valid metadata V14: {0}")] + #[error("Fields should either be all named or all unnamed, make sure you are providing a valid metadata: {0}")] InvalidFields(String), /// Substitute types must have a valid path. #[error("Type substitution error: {0}")] @@ -34,20 +34,20 @@ pub enum CodegenError { #[error("Invalid type path {0}: {1}")] InvalidTypePath(String, syn::Error), /// Metadata for constant could not be found. - #[error("Metadata for constant entry {0}_{1} could not be found. Make sure you are providing a valid metadata V14")] + #[error("Metadata for constant entry {0}_{1} could not be found. Make sure you are providing a valid substrate-based metadata")] MissingConstantMetadata(String, String), /// Metadata for storage could not be found. - #[error("Metadata for storage entry {0}_{1} could not be found. Make sure you are providing a valid metadata V14")] + #[error("Metadata for storage entry {0}_{1} could not be found. 
Make sure you are providing a valid substrate-based metadata")] MissingStorageMetadata(String, String), /// Metadata for call could not be found. - #[error("Metadata for call entry {0}_{1} could not be found. Make sure you are providing a valid metadata V14")] + #[error("Metadata for call entry {0}_{1} could not be found. Make sure you are providing a valid substrate-based metadata")] MissingCallMetadata(String, String), /// Call variant must have all named fields. - #[error("Call variant for type {0} must have all named fields. Make sure you are providing a valid metadata V14")] + #[error("Call variant for type {0} must have all named fields. Make sure you are providing a valid substrate-based metadata")] InvalidCallVariant(u32), /// Type should be an variant/enum. #[error( - "{0} type should be an variant/enum type. Make sure you are providing a valid metadata V14" + "{0} type should be an variant/enum type. Make sure you are providing a valid substrate-based metadata" )] InvalidType(String), } diff --git a/metadata/Cargo.toml b/metadata/Cargo.toml index 396f28b96c..d72d8fa6db 100644 --- a/metadata/Cargo.toml +++ b/metadata/Cargo.toml @@ -15,7 +15,7 @@ description = "Command line utilities for checking metadata compatibility betwee [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "full"] } -frame-metadata = "15.0.0" +frame-metadata = { version = "15.1.0", features = ["v14", "v15-unstable", "std"] } scale-info = "2.5.0" sp-core-hashing = "8.0.0" diff --git a/metadata/benches/bench.rs b/metadata/benches/bench.rs index ab9d240b1e..d264fbe5f3 100644 --- a/metadata/benches/bench.rs +++ b/metadata/benches/bench.rs @@ -4,21 +4,23 @@ use codec::Decode; use criterion::*; -use frame_metadata::{RuntimeMetadata::V14, RuntimeMetadataPrefixed, RuntimeMetadataV14}; +use frame_metadata::{v15::RuntimeMetadataV15, RuntimeMetadata, RuntimeMetadataPrefixed}; use scale_info::{form::PortableForm, TypeDef, 
TypeDefVariant}; use std::{fs, path::Path}; use subxt_metadata::{ get_call_hash, get_constant_hash, get_metadata_hash, get_pallet_hash, get_storage_hash, + metadata_v14_to_latest, }; -fn load_metadata() -> RuntimeMetadataV14 { +fn load_metadata() -> RuntimeMetadataV15 { let bytes = fs::read(Path::new("../artifacts/polkadot_metadata.scale")) .expect("Cannot read metadata blob"); let meta: RuntimeMetadataPrefixed = Decode::decode(&mut &*bytes).expect("Cannot decode scale metadata"); match meta.1 { - V14(v14) => v14, + RuntimeMetadata::V14(v14) => metadata_v14_to_latest(v14), + RuntimeMetadata::V15(v15) => v15, _ => panic!("Unsupported metadata version {:?}", meta.1), } } diff --git a/metadata/src/lib.rs b/metadata/src/lib.rs index d31ea9a92b..af042f8581 100644 --- a/metadata/src/lib.rs +++ b/metadata/src/lib.rs @@ -3,844 +3,100 @@ // see LICENSE for license details. mod retain; +mod validation; -use frame_metadata::{ - ExtrinsicMetadata, RuntimeMetadataV14, StorageEntryMetadata, StorageEntryType, -}; -pub use retain::retain_metadata_pallets; -use scale_info::{form::PortableForm, Field, PortableRegistry, TypeDef, Variant}; -use std::collections::HashSet; - -/// Internal byte representation for various metadata types utilized for -/// generating deterministic hashes between different rust versions. -#[repr(u8)] -enum TypeBeingHashed { - Composite, - Variant, - Sequence, - Array, - Tuple, - Primitive, - Compact, - BitSequence, -} - -/// Hashing function utilized internally. -fn hash(data: &[u8]) -> [u8; 32] { - sp_core_hashing::twox_256(data) -} - -/// XOR two hashes together. If we have two pseudorandom hashes, then this will -/// lead to another pseudorandom value. If there is potentially some pattern to -/// the hashes we are xoring (eg we might be xoring the same hashes a few times), -/// prefer `hash_hashes` to give us stronger pseudorandomness guarantees. 
-fn xor(a: [u8; 32], b: [u8; 32]) -> [u8; 32] { - let mut out = [0u8; 32]; - for (idx, (a, b)) in a.into_iter().zip(b).enumerate() { - out[idx] = a ^ b; - } - out -} - -/// Combine two hashes or hash-like sets of bytes together into a single hash. -/// `xor` is OK for one-off combinations of bytes, but if we are merging -/// potentially identical hashes, this is a safer way to ensure the result is -/// unique. -fn hash_hashes(a: [u8; 32], b: [u8; 32]) -> [u8; 32] { - let mut out = [0u8; 32 * 2]; - for (idx, byte) in a.into_iter().chain(b).enumerate() { - out[idx] = byte; - } - hash(&out) -} - -/// Obtain the hash representation of a `scale_info::Field`. -fn get_field_hash( - registry: &PortableRegistry, - field: &Field, - visited_ids: &mut HashSet, -) -> [u8; 32] { - let mut bytes = get_type_hash(registry, field.ty.id, visited_ids); - - // XOR name and field name with the type hash if they exist - if let Some(name) = &field.name { - bytes = xor(bytes, hash(name.as_bytes())); - } - - bytes -} - -/// Obtain the hash representation of a `scale_info::Variant`. -fn get_variant_hash( - registry: &PortableRegistry, - var: &Variant, - visited_ids: &mut HashSet, -) -> [u8; 32] { - // Merge our hashes of the name and each field together using xor. - let mut bytes = hash(var.name.as_bytes()); - for field in &var.fields { - bytes = hash_hashes(bytes, get_field_hash(registry, field, visited_ids)) - } - - bytes -} - -/// Obtain the hash representation of a `scale_info::TypeDef`. 
-fn get_type_def_hash( - registry: &PortableRegistry, - ty_def: &TypeDef, - visited_ids: &mut HashSet, -) -> [u8; 32] { - match ty_def { - TypeDef::Composite(composite) => { - let mut bytes = hash(&[TypeBeingHashed::Composite as u8]); - for field in &composite.fields { - bytes = hash_hashes(bytes, get_field_hash(registry, field, visited_ids)); - } - bytes - } - TypeDef::Variant(variant) => { - let mut bytes = hash(&[TypeBeingHashed::Variant as u8]); - for var in &variant.variants { - bytes = hash_hashes(bytes, get_variant_hash(registry, var, visited_ids)); - } - bytes - } - TypeDef::Sequence(sequence) => { - let bytes = hash(&[TypeBeingHashed::Sequence as u8]); - xor( - bytes, - get_type_hash(registry, sequence.type_param.id, visited_ids), - ) - } - TypeDef::Array(array) => { - // Take length into account; different length must lead to different hash. - let len_bytes = array.len.to_be_bytes(); - let bytes = hash(&[ - TypeBeingHashed::Array as u8, - len_bytes[0], - len_bytes[1], - len_bytes[2], - len_bytes[3], - ]); - xor( - bytes, - get_type_hash(registry, array.type_param.id, visited_ids), - ) - } - TypeDef::Tuple(tuple) => { - let mut bytes = hash(&[TypeBeingHashed::Tuple as u8]); - for field in &tuple.fields { - bytes = hash_hashes(bytes, get_type_hash(registry, field.id, visited_ids)); - } - bytes - } - TypeDef::Primitive(primitive) => { - // Cloning the 'primitive' type should essentially be a copy. 
- hash(&[TypeBeingHashed::Primitive as u8, primitive.clone() as u8]) - } - TypeDef::Compact(compact) => { - let bytes = hash(&[TypeBeingHashed::Compact as u8]); - xor( - bytes, - get_type_hash(registry, compact.type_param.id, visited_ids), - ) - } - TypeDef::BitSequence(bitseq) => { - let mut bytes = hash(&[TypeBeingHashed::BitSequence as u8]); - bytes = xor( - bytes, - get_type_hash(registry, bitseq.bit_order_type.id, visited_ids), - ); - bytes = xor( - bytes, - get_type_hash(registry, bitseq.bit_store_type.id, visited_ids), - ); - bytes - } - } -} - -/// Obtain the hash representation of a `scale_info::Type` identified by id. -fn get_type_hash(registry: &PortableRegistry, id: u32, visited_ids: &mut HashSet) -> [u8; 32] { - // Guard against recursive types and return a fixed arbitrary hash - if !visited_ids.insert(id) { - return hash(&[123u8]); - } - - let ty = registry.resolve(id).unwrap(); - get_type_def_hash(registry, &ty.type_def, visited_ids) -} - -/// Obtain the hash representation of a `frame_metadata::ExtrinsicMetadata`. -fn get_extrinsic_hash( - registry: &PortableRegistry, - extrinsic: &ExtrinsicMetadata, -) -> [u8; 32] { - let mut visited_ids = HashSet::::new(); - - let mut bytes = get_type_hash(registry, extrinsic.ty.id, &mut visited_ids); - - bytes = xor(bytes, hash(&[extrinsic.version])); - for signed_extension in extrinsic.signed_extensions.iter() { - let mut ext_bytes = hash(signed_extension.identifier.as_bytes()); - ext_bytes = xor( - ext_bytes, - get_type_hash(registry, signed_extension.ty.id, &mut visited_ids), - ); - ext_bytes = xor( - ext_bytes, - get_type_hash( - registry, - signed_extension.additional_signed.id, - &mut visited_ids, - ), - ); - bytes = hash_hashes(bytes, ext_bytes); - } - - bytes -} - -/// Get the hash corresponding to a single storage entry. 
-fn get_storage_entry_hash( - registry: &PortableRegistry, - entry: &StorageEntryMetadata, - visited_ids: &mut HashSet, -) -> [u8; 32] { - let mut bytes = hash(entry.name.as_bytes()); - // Cloning 'entry.modifier' should essentially be a copy. - bytes = xor(bytes, hash(&[entry.modifier.clone() as u8])); - bytes = xor(bytes, hash(&entry.default)); - - match &entry.ty { - StorageEntryType::Plain(ty) => { - bytes = xor(bytes, get_type_hash(registry, ty.id, visited_ids)); - } - StorageEntryType::Map { - hashers, - key, - value, - } => { - for hasher in hashers { - // Cloning the hasher should essentially be a copy. - bytes = hash_hashes(bytes, [hasher.clone() as u8; 32]); - } - bytes = xor(bytes, get_type_hash(registry, key.id, visited_ids)); - bytes = xor(bytes, get_type_hash(registry, value.id, visited_ids)); - } - } - - bytes -} - -/// Obtain the hash for a specific storage item, or an error if it's not found. -pub fn get_storage_hash( - metadata: &RuntimeMetadataV14, - pallet_name: &str, - storage_name: &str, -) -> Result<[u8; 32], NotFound> { - let pallet = metadata - .pallets - .iter() - .find(|p| p.name == pallet_name) - .ok_or(NotFound::Pallet)?; - - let storage = pallet.storage.as_ref().ok_or(NotFound::Item)?; - - let entry = storage - .entries - .iter() - .find(|s| s.name == storage_name) - .ok_or(NotFound::Item)?; - - let hash = get_storage_entry_hash(&metadata.types, entry, &mut HashSet::new()); - Ok(hash) -} - -/// Obtain the hash for a specific constant, or an error if it's not found. -pub fn get_constant_hash( - metadata: &RuntimeMetadataV14, - pallet_name: &str, - constant_name: &str, -) -> Result<[u8; 32], NotFound> { - let pallet = metadata - .pallets - .iter() - .find(|p| p.name == pallet_name) - .ok_or(NotFound::Pallet)?; - - let constant = pallet - .constants - .iter() - .find(|c| c.name == constant_name) - .ok_or(NotFound::Item)?; - - // We only need to check that the type of the constant asked for matches. 
- let bytes = get_type_hash(&metadata.types, constant.ty.id, &mut HashSet::new()); - Ok(bytes) -} - -/// Obtain the hash for a specific call, or an error if it's not found. -pub fn get_call_hash( - metadata: &RuntimeMetadataV14, - pallet_name: &str, - call_name: &str, -) -> Result<[u8; 32], NotFound> { - let pallet = metadata - .pallets - .iter() - .find(|p| p.name == pallet_name) - .ok_or(NotFound::Pallet)?; - - let call_id = pallet.calls.as_ref().ok_or(NotFound::Item)?.ty.id; - - let call_ty = metadata.types.resolve(call_id).ok_or(NotFound::Item)?; - - let call_variants = match &call_ty.type_def { - TypeDef::Variant(variant) => &variant.variants, - _ => return Err(NotFound::Item), - }; - - let variant = call_variants - .iter() - .find(|v| v.name == call_name) - .ok_or(NotFound::Item)?; - - // hash the specific variant representing the call we are interested in. - let hash = get_variant_hash(&metadata.types, variant, &mut HashSet::new()); - Ok(hash) -} - -/// Obtain the hash representation of a `frame_metadata::PalletMetadata`. -pub fn get_pallet_hash( - registry: &PortableRegistry, - pallet: &frame_metadata::PalletMetadata, -) -> [u8; 32] { - // Begin with some arbitrary hash (we don't really care what it is). 
- let mut bytes = hash(&[19]); - let mut visited_ids = HashSet::::new(); - - if let Some(calls) = &pallet.calls { - bytes = xor( - bytes, - get_type_hash(registry, calls.ty.id, &mut visited_ids), - ); - } - if let Some(ref event) = pallet.event { - bytes = xor( - bytes, - get_type_hash(registry, event.ty.id, &mut visited_ids), - ); - } - for constant in pallet.constants.iter() { - bytes = xor(bytes, hash(constant.name.as_bytes())); - bytes = xor( - bytes, - get_type_hash(registry, constant.ty.id, &mut visited_ids), - ); - } - if let Some(ref error) = pallet.error { - bytes = xor( - bytes, - get_type_hash(registry, error.ty.id, &mut visited_ids), - ); - } - if let Some(ref storage) = pallet.storage { - bytes = xor(bytes, hash(storage.prefix.as_bytes())); - for entry in storage.entries.iter() { - bytes = hash_hashes( - bytes, - get_storage_entry_hash(registry, entry, &mut visited_ids), - ); - } - } - - bytes -} - -/// Obtain the hash representation of a `frame_metadata::RuntimeMetadataV14`. -pub fn get_metadata_hash(metadata: &RuntimeMetadataV14) -> [u8; 32] { - // Collect all pairs of (pallet name, pallet hash). - let mut pallets: Vec<(&str, [u8; 32])> = metadata - .pallets - .iter() - .map(|pallet| { - let hash = get_pallet_hash(&metadata.types, pallet); - (&*pallet.name, hash) - }) - .collect(); - - // Sort by pallet name to create a deterministic representation of the underlying metadata. - pallets.sort_by_key(|&(name, _hash)| name); - - // Note: pallet name is excluded from hashing. - // Each pallet has a hash of 32 bytes, and the vector is extended with - // extrinsic hash and metadata ty hash (2 * 32). 
- let mut bytes = Vec::with_capacity(pallets.len() * 32 + 64); - for (_, hash) in pallets.iter() { - bytes.extend(hash) - } +use frame_metadata::{v14::RuntimeMetadataV14, v15::RuntimeMetadataV15}; - bytes.extend(get_extrinsic_hash(&metadata.types, &metadata.extrinsic)); - - let mut visited_ids = HashSet::::new(); - bytes.extend(get_type_hash( - &metadata.types, - metadata.ty.id, - &mut visited_ids, - )); - - hash(&bytes) -} - -/// Obtain the hash representation of a `frame_metadata::RuntimeMetadataV14` -/// hashing only the provided pallets. -/// -/// **Note:** This is similar to `get_metadata_hash`, but performs hashing only of the provided -/// pallets if they exist. There are cases where the runtime metadata contains a subset of -/// the pallets from the static metadata. In those cases, the static API can communicate -/// properly with the subset of pallets from the runtime node. -pub fn get_metadata_per_pallet_hash>( - metadata: &RuntimeMetadataV14, - pallets: &[T], -) -> [u8; 32] { - // Collect all pairs of (pallet name, pallet hash). - let mut pallets_hashed: Vec<(&str, [u8; 32])> = metadata - .pallets - .iter() - .filter_map(|pallet| { - // Make sure to filter just the pallets we are interested in. - let in_pallet = pallets - .iter() - .any(|pallet_ref| pallet_ref.as_ref() == pallet.name); - if in_pallet { - let hash = get_pallet_hash(&metadata.types, pallet); - Some((&*pallet.name, hash)) - } else { - None - } - }) - .collect(); - - // Sort by pallet name to create a deterministic representation of the underlying metadata. - pallets_hashed.sort_by_key(|&(name, _hash)| name); - - // Note: pallet name is excluded from hashing. - // Each pallet has a hash of 32 bytes, and the vector is extended with - // extrinsic hash and metadata ty hash (2 * 32). 
- let mut bytes = Vec::with_capacity(pallets_hashed.len() * 32); - for (_, hash) in pallets_hashed.iter() { - bytes.extend(hash) - } - - hash(&bytes) -} - -/// An error returned if we attempt to get the hash for a specific call, constant -/// or storage item that doesn't exist. -#[derive(Clone, Debug)] -pub enum NotFound { - Pallet, - Item, -} - -#[cfg(test)] -mod tests { - use super::*; - use bitvec::{order::Lsb0, vec::BitVec}; - use frame_metadata::{ - ExtrinsicMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata, - PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeMetadataV14, - StorageEntryMetadata, StorageEntryModifier, - }; - use scale_info::meta_type; - - // Define recursive types. - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - struct A { - pub b: Box, - } - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - struct B { - pub a: Box, - } - - // Define TypeDef supported types. - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - // TypeDef::Composite with TypeDef::Array with Typedef::Primitive. - struct AccountId32([u8; 32]); - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - // TypeDef::Variant. - enum DigestItem { - PreRuntime( - // TypeDef::Array with primitive. - [::core::primitive::u8; 4usize], - // TypeDef::Sequence. - ::std::vec::Vec<::core::primitive::u8>, - ), - Other(::std::vec::Vec<::core::primitive::u8>), - // Nested TypeDef::Tuple. - RuntimeEnvironmentUpdated(((i8, i16), (u32, u64))), - // TypeDef::Compact. - Index(#[codec(compact)] ::core::primitive::u8), - // TypeDef::BitSequence. - BitSeq(BitVec), - } - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - // Ensure recursive types and TypeDef variants are captured. - struct MetadataTestType { - recursive: A, - composite: AccountId32, - type_def: DigestItem, - } - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - // Simulate a PalletCallMetadata. 
- enum Call { - #[codec(index = 0)] - FillBlock { ratio: AccountId32 }, - #[codec(index = 1)] - Remark { remark: DigestItem }, - } - - fn build_default_extrinsic() -> ExtrinsicMetadata { - ExtrinsicMetadata { - ty: meta_type::<()>(), - version: 0, - signed_extensions: vec![], - } - } - - fn default_pallet() -> PalletMetadata { - PalletMetadata { - name: "Test", - storage: None, - calls: None, - event: None, - constants: vec![], - error: None, - index: 0, - } - } - - fn build_default_pallets() -> Vec { - vec![ - PalletMetadata { - name: "First", - calls: Some(PalletCallMetadata { - ty: meta_type::(), - }), - ..default_pallet() - }, - PalletMetadata { - name: "Second", - index: 1, - calls: Some(PalletCallMetadata { - ty: meta_type::<(DigestItem, AccountId32, A)>(), - }), - ..default_pallet() - }, - ] - } - - fn pallets_to_metadata(pallets: Vec) -> RuntimeMetadataV14 { - RuntimeMetadataV14::new(pallets, build_default_extrinsic(), meta_type::<()>()) - } - - #[test] - fn different_pallet_index() { - let pallets = build_default_pallets(); - let mut pallets_swap = pallets.clone(); - - let metadata = pallets_to_metadata(pallets); - - // Change the order in which pallets are registered. - pallets_swap.swap(0, 1); - pallets_swap[0].index = 0; - pallets_swap[1].index = 1; - let metadata_swap = pallets_to_metadata(pallets_swap); - - let hash = get_metadata_hash(&metadata); - let hash_swap = get_metadata_hash(&metadata_swap); - - // Changing pallet order must still result in a deterministic unique hash. - assert_eq!(hash, hash_swap); - } - - #[test] - fn recursive_type() { - let mut pallet = default_pallet(); - pallet.calls = Some(PalletCallMetadata { - ty: meta_type::(), - }); - let metadata = pallets_to_metadata(vec![pallet]); - - // Check hashing algorithm finishes on a recursive type. - get_metadata_hash(&metadata); - } - - #[test] - /// Ensure correctness of hashing when parsing the `metadata.types`. 
- /// - /// Having a recursive structure `A: { B }` and `B: { A }` registered in different order - /// `types: { { id: 0, A }, { id: 1, B } }` and `types: { { id: 0, B }, { id: 1, A } }` - /// must produce the same deterministic hashing value. - fn recursive_types_different_order() { - let mut pallets = build_default_pallets(); - pallets[0].calls = Some(PalletCallMetadata { - ty: meta_type::(), - }); - pallets[1].calls = Some(PalletCallMetadata { - ty: meta_type::(), - }); - pallets[1].index = 1; - let mut pallets_swap = pallets.clone(); - let metadata = pallets_to_metadata(pallets); - - pallets_swap.swap(0, 1); - pallets_swap[0].index = 0; - pallets_swap[1].index = 1; - let metadata_swap = pallets_to_metadata(pallets_swap); - - let hash = get_metadata_hash(&metadata); - let hash_swap = get_metadata_hash(&metadata_swap); - - // Changing pallet order must still result in a deterministic unique hash. - assert_eq!(hash, hash_swap); - } - - #[test] - fn pallet_hash_correctness() { - let compare_pallets_hash = |lhs: &PalletMetadata, rhs: &PalletMetadata| { - let metadata = pallets_to_metadata(vec![lhs.clone()]); - let hash = get_metadata_hash(&metadata); - - let metadata = pallets_to_metadata(vec![rhs.clone()]); - let new_hash = get_metadata_hash(&metadata); - - assert_ne!(hash, new_hash); - }; - - // Build metadata progressively from an empty pallet to a fully populated pallet. 
- let mut pallet = default_pallet(); - let pallet_lhs = pallet.clone(); - pallet.storage = Some(PalletStorageMetadata { - prefix: "Storage", - entries: vec![StorageEntryMetadata { - name: "BlockWeight", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(meta_type::()), - default: vec![], - docs: vec![], - }], - }); - compare_pallets_hash(&pallet_lhs, &pallet); - - let pallet_lhs = pallet.clone(); - // Calls are similar to: - // - // ``` - // pub enum Call { - // call_name_01 { arg01: type }, - // call_name_02 { arg01: type, arg02: type } - // } - // ``` - pallet.calls = Some(PalletCallMetadata { - ty: meta_type::(), - }); - compare_pallets_hash(&pallet_lhs, &pallet); - - let pallet_lhs = pallet.clone(); - // Events are similar to Calls. - pallet.event = Some(PalletEventMetadata { - ty: meta_type::(), - }); - compare_pallets_hash(&pallet_lhs, &pallet); - - let pallet_lhs = pallet.clone(); - pallet.constants = vec![PalletConstantMetadata { - name: "BlockHashCount", - ty: meta_type::(), - value: vec![96u8, 0, 0, 0], - docs: vec![], - }]; - compare_pallets_hash(&pallet_lhs, &pallet); - - let pallet_lhs = pallet.clone(); - pallet.error = Some(PalletErrorMetadata { - ty: meta_type::(), - }); - compare_pallets_hash(&pallet_lhs, &pallet); - } - - #[test] - fn metadata_per_pallet_hash_correctness() { - let pallets = build_default_pallets(); - - // Build metadata with just the first pallet. - let metadata_one = pallets_to_metadata(vec![pallets[0].clone()]); - // Build metadata with both pallets. - let metadata_both = pallets_to_metadata(pallets); - - // Hashing will ignore any non-existant pallet and return the same result. - let hash = get_metadata_per_pallet_hash(&metadata_one, &["First", "Second"]); - let hash_rhs = get_metadata_per_pallet_hash(&metadata_one, &["First"]); - assert_eq!(hash, hash_rhs, "hashing should ignore non-existant pallets"); - - // Hashing one pallet from metadata with 2 pallets inserted will ignore the second pallet. 
- let hash_second = get_metadata_per_pallet_hash(&metadata_both, &["First"]); - assert_eq!( - hash_second, hash, - "hashing one pallet should ignore the others" - ); - - // Check hashing with all pallets. - let hash_second = get_metadata_per_pallet_hash(&metadata_both, &["First", "Second"]); - assert_ne!( - hash_second, hash, - "hashing both pallets should produce a different result from hashing just one pallet" - ); - } - - #[test] - fn field_semantic_changes() { - // Get a hash representation of the provided meta type, - // inserted in the context of pallet metadata call. - let to_hash = |meta_ty| { - let pallet = PalletMetadata { - calls: Some(PalletCallMetadata { ty: meta_ty }), - ..default_pallet() - }; - let metadata = pallets_to_metadata(vec![pallet]); - get_metadata_hash(&metadata) - }; - - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - enum EnumFieldNotNamedA { - First(u8), - } - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - enum EnumFieldNotNamedB { - First(u8), - } - // Semantic changes apply only to field names. - // This is considered to be a good tradeoff in hashing performance, as refactoring - // a structure / enum 's name is less likely to cause a breaking change. - // Even if the enums have different names, 'EnumFieldNotNamedA' and 'EnumFieldNotNamedB', - // they are equal in meaning (i.e, both contain `First(u8)`). - assert_eq!( - to_hash(meta_type::()), - to_hash(meta_type::()) - ); - - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - struct StructFieldNotNamedA([u8; 32]); - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - struct StructFieldNotNamedSecondB([u8; 32]); - // Similarly to enums, semantic changes apply only inside the structure fields. 
- assert_eq!( - to_hash(meta_type::()), - to_hash(meta_type::()) - ); - - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - enum EnumFieldNotNamed { - First(u8), - } - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - enum EnumFieldNotNamedSecond { - Second(u8), - } - // The enums are binary compatible, they contain a different semantic meaning: - // `First(u8)` and `Second(u8)`. - assert_ne!( - to_hash(meta_type::()), - to_hash(meta_type::()) - ); - - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - enum EnumFieldNamed { - First { a: u8 }, - } - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - enum EnumFieldNamedSecond { - First { b: u8 }, - } - // Named fields contain a different semantic meaning ('a' and 'b'). - assert_ne!( - to_hash(meta_type::()), - to_hash(meta_type::()) - ); - - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - struct StructFieldNamed { - a: u32, - } - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - struct StructFieldNamedSecond { - b: u32, - } - // Similar to enums, struct fields contain a different semantic meaning ('a' and 'b'). - assert_ne!( - to_hash(meta_type::()), - to_hash(meta_type::()) - ); - - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - enum EnumField { - First, - // Field is unnamed, but has type name `u8`. - Second(u8), - // File is named and has type name `u8`. - Third { named: u8 }, - } - - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - enum EnumFieldSwap { - Second(u8), - First, - Third { named: u8 }, - } - // Swapping the registration order should also be taken into account. 
- assert_ne!( - to_hash(meta_type::()), - to_hash(meta_type::()) - ); - - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - struct StructField { - a: u32, - b: u32, - } +pub use retain::retain_metadata_pallets; +pub use validation::{ + get_call_hash, get_constant_hash, get_metadata_hash, get_metadata_per_pallet_hash, + get_pallet_hash, get_storage_hash, NotFound, +}; - #[allow(dead_code)] - #[derive(scale_info::TypeInfo)] - struct StructFieldSwap { - b: u32, - a: u32, - } - assert_ne!( - to_hash(meta_type::()), - to_hash(meta_type::()) - ); +/// Convert the metadata V14 to the latest metadata version. +pub fn metadata_v14_to_latest(metadata: RuntimeMetadataV14) -> RuntimeMetadataV15 { + RuntimeMetadataV15 { + types: metadata.types, + pallets: metadata + .pallets + .into_iter() + .map(|pallet| frame_metadata::v15::PalletMetadata { + name: pallet.name, + storage: pallet + .storage + .map(|storage| frame_metadata::v15::PalletStorageMetadata { + prefix: storage.prefix, + entries: storage + .entries + .into_iter() + .map(|entry| { + let modifier = match entry.modifier { + frame_metadata::v14::StorageEntryModifier::Optional => { + frame_metadata::v15::StorageEntryModifier::Optional + } + frame_metadata::v14::StorageEntryModifier::Default => { + frame_metadata::v15::StorageEntryModifier::Default + } + }; + + let ty = match entry.ty { + frame_metadata::v14::StorageEntryType::Plain(ty) => { + frame_metadata::v15::StorageEntryType::Plain(ty) + }, + frame_metadata::v14::StorageEntryType::Map { + hashers, + key, + value, + } => frame_metadata::v15::StorageEntryType::Map { + hashers: hashers.into_iter().map(|hasher| match hasher { + frame_metadata::v14::StorageHasher::Blake2_128 => frame_metadata::v15::StorageHasher::Blake2_128, + frame_metadata::v14::StorageHasher::Blake2_256 => frame_metadata::v15::StorageHasher::Blake2_256, + frame_metadata::v14::StorageHasher::Blake2_128Concat => frame_metadata::v15::StorageHasher::Blake2_128Concat , + 
frame_metadata::v14::StorageHasher::Twox128 => frame_metadata::v15::StorageHasher::Twox128, + frame_metadata::v14::StorageHasher::Twox256 => frame_metadata::v15::StorageHasher::Twox256, + frame_metadata::v14::StorageHasher::Twox64Concat => frame_metadata::v15::StorageHasher::Twox64Concat, + frame_metadata::v14::StorageHasher::Identity=> frame_metadata::v15::StorageHasher::Identity, + }).collect(), + key, + value, + }, + }; + + frame_metadata::v15::StorageEntryMetadata { + name: entry.name, + modifier, + ty, + default: entry.default, + docs: entry.docs, + } + }) + .collect(), + }), + calls: pallet.calls.map(|calls| frame_metadata::v15::PalletCallMetadata { ty: calls.ty } ), + event: pallet.event.map(|event| frame_metadata::v15::PalletEventMetadata { ty: event.ty } ), + constants: pallet.constants.into_iter().map(|constant| frame_metadata::v15::PalletConstantMetadata { + name: constant.name, + ty: constant.ty, + value: constant.value, + docs: constant.docs, + } ).collect(), + error: pallet.error.map(|error| frame_metadata::v15::PalletErrorMetadata { ty: error.ty } ), + index: pallet.index, + docs: Default::default(), + }) + .collect(), + extrinsic: frame_metadata::v15::ExtrinsicMetadata { + ty: metadata.extrinsic.ty, + version: metadata.extrinsic.version, + signed_extensions: metadata.extrinsic.signed_extensions.into_iter().map(|ext| { + frame_metadata::v15::SignedExtensionMetadata { + identifier: ext.identifier, + ty: ext.ty, + additional_signed: ext.additional_signed, + } + }).collect() + }, + ty: metadata.ty, + apis: Default::default(), } } diff --git a/metadata/src/retain.rs b/metadata/src/retain.rs index 67cd06a05b..e0550643c3 100644 --- a/metadata/src/retain.rs +++ b/metadata/src/retain.rs @@ -4,7 +4,9 @@ //! Utility functions to generate a subset of the metadata. 
-use frame_metadata::{ExtrinsicMetadata, PalletMetadata, RuntimeMetadataV14, StorageEntryType}; +use frame_metadata::v15::{ + ExtrinsicMetadata, PalletMetadata, RuntimeMetadataV15, StorageEntryType, +}; use scale_info::{form::PortableForm, interner::UntrackedSymbol, TypeDef}; use std::{ any::TypeId, @@ -120,7 +122,7 @@ fn update_type(ty: &mut UntrackedSymbol, map_ids: &BTreeMap) { /// Strip any pallets out of the RuntimeCall type that aren't the ones we want to keep. /// The RuntimeCall type is referenced in a bunch of places, so doing this prevents us from /// holding on to stuff in pallets we've asked not to keep. -fn retain_pallets_in_runtime_call_type(metadata: &mut RuntimeMetadataV14, mut filter: F) +fn retain_pallets_in_runtime_call_type(metadata: &mut RuntimeMetadataV15, mut filter: F) where F: FnMut(&str) -> bool, { @@ -161,7 +163,7 @@ where /// /// Panics if the [`scale_info::PortableRegistry`] did not retain all needed types, /// or the metadata does not contain the "sp_runtime::DispatchError" type. 
-pub fn retain_metadata_pallets(metadata: &mut RuntimeMetadataV14, mut filter: F) +pub fn retain_metadata_pallets(metadata: &mut RuntimeMetadataV15, mut filter: F) where F: FnMut(&str) -> bool, { @@ -214,18 +216,20 @@ where #[cfg(test)] mod tests { use super::*; + use crate::metadata_v14_to_latest; use codec::Decode; - use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed, RuntimeMetadataV14}; + use frame_metadata::{v15::RuntimeMetadataV15, RuntimeMetadata, RuntimeMetadataPrefixed}; use std::{fs, path::Path}; - fn load_metadata() -> RuntimeMetadataV14 { + fn load_metadata() -> RuntimeMetadataV15 { let bytes = fs::read(Path::new("../artifacts/polkadot_metadata.scale")) .expect("Cannot read metadata blob"); let meta: RuntimeMetadataPrefixed = Decode::decode(&mut &*bytes).expect("Cannot decode scale metadata"); match meta.1 { - RuntimeMetadata::V14(v14) => v14, + RuntimeMetadata::V14(v14) => metadata_v14_to_latest(v14), + RuntimeMetadata::V15(v15) => v15, _ => panic!("Unsupported metadata version {:?}", meta.1), } } diff --git a/metadata/src/validation.rs b/metadata/src/validation.rs new file mode 100644 index 0000000000..a325d69a4b --- /dev/null +++ b/metadata/src/validation.rs @@ -0,0 +1,864 @@ +// Copyright 2019-2023 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Utility functions for metadata validation. + +use frame_metadata::v15::{ + ExtrinsicMetadata, PalletMetadata, RuntimeMetadataV15, StorageEntryMetadata, StorageEntryType, +}; +use scale_info::{form::PortableForm, Field, PortableRegistry, TypeDef, Variant}; +use std::collections::HashSet; + +/// Start with a predefined hashing value for the pallets. +const MAGIC_PALLET_VALUE: &[u8] = &[19]; + +/// Predefined value to be returned when we already visited a type. +const MAGIC_RECURSIVE_TYPE_VALUE: &[u8] = &[123]; + +// The number of bytes our `hash` function produces. 
+const HASH_LEN: usize = 32; + +/// Internal byte representation for various metadata types utilized for +/// generating deterministic hashes between different rust versions. +#[repr(u8)] +enum TypeBeingHashed { + Composite, + Variant, + Sequence, + Array, + Tuple, + Primitive, + Compact, + BitSequence, +} + +/// Hashing function utilized internally. +fn hash(data: &[u8]) -> [u8; 32] { + sp_core_hashing::twox_256(data) +} + +/// XOR two hashes together. If we have two pseudorandom hashes, then this will +/// lead to another pseudorandom value. If there is potentially some pattern to +/// the hashes we are xoring (eg we might be xoring the same hashes a few times), +/// prefer `concat_and_hash` to give us stronger pseudorandomness guarantees. +fn xor(a: [u8; 32], b: [u8; 32]) -> [u8; 32] { + let mut out = [0u8; 32]; + for (idx, (a, b)) in a.into_iter().zip(b).enumerate() { + out[idx] = a ^ b; + } + out +} + +/// Combine two hashes or hash-like sets of bytes together into a single hash. +/// `xor` is OK for one-off combinations of bytes, but if we are merging +/// potentially identical hashes, this is a safer way to ensure the result is +/// unique. +fn concat_and_hash(a: [u8; 32], b: [u8; 32]) -> [u8; 32] { + let mut out = [0u8; HASH_LEN * 2]; + out[0..HASH_LEN].copy_from_slice(&a[..]); + out[HASH_LEN..].copy_from_slice(&b[..]); + hash(&out) +} + +/// Obtain the hash representation of a `scale_info::Field`. +fn get_field_hash( + registry: &PortableRegistry, + field: &Field, + visited_ids: &mut HashSet, +) -> [u8; 32] { + let mut bytes = get_type_hash(registry, field.ty.id, visited_ids); + + // XOR name and field name with the type hash if they exist + if let Some(name) = &field.name { + bytes = xor(bytes, hash(name.as_bytes())); + } + + bytes +} + +/// Obtain the hash representation of a `scale_info::Variant`. 
+fn get_variant_hash(
+ registry: &PortableRegistry,
+ var: &Variant,
+ visited_ids: &mut HashSet,
+) -> [u8; 32] {
+ // Merge the hash of the variant name with each field's hash using concat_and_hash.
+ let mut bytes = hash(var.name.as_bytes());
+ for field in &var.fields {
+ bytes = concat_and_hash(bytes, get_field_hash(registry, field, visited_ids))
+ }
+
+ bytes
+}
+
+/// Obtain the hash representation of a `scale_info::TypeDef`.
+fn get_type_def_hash(
+ registry: &PortableRegistry,
+ ty_def: &TypeDef,
+ visited_ids: &mut HashSet,
+) -> [u8; 32] {
+ match ty_def {
+ TypeDef::Composite(composite) => {
+ let mut bytes = hash(&[TypeBeingHashed::Composite as u8]);
+ for field in &composite.fields {
+ bytes = concat_and_hash(bytes, get_field_hash(registry, field, visited_ids));
+ }
+ bytes
+ }
+ TypeDef::Variant(variant) => {
+ let mut bytes = hash(&[TypeBeingHashed::Variant as u8]);
+ for var in &variant.variants {
+ bytes = concat_and_hash(bytes, get_variant_hash(registry, var, visited_ids));
+ }
+ bytes
+ }
+ TypeDef::Sequence(sequence) => {
+ let bytes = hash(&[TypeBeingHashed::Sequence as u8]);
+ xor(
+ bytes,
+ get_type_hash(registry, sequence.type_param.id, visited_ids),
+ )
+ }
+ TypeDef::Array(array) => {
+ // Take length into account; different length must lead to different hash.
+ let len_bytes = array.len.to_be_bytes();
+ let bytes = hash(&[
+ TypeBeingHashed::Array as u8,
+ len_bytes[0],
+ len_bytes[1],
+ len_bytes[2],
+ len_bytes[3],
+ ]);
+ xor(
+ bytes,
+ get_type_hash(registry, array.type_param.id, visited_ids),
+ )
+ }
+ TypeDef::Tuple(tuple) => {
+ let mut bytes = hash(&[TypeBeingHashed::Tuple as u8]);
+ for field in &tuple.fields {
+ bytes = concat_and_hash(bytes, get_type_hash(registry, field.id, visited_ids));
+ }
+ bytes
+ }
+ TypeDef::Primitive(primitive) => {
+ // Cloning the 'primitive' type should essentially be a copy.
+ hash(&[TypeBeingHashed::Primitive as u8, primitive.clone() as u8]) + } + TypeDef::Compact(compact) => { + let bytes = hash(&[TypeBeingHashed::Compact as u8]); + xor( + bytes, + get_type_hash(registry, compact.type_param.id, visited_ids), + ) + } + TypeDef::BitSequence(bitseq) => { + let mut bytes = hash(&[TypeBeingHashed::BitSequence as u8]); + bytes = xor( + bytes, + get_type_hash(registry, bitseq.bit_order_type.id, visited_ids), + ); + bytes = xor( + bytes, + get_type_hash(registry, bitseq.bit_store_type.id, visited_ids), + ); + bytes + } + } +} + +/// Obtain the hash representation of a `scale_info::Type` identified by id. +fn get_type_hash(registry: &PortableRegistry, id: u32, visited_ids: &mut HashSet) -> [u8; 32] { + // Guard against recursive types and return a fixed arbitrary hash + if !visited_ids.insert(id) { + return hash(MAGIC_RECURSIVE_TYPE_VALUE); + } + + let ty = registry + .resolve(id) + .expect("Type ID provided by the metadata is registered; qed"); + get_type_def_hash(registry, &ty.type_def, visited_ids) +} + +/// Obtain the hash representation of a `frame_metadata::v15::ExtrinsicMetadata`. +fn get_extrinsic_hash( + registry: &PortableRegistry, + extrinsic: &ExtrinsicMetadata, +) -> [u8; 32] { + let mut visited_ids = HashSet::::new(); + + let mut bytes = get_type_hash(registry, extrinsic.ty.id, &mut visited_ids); + + bytes = xor(bytes, hash(&[extrinsic.version])); + for signed_extension in extrinsic.signed_extensions.iter() { + let mut ext_bytes = hash(signed_extension.identifier.as_bytes()); + ext_bytes = xor( + ext_bytes, + get_type_hash(registry, signed_extension.ty.id, &mut visited_ids), + ); + ext_bytes = xor( + ext_bytes, + get_type_hash( + registry, + signed_extension.additional_signed.id, + &mut visited_ids, + ), + ); + bytes = concat_and_hash(bytes, ext_bytes); + } + + bytes +} + +/// Get the hash corresponding to a single storage entry. 
+fn get_storage_entry_hash( + registry: &PortableRegistry, + entry: &StorageEntryMetadata, + visited_ids: &mut HashSet, +) -> [u8; 32] { + let mut bytes = hash(entry.name.as_bytes()); + // Cloning 'entry.modifier' should essentially be a copy. + bytes = xor(bytes, hash(&[entry.modifier.clone() as u8])); + bytes = xor(bytes, hash(&entry.default)); + + match &entry.ty { + StorageEntryType::Plain(ty) => { + bytes = xor(bytes, get_type_hash(registry, ty.id, visited_ids)); + } + StorageEntryType::Map { + hashers, + key, + value, + } => { + for hasher in hashers { + // Cloning the hasher should essentially be a copy. + bytes = concat_and_hash(bytes, [hasher.clone() as u8; 32]); + } + bytes = xor(bytes, get_type_hash(registry, key.id, visited_ids)); + bytes = xor(bytes, get_type_hash(registry, value.id, visited_ids)); + } + } + + bytes +} + +/// Obtain the hash for a specific storage item, or an error if it's not found. +pub fn get_storage_hash( + metadata: &RuntimeMetadataV15, + pallet_name: &str, + storage_name: &str, +) -> Result<[u8; 32], NotFound> { + let pallet = metadata + .pallets + .iter() + .find(|p| p.name == pallet_name) + .ok_or(NotFound::Pallet)?; + + let storage = pallet.storage.as_ref().ok_or(NotFound::Item)?; + + let entry = storage + .entries + .iter() + .find(|s| s.name == storage_name) + .ok_or(NotFound::Item)?; + + let hash = get_storage_entry_hash(&metadata.types, entry, &mut HashSet::new()); + Ok(hash) +} + +/// Obtain the hash for a specific constant, or an error if it's not found. +pub fn get_constant_hash( + metadata: &RuntimeMetadataV15, + pallet_name: &str, + constant_name: &str, +) -> Result<[u8; 32], NotFound> { + let pallet = metadata + .pallets + .iter() + .find(|p| p.name == pallet_name) + .ok_or(NotFound::Pallet)?; + + let constant = pallet + .constants + .iter() + .find(|c| c.name == constant_name) + .ok_or(NotFound::Item)?; + + // We only need to check that the type of the constant asked for matches. 
+ let bytes = get_type_hash(&metadata.types, constant.ty.id, &mut HashSet::new()); + Ok(bytes) +} + +/// Obtain the hash for a specific call, or an error if it's not found. +pub fn get_call_hash( + metadata: &RuntimeMetadataV15, + pallet_name: &str, + call_name: &str, +) -> Result<[u8; 32], NotFound> { + let pallet = metadata + .pallets + .iter() + .find(|p| p.name == pallet_name) + .ok_or(NotFound::Pallet)?; + + let call_id = pallet.calls.as_ref().ok_or(NotFound::Item)?.ty.id; + + let call_ty = metadata.types.resolve(call_id).ok_or(NotFound::Item)?; + + let call_variants = match &call_ty.type_def { + TypeDef::Variant(variant) => &variant.variants, + _ => return Err(NotFound::Item), + }; + + let variant = call_variants + .iter() + .find(|v| v.name == call_name) + .ok_or(NotFound::Item)?; + + // hash the specific variant representing the call we are interested in. + let hash = get_variant_hash(&metadata.types, variant, &mut HashSet::new()); + Ok(hash) +} + +/// Obtain the hash representation of a `frame_metadata::v15::PalletMetadata`. +pub fn get_pallet_hash( + registry: &PortableRegistry, + pallet: &PalletMetadata, +) -> [u8; 32] { + // The pallet could potentially be empty and not contain any calls, events and so on. + // Use a magic (arbitrary) value as a base for hashing. 
+ let mut bytes = hash(MAGIC_PALLET_VALUE); + let mut visited_ids = HashSet::::new(); + + if let Some(calls) = &pallet.calls { + bytes = xor( + bytes, + get_type_hash(registry, calls.ty.id, &mut visited_ids), + ); + } + if let Some(ref event) = pallet.event { + bytes = xor( + bytes, + get_type_hash(registry, event.ty.id, &mut visited_ids), + ); + } + for constant in pallet.constants.iter() { + bytes = xor(bytes, hash(constant.name.as_bytes())); + bytes = xor( + bytes, + get_type_hash(registry, constant.ty.id, &mut visited_ids), + ); + } + if let Some(ref error) = pallet.error { + bytes = xor( + bytes, + get_type_hash(registry, error.ty.id, &mut visited_ids), + ); + } + if let Some(ref storage) = pallet.storage { + bytes = xor(bytes, hash(storage.prefix.as_bytes())); + for entry in storage.entries.iter() { + bytes = concat_and_hash( + bytes, + get_storage_entry_hash(registry, entry, &mut visited_ids), + ); + } + } + + bytes +} + +/// Obtain the hash representation of a `frame_metadata::v15::RuntimeMetadataV15`. +pub fn get_metadata_hash(metadata: &RuntimeMetadataV15) -> [u8; 32] { + // The number of metadata components, other than variable number of pallets that produce a unique hash. + const STATIC_METADATA_COMPONENTS: usize = 2; + + // Collect all pairs of (pallet name, pallet hash). + let mut pallets: Vec<(&str, [u8; 32])> = metadata + .pallets + .iter() + .map(|pallet| { + let hash = get_pallet_hash(&metadata.types, pallet); + (&*pallet.name, hash) + }) + .collect(); + + // Sort by pallet name to create a deterministic representation of the underlying metadata. + pallets.sort_by_key(|&(name, _hash)| name); + + // Note: pallet name is excluded from hashing. + // The number of hashes that we take into account, each having a `HASH_LEN` output. 
+ let metadata_components = pallets.len() + STATIC_METADATA_COMPONENTS; + let mut bytes = Vec::with_capacity(metadata_components * HASH_LEN); + for (_, hash) in pallets.iter() { + bytes.extend(hash) + } + + bytes.extend(get_extrinsic_hash(&metadata.types, &metadata.extrinsic)); + + let mut visited_ids = HashSet::::new(); + bytes.extend(get_type_hash( + &metadata.types, + metadata.ty.id, + &mut visited_ids, + )); + + hash(&bytes) +} + +/// Obtain the hash representation of a `frame_metadata::v15::RuntimeMetadataV15` +/// hashing only the provided pallets. +/// +/// **Note:** This is similar to `get_metadata_hash`, but performs hashing only of the provided +/// pallets if they exist. There are cases where the runtime metadata contains a subset of +/// the pallets from the static metadata. In those cases, the static API can communicate +/// properly with the subset of pallets from the runtime node. +pub fn get_metadata_per_pallet_hash>( + metadata: &RuntimeMetadataV15, + pallets: &[T], +) -> [u8; 32] { + // Collect all pairs of (pallet name, pallet hash). + let mut pallets_hashed: Vec<(&str, [u8; 32])> = metadata + .pallets + .iter() + .filter_map(|pallet| { + // Make sure to filter just the pallets we are interested in. + let in_pallet = pallets + .iter() + .any(|pallet_ref| pallet_ref.as_ref() == pallet.name); + if in_pallet { + let hash = get_pallet_hash(&metadata.types, pallet); + Some((&*pallet.name, hash)) + } else { + None + } + }) + .collect(); + + // Sort by pallet name to create a deterministic representation of the underlying metadata. + pallets_hashed.sort_by_key(|&(name, _hash)| name); + + // Note: pallet name is excluded from hashing. + // We are only hashing the hashes of the pallets. 
+ let mut bytes = Vec::with_capacity(pallets_hashed.len() * HASH_LEN); + for (_, hash) in pallets_hashed.iter() { + bytes.extend(hash) + } + + hash(&bytes) +} + +/// An error returned if we attempt to get the hash for a specific call, constant +/// or storage item that doesn't exist. +#[derive(Clone, Debug)] +pub enum NotFound { + Pallet, + Item, +} + +#[cfg(test)] +mod tests { + use super::*; + use bitvec::{order::Lsb0, vec::BitVec}; + use frame_metadata::v15::{ + ExtrinsicMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata, + PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeMetadataV15, + StorageEntryMetadata, StorageEntryModifier, + }; + use scale_info::meta_type; + + // Define recursive types. + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct A { + pub b: Box, + } + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct B { + pub a: Box, + } + + // Define TypeDef supported types. + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + // TypeDef::Composite with TypeDef::Array with Typedef::Primitive. + struct AccountId32([u8; 32]); + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + // TypeDef::Variant. + enum DigestItem { + PreRuntime( + // TypeDef::Array with primitive. + [::core::primitive::u8; 4usize], + // TypeDef::Sequence. + ::std::vec::Vec<::core::primitive::u8>, + ), + Other(::std::vec::Vec<::core::primitive::u8>), + // Nested TypeDef::Tuple. + RuntimeEnvironmentUpdated(((i8, i16), (u32, u64))), + // TypeDef::Compact. + Index(#[codec(compact)] ::core::primitive::u8), + // TypeDef::BitSequence. + BitSeq(BitVec), + } + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + // Ensure recursive types and TypeDef variants are captured. + struct MetadataTestType { + recursive: A, + composite: AccountId32, + type_def: DigestItem, + } + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + // Simulate a PalletCallMetadata. 
+ enum Call { + #[codec(index = 0)] + FillBlock { ratio: AccountId32 }, + #[codec(index = 1)] + Remark { remark: DigestItem }, + } + + fn build_default_extrinsic() -> ExtrinsicMetadata { + ExtrinsicMetadata { + ty: meta_type::<()>(), + version: 0, + signed_extensions: vec![], + } + } + + fn default_pallet() -> PalletMetadata { + PalletMetadata { + name: "Test", + storage: None, + calls: None, + event: None, + constants: vec![], + error: None, + index: 0, + docs: vec![], + } + } + + fn build_default_pallets() -> Vec { + vec![ + PalletMetadata { + name: "First", + calls: Some(PalletCallMetadata { + ty: meta_type::(), + }), + ..default_pallet() + }, + PalletMetadata { + name: "Second", + index: 1, + calls: Some(PalletCallMetadata { + ty: meta_type::<(DigestItem, AccountId32, A)>(), + }), + ..default_pallet() + }, + ] + } + + fn pallets_to_metadata(pallets: Vec) -> RuntimeMetadataV15 { + RuntimeMetadataV15::new( + pallets, + build_default_extrinsic(), + meta_type::<()>(), + vec![], + ) + } + + #[test] + fn different_pallet_index() { + let pallets = build_default_pallets(); + let mut pallets_swap = pallets.clone(); + + let metadata = pallets_to_metadata(pallets); + + // Change the order in which pallets are registered. + pallets_swap.swap(0, 1); + pallets_swap[0].index = 0; + pallets_swap[1].index = 1; + let metadata_swap = pallets_to_metadata(pallets_swap); + + let hash = get_metadata_hash(&metadata); + let hash_swap = get_metadata_hash(&metadata_swap); + + // Changing pallet order must still result in a deterministic unique hash. + assert_eq!(hash, hash_swap); + } + + #[test] + fn recursive_type() { + let mut pallet = default_pallet(); + pallet.calls = Some(PalletCallMetadata { + ty: meta_type::(), + }); + let metadata = pallets_to_metadata(vec![pallet]); + + // Check hashing algorithm finishes on a recursive type. + get_metadata_hash(&metadata); + } + + #[test] + /// Ensure correctness of hashing when parsing the `metadata.types`. 
+ /// + /// Having a recursive structure `A: { B }` and `B: { A }` registered in different order + /// `types: { { id: 0, A }, { id: 1, B } }` and `types: { { id: 0, B }, { id: 1, A } }` + /// must produce the same deterministic hashing value. + fn recursive_types_different_order() { + let mut pallets = build_default_pallets(); + pallets[0].calls = Some(PalletCallMetadata { + ty: meta_type::(), + }); + pallets[1].calls = Some(PalletCallMetadata { + ty: meta_type::(), + }); + pallets[1].index = 1; + let mut pallets_swap = pallets.clone(); + let metadata = pallets_to_metadata(pallets); + + pallets_swap.swap(0, 1); + pallets_swap[0].index = 0; + pallets_swap[1].index = 1; + let metadata_swap = pallets_to_metadata(pallets_swap); + + let hash = get_metadata_hash(&metadata); + let hash_swap = get_metadata_hash(&metadata_swap); + + // Changing pallet order must still result in a deterministic unique hash. + assert_eq!(hash, hash_swap); + } + + #[test] + fn pallet_hash_correctness() { + let compare_pallets_hash = |lhs: &PalletMetadata, rhs: &PalletMetadata| { + let metadata = pallets_to_metadata(vec![lhs.clone()]); + let hash = get_metadata_hash(&metadata); + + let metadata = pallets_to_metadata(vec![rhs.clone()]); + let new_hash = get_metadata_hash(&metadata); + + assert_ne!(hash, new_hash); + }; + + // Build metadata progressively from an empty pallet to a fully populated pallet. 
+ let mut pallet = default_pallet();
+ let pallet_lhs = pallet.clone();
+ pallet.storage = Some(PalletStorageMetadata {
+ prefix: "Storage",
+ entries: vec![StorageEntryMetadata {
+ name: "BlockWeight",
+ modifier: StorageEntryModifier::Default,
+ ty: StorageEntryType::Plain(meta_type::()),
+ default: vec![],
+ docs: vec![],
+ }],
+ });
+ compare_pallets_hash(&pallet_lhs, &pallet);
+
+ let pallet_lhs = pallet.clone();
+ // Calls are similar to:
+ //
+ // ```
+ // pub enum Call {
+ // call_name_01 { arg01: type },
+ // call_name_02 { arg01: type, arg02: type }
+ // }
+ // ```
+ pallet.calls = Some(PalletCallMetadata {
+ ty: meta_type::(),
+ });
+ compare_pallets_hash(&pallet_lhs, &pallet);
+
+ let pallet_lhs = pallet.clone();
+ // Events are similar to Calls.
+ pallet.event = Some(PalletEventMetadata {
+ ty: meta_type::(),
+ });
+ compare_pallets_hash(&pallet_lhs, &pallet);
+
+ let pallet_lhs = pallet.clone();
+ pallet.constants = vec![PalletConstantMetadata {
+ name: "BlockHashCount",
+ ty: meta_type::(),
+ value: vec![96u8, 0, 0, 0],
+ docs: vec![],
+ }];
+ compare_pallets_hash(&pallet_lhs, &pallet);
+
+ let pallet_lhs = pallet.clone();
+ pallet.error = Some(PalletErrorMetadata {
+ ty: meta_type::(),
+ });
+ compare_pallets_hash(&pallet_lhs, &pallet);
+ }
+
+ #[test]
+ fn metadata_per_pallet_hash_correctness() {
+ let pallets = build_default_pallets();
+
+ // Build metadata with just the first pallet.
+ let metadata_one = pallets_to_metadata(vec![pallets[0].clone()]);
+ // Build metadata with both pallets.
+ let metadata_both = pallets_to_metadata(pallets);
+
+ // Hashing will ignore any non-existent pallet and return the same result.
+ let hash = get_metadata_per_pallet_hash(&metadata_one, &["First", "Second"]);
+ let hash_rhs = get_metadata_per_pallet_hash(&metadata_one, &["First"]);
+ assert_eq!(hash, hash_rhs, "hashing should ignore non-existant pallets");
+
+ // Hashing one pallet from metadata with 2 pallets inserted will ignore the second pallet.
+ let hash_second = get_metadata_per_pallet_hash(&metadata_both, &["First"]); + assert_eq!( + hash_second, hash, + "hashing one pallet should ignore the others" + ); + + // Check hashing with all pallets. + let hash_second = get_metadata_per_pallet_hash(&metadata_both, &["First", "Second"]); + assert_ne!( + hash_second, hash, + "hashing both pallets should produce a different result from hashing just one pallet" + ); + } + + #[test] + fn field_semantic_changes() { + // Get a hash representation of the provided meta type, + // inserted in the context of pallet metadata call. + let to_hash = |meta_ty| { + let pallet = PalletMetadata { + calls: Some(PalletCallMetadata { ty: meta_ty }), + ..default_pallet() + }; + let metadata = pallets_to_metadata(vec![pallet]); + get_metadata_hash(&metadata) + }; + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum EnumFieldNotNamedA { + First(u8), + } + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum EnumFieldNotNamedB { + First(u8), + } + // Semantic changes apply only to field names. + // This is considered to be a good tradeoff in hashing performance, as refactoring + // a structure / enum 's name is less likely to cause a breaking change. + // Even if the enums have different names, 'EnumFieldNotNamedA' and 'EnumFieldNotNamedB', + // they are equal in meaning (i.e, both contain `First(u8)`). + assert_eq!( + to_hash(meta_type::()), + to_hash(meta_type::()) + ); + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct StructFieldNotNamedA([u8; 32]); + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct StructFieldNotNamedSecondB([u8; 32]); + // Similarly to enums, semantic changes apply only inside the structure fields. 
+ assert_eq!(
+ to_hash(meta_type::()),
+ to_hash(meta_type::())
+ );
+
+ #[allow(dead_code)]
+ #[derive(scale_info::TypeInfo)]
+ enum EnumFieldNotNamed {
+ First(u8),
+ }
+ #[allow(dead_code)]
+ #[derive(scale_info::TypeInfo)]
+ enum EnumFieldNotNamedSecond {
+ Second(u8),
+ }
+ // The enums are binary compatible, but they contain a different semantic meaning:
+ // `First(u8)` and `Second(u8)`.
+ assert_ne!(
+ to_hash(meta_type::()),
+ to_hash(meta_type::())
+ );
+
+ #[allow(dead_code)]
+ #[derive(scale_info::TypeInfo)]
+ enum EnumFieldNamed {
+ First { a: u8 },
+ }
+ #[allow(dead_code)]
+ #[derive(scale_info::TypeInfo)]
+ enum EnumFieldNamedSecond {
+ First { b: u8 },
+ }
+ // Named fields contain a different semantic meaning ('a' and 'b').
+ assert_ne!(
+ to_hash(meta_type::()),
+ to_hash(meta_type::())
+ );
+
+ #[allow(dead_code)]
+ #[derive(scale_info::TypeInfo)]
+ struct StructFieldNamed {
+ a: u32,
+ }
+ #[allow(dead_code)]
+ #[derive(scale_info::TypeInfo)]
+ struct StructFieldNamedSecond {
+ b: u32,
+ }
+ // Similar to enums, struct fields contain a different semantic meaning ('a' and 'b').
+ assert_ne!(
+ to_hash(meta_type::()),
+ to_hash(meta_type::())
+ );
+
+ #[allow(dead_code)]
+ #[derive(scale_info::TypeInfo)]
+ enum EnumField {
+ First,
+ // Field is unnamed, but has type name `u8`.
+ Second(u8),
+ // Field is named and has type name `u8`.
+ Third { named: u8 },
+ }
+
+ #[allow(dead_code)]
+ #[derive(scale_info::TypeInfo)]
+ enum EnumFieldSwap {
+ Second(u8),
+ First,
+ Third { named: u8 },
+ }
+ // Swapping the registration order should also be taken into account.
+ assert_ne!( + to_hash(meta_type::()), + to_hash(meta_type::()) + ); + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct StructField { + a: u32, + b: u32, + } + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct StructFieldSwap { + b: u32, + a: u32, + } + assert_ne!( + to_hash(meta_type::()), + to_hash(meta_type::()) + ); + } +} diff --git a/subxt/Cargo.toml b/subxt/Cargo.toml index 18566508e3..a3a04600ae 100644 --- a/subxt/Cargo.toml +++ b/subxt/Cargo.toml @@ -49,7 +49,7 @@ serde_json = { version = "1.0.96", features = ["raw_value"] } thiserror = "1.0.40" tracing = "0.1.34" parking_lot = "0.12.0" -frame-metadata = "15.0.0" +frame-metadata = { version = "15.1.0", features = ["v14", "v15-unstable", "std"] } derivative = "2.2.0" either = "1.8.1" diff --git a/subxt/src/events/events_type.rs b/subxt/src/events/events_type.rs index 11f3294a7c..4fcec55cb7 100644 --- a/subxt/src/events/events_type.rs +++ b/subxt/src/events/events_type.rs @@ -430,7 +430,7 @@ pub(crate) mod test_utils { use crate::{Config, SubstrateConfig}; use codec::Encode; use frame_metadata::{ - v14::{ExtrinsicMetadata, PalletEventMetadata, PalletMetadata, RuntimeMetadataV14}, + v15::{ExtrinsicMetadata, PalletEventMetadata, PalletMetadata, RuntimeMetadataV15}, RuntimeMetadataPrefixed, }; use scale_info::{meta_type, TypeInfo}; @@ -503,6 +503,7 @@ pub(crate) mod test_utils { constants: vec![], error: None, index: 0, + docs: vec![], }]; let extrinsic = ExtrinsicMetadata { @@ -511,8 +512,8 @@ pub(crate) mod test_utils { signed_extensions: vec![], }; - let v14 = RuntimeMetadataV14::new(pallets, extrinsic, meta_type::<()>()); - let runtime_metadata: RuntimeMetadataPrefixed = v14.into(); + let meta = RuntimeMetadataV15::new(pallets, extrinsic, meta_type::<()>(), vec![]); + let runtime_metadata: RuntimeMetadataPrefixed = meta.into(); Metadata::try_from(runtime_metadata).unwrap() } diff --git a/subxt/src/metadata/metadata_type.rs b/subxt/src/metadata/metadata_type.rs index 
d9668bff3f..7a8966d3ea 100644 --- a/subxt/src/metadata/metadata_type.rs +++ b/subxt/src/metadata/metadata_type.rs @@ -5,8 +5,8 @@ use super::hash_cache::HashCache; use codec::Error as CodecError; use frame_metadata::{ - PalletConstantMetadata, RuntimeMetadata, RuntimeMetadataPrefixed, RuntimeMetadataV14, - StorageEntryMetadata, META_RESERVED, + v15::PalletConstantMetadata, v15::RuntimeMetadataV15, v15::StorageEntryMetadata, + RuntimeMetadata, RuntimeMetadataPrefixed, META_RESERVED, }; use parking_lot::RwLock; use scale_info::{form::PortableForm, PortableRegistry, Type}; @@ -65,7 +65,7 @@ pub enum MetadataError { // We hide the innards behind an Arc so that it's easy to clone and share. #[derive(Debug)] struct MetadataInner { - metadata: RuntimeMetadataV14, + metadata: RuntimeMetadataV15, // Events are hashed by pallet an error index (decode oriented) events: HashMap<(u8, u8), EventMetadata>, @@ -147,7 +147,7 @@ impl Metadata { } /// Return the runtime metadata. - pub fn runtime_metadata(&self) -> &RuntimeMetadataV14 { + pub fn runtime_metadata(&self) -> &RuntimeMetadataV15 { &self.inner.metadata } @@ -371,7 +371,8 @@ impl TryFrom for Metadata { return Err(InvalidMetadataError::InvalidPrefix); } let metadata = match metadata.1 { - RuntimeMetadata::V14(meta) => meta, + RuntimeMetadata::V14(v14) => subxt_metadata::metadata_v14_to_latest(v14), + RuntimeMetadata::V15(v15) => v15, _ => return Err(InvalidMetadataError::InvalidVersion), }; @@ -503,8 +504,9 @@ impl TryFrom for Metadata { #[cfg(test)] mod tests { use super::*; - use frame_metadata::{ - ExtrinsicMetadata, PalletStorageMetadata, StorageEntryModifier, StorageEntryType, + use frame_metadata::v15::{ + ExtrinsicMetadata, PalletCallMetadata, PalletMetadata, PalletStorageMetadata, + StorageEntryModifier, StorageEntryType, }; use scale_info::{meta_type, TypeInfo}; @@ -531,19 +533,20 @@ mod tests { value: vec![1, 2, 3], docs: vec![], }; - let pallet = frame_metadata::PalletMetadata { + let pallet = PalletMetadata { 
index: 0, name: "System", - calls: Some(frame_metadata::PalletCallMetadata { + calls: Some(PalletCallMetadata { ty: meta_type::(), }), storage: Some(storage), constants: vec![constant], event: None, error: None, + docs: vec![], }; - let metadata = RuntimeMetadataV14::new( + let metadata = RuntimeMetadataV15::new( vec![pallet], ExtrinsicMetadata { ty: meta_type::<()>(), @@ -551,6 +554,7 @@ mod tests { signed_extensions: vec![], }, meta_type::<()>(), + vec![], ); let prefixed = RuntimeMetadataPrefixed::from(metadata); diff --git a/subxt/src/storage/storage_address.rs b/subxt/src/storage/storage_address.rs index b2cb21964e..527d8c31bd 100644 --- a/subxt/src/storage/storage_address.rs +++ b/subxt/src/storage/storage_address.rs @@ -8,7 +8,7 @@ use crate::{ metadata::{DecodeWithMetadata, EncodeWithMetadata, Metadata}, utils::{Encoded, Static}, }; -use frame_metadata::{StorageEntryType, StorageHasher}; +use frame_metadata::v15::{StorageEntryType, StorageHasher}; use scale_info::TypeDef; use std::borrow::Cow; diff --git a/subxt/src/storage/storage_type.rs b/subxt/src/storage/storage_type.rs index 1cb7c30ce3..84c554af6c 100644 --- a/subxt/src/storage/storage_type.rs +++ b/subxt/src/storage/storage_type.rs @@ -11,7 +11,7 @@ use crate::{ Config, }; use derivative::Derivative; -use frame_metadata::StorageEntryType; +use frame_metadata::v15::StorageEntryType; use scale_info::form::PortableForm; use std::{future::Future, marker::PhantomData}; diff --git a/testing/integration-tests/Cargo.toml b/testing/integration-tests/Cargo.toml index 9ea873c474..7f7d1567c5 100644 --- a/testing/integration-tests/Cargo.toml +++ b/testing/integration-tests/Cargo.toml @@ -18,7 +18,7 @@ default = ["subxt/integration-tests"] [dev-dependencies] assert_matches = "1.5.0" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "full", "bit-vec"] } -frame-metadata = "15.0.0" +frame-metadata = { version = "15.1.0", features = ["v14", "v15-unstable", 
"std"] } futures = "0.3.27" hex = "0.4.3" regex = "1.7.3" @@ -29,6 +29,7 @@ sp-keyring = "23.0.0" syn = "1.0.109" subxt = { version = "0.28.0", path = "../../subxt" } subxt-codegen = { version = "0.28.0", path = "../../codegen" } +subxt-metadata = { version = "0.28.0", path = "../../metadata" } test-runtime = { path = "../test-runtime" } tokio = { version = "1.27", features = ["macros", "time"] } tracing = "0.1.34" diff --git a/testing/integration-tests/src/blocks/mod.rs b/testing/integration-tests/src/blocks/mod.rs index d9c37c2e73..57d02972b4 100644 --- a/testing/integration-tests/src/blocks/mod.rs +++ b/testing/integration-tests/src/blocks/mod.rs @@ -105,8 +105,11 @@ async fn runtime_api_call() -> Result<(), subxt::Error> { .call_raw::<(Compact, RuntimeMetadataPrefixed)>("Metadata_metadata", None) .await?; let metadata_call = match meta.1 { - frame_metadata::RuntimeMetadata::V14(metadata) => metadata, - _ => panic!("Metadata V14 unavailable"), + frame_metadata::RuntimeMetadata::V14(metadata) => { + subxt_metadata::metadata_v14_to_latest(metadata) + } + frame_metadata::RuntimeMetadata::V15(metadata) => metadata, + _ => panic!("Metadata V14 or V15 unavailable"), }; // Compare the runtime API call against the `state_getMetadata`. diff --git a/testing/integration-tests/src/client/mod.rs b/testing/integration-tests/src/client/mod.rs index 2dee4a2c66..f9808ea47d 100644 --- a/testing/integration-tests/src/client/mod.rs +++ b/testing/integration-tests/src/client/mod.rs @@ -394,10 +394,12 @@ async fn rpc_state_call() { .await .unwrap(); let metadata_call = match meta.1 { - frame_metadata::RuntimeMetadata::V14(metadata) => metadata, - _ => panic!("Metadata V14 unavailable"), + frame_metadata::RuntimeMetadata::V14(metadata) => { + subxt_metadata::metadata_v14_to_latest(metadata) + } + frame_metadata::RuntimeMetadata::V15(metadata) => metadata, + _ => panic!("Metadata V14 or V15 unavailable"), }; - // Compare the runtime API call against the `state_getMetadata`. 
let metadata = api.rpc().metadata(None).await.unwrap(); let metadata = metadata.runtime_metadata(); diff --git a/testing/integration-tests/src/codegen/codegen_documentation.rs b/testing/integration-tests/src/codegen/codegen_documentation.rs index b81722afcf..b8356d5bd9 100644 --- a/testing/integration-tests/src/codegen/codegen_documentation.rs +++ b/testing/integration-tests/src/codegen/codegen_documentation.rs @@ -14,7 +14,8 @@ fn metadata_docs() -> Vec { // Load the runtime metadata downloaded from a node via `test-runtime`. let meta = load_test_metadata(); let metadata = match meta.1 { - frame_metadata::RuntimeMetadata::V14(v14) => v14, + frame_metadata::RuntimeMetadata::V14(v14) => subxt_metadata::metadata_v14_to_latest(v14), + frame_metadata::RuntimeMetadata::V15(v15) => v15, _ => panic!("Unsupported metadata version {:?}", meta.1), }; diff --git a/testing/integration-tests/src/metadata/validation.rs b/testing/integration-tests/src/metadata/validation.rs index 015be5e589..c8d30d6418 100644 --- a/testing/integration-tests/src/metadata/validation.rs +++ b/testing/integration-tests/src/metadata/validation.rs @@ -4,9 +4,11 @@ use crate::{node_runtime, test_context, TestContext}; use frame_metadata::{ - ExtrinsicMetadata, PalletCallMetadata, PalletMetadata, PalletStorageMetadata, - RuntimeMetadataPrefixed, RuntimeMetadataV14, StorageEntryMetadata, StorageEntryModifier, - StorageEntryType, + v15::{ + ExtrinsicMetadata, PalletCallMetadata, PalletMetadata, PalletStorageMetadata, + RuntimeMetadataV15, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, + }, + RuntimeMetadataPrefixed, }; use scale_info::{ build::{Fields, Variants}, @@ -15,7 +17,7 @@ use scale_info::{ use subxt::{Metadata, OfflineClient, SubstrateConfig}; async fn metadata_to_api( - metadata: RuntimeMetadataV14, + metadata: RuntimeMetadataV15, ctx: &TestContext, ) -> OfflineClient { let prefixed = RuntimeMetadataPrefixed::from(metadata); @@ -37,7 +39,7 @@ async fn full_metadata_check() { 
assert!(node_runtime::validate_codegen(&api).is_ok()); // Modify the metadata. - let mut metadata: RuntimeMetadataV14 = api.metadata().runtime_metadata().clone(); + let mut metadata = api.metadata().runtime_metadata().clone(); metadata.pallets[0].name = "NewPallet".to_string(); let api = metadata_to_api(metadata, &ctx).await; @@ -59,7 +61,7 @@ async fn constant_values_are_not_validated() { assert!(api.constants().at(&deposit_addr).is_ok()); // Modify the metadata. - let mut metadata: RuntimeMetadataV14 = api.metadata().runtime_metadata().clone(); + let mut metadata = api.metadata().runtime_metadata().clone(); let mut existential = metadata .pallets @@ -89,11 +91,12 @@ fn default_pallet() -> PalletMetadata { constants: vec![], error: None, index: 0, + docs: vec![], } } -fn pallets_to_metadata(pallets: Vec) -> RuntimeMetadataV14 { - RuntimeMetadataV14::new( +fn pallets_to_metadata(pallets: Vec) -> RuntimeMetadataV15 { + RuntimeMetadataV15::new( pallets, ExtrinsicMetadata { ty: meta_type::<()>(), @@ -101,6 +104,7 @@ fn pallets_to_metadata(pallets: Vec) -> RuntimeMetadataV14 { signed_extensions: vec![], }, meta_type::<()>(), + vec![], ) } diff --git a/testing/ui-tests/Cargo.toml b/testing/ui-tests/Cargo.toml index da76100381..200d45aa98 100644 --- a/testing/ui-tests/Cargo.toml +++ b/testing/ui-tests/Cargo.toml @@ -11,7 +11,7 @@ publish = false [dev-dependencies] trybuild = "1.0.79" scale-info = { version = "2.5.0", features = ["bit-vec"] } -frame-metadata = "15.0.0" +frame-metadata = { version = "15.1.0", features = ["v14", "v15-unstable", "std"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "full", "bit-vec"] } subxt = { path = "../../subxt" } subxt-metadata = { path = "../../metadata" } diff --git a/testing/ui-tests/src/storage.rs b/testing/ui-tests/src/storage.rs index 07983521d4..eb9c96cf1d 100644 --- a/testing/ui-tests/src/storage.rs +++ b/testing/ui-tests/src/storage.rs @@ -3,7 +3,8 @@ // see 
LICENSE for license details. use frame_metadata::{ - RuntimeMetadataPrefixed, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, + v15::{StorageEntryMetadata, StorageEntryModifier, StorageEntryType}, + RuntimeMetadataPrefixed, }; use scale_info::meta_type; diff --git a/testing/ui-tests/src/utils/mod.rs b/testing/ui-tests/src/utils/mod.rs index d2aef3c31f..9c8a1c86fa 100644 --- a/testing/ui-tests/src/utils/mod.rs +++ b/testing/ui-tests/src/utils/mod.rs @@ -7,8 +7,11 @@ mod metadata_test_runner; mod pallet_metadata_test_runner; use frame_metadata::{ - v14::RuntimeMetadataV14, ExtrinsicMetadata, PalletMetadata, PalletStorageMetadata, - RuntimeMetadataPrefixed, StorageEntryMetadata, + v15::{ + ExtrinsicMetadata, PalletMetadata, PalletStorageMetadata, RuntimeMetadataV15, + StorageEntryMetadata, + }, + RuntimeMetadataPrefixed, }; use scale_info::{meta_type, IntoPortable, TypeInfo}; @@ -28,7 +31,7 @@ pub fn generate_metadata_from_pallets_custom_dispatch_error()); - let metadata = RuntimeMetadataV14 { + let metadata = RuntimeMetadataV15 { types: registry.into(), pallets, extrinsic, ty, + apis: vec![], }; RuntimeMetadataPrefixed::from(metadata) @@ -86,6 +90,7 @@ pub fn generate_metadata_from_storage_entries( calls: None, event: None, error: None, + docs: vec![], }; generate_metadata_from_pallets(vec![pallet]) diff --git a/testing/ui-tests/src/utils/pallet_metadata_test_runner.rs b/testing/ui-tests/src/utils/pallet_metadata_test_runner.rs index a27450c9f5..e87c755f23 100644 --- a/testing/ui-tests/src/utils/pallet_metadata_test_runner.rs +++ b/testing/ui-tests/src/utils/pallet_metadata_test_runner.rs @@ -3,15 +3,15 @@ // see LICENSE for license details. 
use codec::{Decode, Encode}; -use frame_metadata::{RuntimeMetadataPrefixed, RuntimeMetadataV14}; +use frame_metadata::{v15::RuntimeMetadataV15, RuntimeMetadataPrefixed}; use std::io::Read; -use subxt_metadata::retain_metadata_pallets; +use subxt_metadata::{metadata_v14_to_latest, retain_metadata_pallets}; static TEST_DIR_PREFIX: &str = "subxt_generated_pallets_ui_tests_"; static METADATA_FILE: &str = "../../artifacts/polkadot_metadata.scale"; pub struct PalletMetadataTestRunner { - metadata: RuntimeMetadataV14, + metadata: RuntimeMetadataV15, index: usize, } @@ -28,8 +28,9 @@ impl PalletMetadataTestRunner { Decode::decode(&mut &*bytes).expect("Cannot decode metadata bytes"); let metadata = match meta.1 { - frame_metadata::RuntimeMetadata::V14(v14) => v14, - _ => panic!("Unsupported metadata version. Tests support only v14"), + frame_metadata::RuntimeMetadata::V14(v14) => metadata_v14_to_latest(v14), + frame_metadata::RuntimeMetadata::V15(v15) => v15, + _ => panic!("Unsupported metadata version {:?}", meta.1), }; PalletMetadataTestRunner { metadata, index: 0 }