From cc6ed8a0c455f013577c408bf2421ad066cba941 Mon Sep 17 00:00:00 2001 From: vedhavyas Date: Tue, 9 Jul 2024 11:47:27 +0530 Subject: [PATCH 01/37] add functionality to freeze, unfreeze, and prune a given execution receipt of a given domain through Sudo --- crates/pallet-domains/src/block_tree.rs | 38 +++++++- crates/pallet-domains/src/lib.rs | 124 ++++++++++++++++++++++-- 2 files changed, 153 insertions(+), 9 deletions(-) diff --git a/crates/pallet-domains/src/block_tree.rs b/crates/pallet-domains/src/block_tree.rs index 5d57de1240..9cf59ccc01 100644 --- a/crates/pallet-domains/src/block_tree.rs +++ b/crates/pallet-domains/src/block_tree.rs @@ -616,10 +616,12 @@ mod tests { use crate::tests::{ create_dummy_bundle_with_receipts, create_dummy_receipt, extend_block_tree, extend_block_tree_from_zero, get_block_tree_node_at, new_test_ext_with_extensions, - register_genesis_domain, run_to_block, BlockTreePruningDepth, Test, + register_genesis_domain, run_to_block, BlockTreePruningDepth, Domains, Test, }; + use crate::FrozenDomains; use frame_support::dispatch::RawOrigin; use frame_support::{assert_err, assert_ok}; + use frame_system::Origin; use sp_core::H256; use sp_domains::{BundleDigest, InboxedBundle, InvalidBundleType}; @@ -971,6 +973,40 @@ mod tests { }); } + #[test] + fn test_prune_domain_execution_receipt() { + let creator = 0u128; + let operator_id = 1u64; + let mut ext = new_test_ext_with_extensions(); + ext.execute_with(|| { + let domain_id = register_genesis_domain(creator, vec![operator_id]); + let _next_receipt = extend_block_tree_from_zero(domain_id, operator_id, 3); + let head_receipt_number = HeadReceiptNumber::::get(domain_id); + + // freeze domain + assert!(!FrozenDomains::::get().contains(&domain_id)); + Domains::freeze_domain(Origin::::Root.into(), domain_id).unwrap(); + assert!(FrozenDomains::::get().contains(&domain_id)); + + // prune execution receipt + let head_receipt_hash = BlockTree::::get(domain_id, head_receipt_number).unwrap(); + 
Domains::prune_domain_execution_receipt( + Origin::::Root.into(), + domain_id, + head_receipt_hash, + ) + .unwrap(); + assert_eq!( + HeadReceiptNumber::::get(domain_id), + head_receipt_number - 1 + ); + + // unfreeze domain + Domains::unfreeze_domain(Origin::::Root.into(), domain_id).unwrap(); + assert!(!FrozenDomains::::get().contains(&domain_id)); + }) + } + #[test] fn test_invalid_receipt() { let creator = 0u128; diff --git a/crates/pallet-domains/src/lib.rs b/crates/pallet-domains/src/lib.rs index 88398b6c5b..37c99430d5 100644 --- a/crates/pallet-domains/src/lib.rs +++ b/crates/pallet-domains/src/lib.rs @@ -167,12 +167,13 @@ pub(crate) type StateRootOf = <::Hashing as Hash>: mod pallet { #![allow(clippy::large_enum_variant)] + #[cfg(not(feature = "runtime-benchmarks"))] + use crate::block_tree::AcceptedReceiptType; use crate::block_tree::{ - execution_receipt_type, process_execution_receipt, Error as BlockTreeError, ReceiptType, + execution_receipt_type, process_execution_receipt, prune_receipt, Error as BlockTreeError, + ReceiptType, }; #[cfg(not(feature = "runtime-benchmarks"))] - use crate::block_tree::{prune_receipt, AcceptedReceiptType}; - #[cfg(not(feature = "runtime-benchmarks"))] use crate::bundle_storage_fund::refund_storage_fee; use crate::bundle_storage_fund::{charge_bundle_storage_fee, Error as BundleStorageFundError}; use crate::domain_registry::{ @@ -185,13 +186,12 @@ mod pallet { ScheduledRuntimeUpgrade, }; #[cfg(not(feature = "runtime-benchmarks"))] - use crate::staking::do_mark_operators_as_slashed; - #[cfg(not(feature = "runtime-benchmarks"))] use crate::staking::do_reward_operators; use crate::staking::{ - do_deregister_operator, do_nominate_operator, do_register_operator, do_unlock_funds, - do_unlock_nominator, do_withdraw_stake, Deposit, DomainEpoch, Error as StakingError, - Operator, OperatorConfig, SharePrice, StakingSummary, Withdrawal, + do_deregister_operator, do_mark_operators_as_slashed, do_nominate_operator, + do_register_operator, 
do_unlock_funds, do_unlock_nominator, do_withdraw_stake, Deposit, + DomainEpoch, Error as StakingError, Operator, OperatorConfig, SharePrice, StakingSummary, + Withdrawal, }; #[cfg(not(feature = "runtime-benchmarks"))] use crate::staking_epoch::do_slash_operator; @@ -713,6 +713,11 @@ mod pallet { pub type DomainSudoCalls = StorageMap<_, Identity, DomainId, DomainSudoCall, ValueQuery>; + /// Storage that hold a list of all frozen domains. + /// A frozen domain does not accept the bundles but does accept a fraud proof. + #[pallet::storage] + pub type FrozenDomains = StorageValue<_, BTreeSet, ValueQuery>; + #[derive(TypeInfo, Encode, Decode, PalletError, Debug, PartialEq)] pub enum BundleError { /// Can not find the operator for given operator id. @@ -752,6 +757,8 @@ mod pallet { SlotSmallerThanPreviousBlockBundle, /// Equivocated bundle in current block EquivocatedBundle, + /// Domain is frozen and cannot accept new bundles + DomainFrozen, } #[derive(TypeInfo, Encode, Decode, PalletError, Debug, PartialEq)] @@ -880,6 +887,8 @@ mod pallet { DomainSudoCallExists, /// Invalid Domain sudo call. InvalidDomainSudoCall, + /// Domain must be frozen before execution receipt can be pruned. + DomainNotFrozen, } /// Reason for slashing an operator @@ -977,6 +986,16 @@ mod pallet { nominator_id: NominatorId, amount: BalanceOf, }, + DomainFrozen { + domain_id: DomainId, + }, + DomainUnfrozen { + domain_id: DomainId, + }, + PrunedExecutionReceipt { + domain_id: DomainId, + new_head_receipt_number: Option>, + }, } /// Per-domain state for tx range calculation. @@ -1541,6 +1560,85 @@ mod pallet { ); Ok(()) } + + /// Freezes a given domain. + /// A frozen domain does not accept new bundles but accepts fraud proofs. 
+ #[pallet::call_index(17)] + #[pallet::weight(::DbWeight::get().reads_writes(0, 1))] + pub fn freeze_domain(origin: OriginFor, domain_id: DomainId) -> DispatchResult { + ensure_root(origin)?; + FrozenDomains::::mutate(|frozen_domains| frozen_domains.insert(domain_id)); + Self::deposit_event(Event::DomainFrozen { domain_id }); + Ok(()) + } + + /// Unfreezes a frozen domain. + #[pallet::call_index(18)] + #[pallet::weight(::DbWeight::get().reads_writes(0, 1))] + pub fn unfreeze_domain(origin: OriginFor, domain_id: DomainId) -> DispatchResult { + ensure_root(origin)?; + FrozenDomains::::mutate(|frozen_domains| frozen_domains.remove(&domain_id)); + Self::deposit_event(Event::DomainUnfrozen { domain_id }); + Ok(()) + } + + /// Prunes a given execution receipt for given frozen domain. + /// This call assumes the execution receipt to be bad and implicitly trusts Sudo + /// to do necessary validation of the ER before dispatching this call. + #[pallet::call_index(19)] + #[pallet::weight(Pallet::::max_prune_domain_execution_receipt())] + pub fn prune_domain_execution_receipt( + origin: OriginFor, + domain_id: DomainId, + bad_receipt_hash: ReceiptHashFor, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + ensure!( + FrozenDomains::::get().contains(&domain_id), + Error::::DomainNotFrozen + ); + + let head_receipt_number = HeadReceiptNumber::::get(domain_id); + let bad_receipt_number = BlockTreeNodes::::get(bad_receipt_hash) + .ok_or::>(FraudProofError::BadReceiptNotFound.into())? + .execution_receipt + .domain_block_number; + // The `head_receipt_number` must be greater than or equal to any existing receipt, including + // the bad receipt. + ensure!( + head_receipt_number >= bad_receipt_number, + Error::::from(FraudProofError::BadReceiptNotFound), + ); + + let mut actual_weight = T::DbWeight::get().reads(3); + + // prune the bad ER + let block_tree_node = prune_receipt::(domain_id, bad_receipt_number) + .map_err(Error::::from)? 
+ .ok_or::>(FraudProofError::BadReceiptNotFound.into())?; + + actual_weight = actual_weight.saturating_add(T::WeightInfo::handle_bad_receipt( + (block_tree_node.operator_ids.len() as u32).min(MAX_BUNLDE_PER_BLOCK), + )); + + do_mark_operators_as_slashed::( + block_tree_node.operator_ids.into_iter(), + SlashedReason::BadExecutionReceipt(bad_receipt_hash), + ) + .map_err(Error::::from)?; + + // Update the head receipt number to `bad_receipt_number - 1` + let new_head_receipt_number = bad_receipt_number.saturating_sub(One::one()); + HeadReceiptNumber::::insert(domain_id, new_head_receipt_number); + actual_weight = actual_weight.saturating_add(T::DbWeight::get().reads_writes(0, 1)); + + Self::deposit_event(Event::PrunedExecutionReceipt { + domain_id, + new_head_receipt_number: Some(new_head_receipt_number), + }); + + Ok(Some(actual_weight).into()) + } } #[pallet::genesis_config] @@ -1950,6 +2048,11 @@ impl Pallet { pre_dispatch: bool, ) -> Result<(), BundleError> { let domain_id = opaque_bundle.domain_id(); + ensure!( + !FrozenDomains::::get().contains(&domain_id), + BundleError::DomainFrozen + ); + let operator_id = opaque_bundle.operator_id(); let sealed_header = &opaque_bundle.sealed_header; let slot_number = opaque_bundle.slot_number(); @@ -2466,6 +2569,11 @@ impl Pallet { ) } + pub fn max_prune_domain_execution_receipt() -> Weight { + T::WeightInfo::handle_bad_receipt(MAX_BUNLDE_PER_BLOCK) + .saturating_add(T::DbWeight::get().reads_writes(3, 1)) + } + fn actual_epoch_transition_weight(epoch_transition_res: EpochTransitionResult) -> Weight { let EpochTransitionResult { rewarded_operator_count, From 360e29dec11a11e51537fba2ce13ab4b5b17852d Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Thu, 11 Jul 2024 22:10:32 +0300 Subject: [PATCH 02/37] Improve plotting chunks handling with received sector size check and better logging --- .../src/single_disk_farm/plotting.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git 
a/crates/subspace-farmer/src/single_disk_farm/plotting.rs b/crates/subspace-farmer/src/single_disk_farm/plotting.rs index 45691ddbcd..b496a7b5a0 100644 --- a/crates/subspace-farmer/src/single_disk_farm/plotting.rs +++ b/crates/subspace-farmer/src/single_disk_farm/plotting.rs @@ -568,6 +568,7 @@ async fn plot_single_sector_internal( { let sector_write_base_offset = u64::from(sector_index) * sector_size as u64; + let mut total_received = 0; let mut sector_write_offset = sector_write_base_offset; while let Some(maybe_sector_chunk) = sector.next().await { let sector_chunk = match maybe_sector_chunk { @@ -578,6 +579,16 @@ async fn plot_single_sector_internal( )))); } }; + + total_received += sector_chunk.len(); + + if total_received > sector_size { + return Ok(Err(PlottingError::LowLevel(format!( + "Received too many bytes {total_received} instead of expected \ + {sector_size} bytes" + )))); + } + let sector_chunk_size = sector_chunk.len() as u64; trace!(sector_chunk_size, "Writing sector chunk to disk"); @@ -594,9 +605,9 @@ async fn plot_single_sector_internal( } drop(sector); - if (sector_write_offset - sector_write_base_offset) != sector_size as u64 { + if total_received != sector_size { return Ok(Err(PlottingError::LowLevel(format!( - "Received only {sector_write_offset} sector bytes out of {sector_size} \ + "Received only {total_received} sector bytes out of {sector_size} \ expected bytes" )))); } From bd61bc6c3195fd37d449be5de1f0e1249966d121 Mon Sep 17 00:00:00 2001 From: vedhavyas Date: Fri, 12 Jul 2024 12:24:06 +0530 Subject: [PATCH 03/37] store confirmed er instead of subset of er fields for each domain --- crates/pallet-domains/src/benchmarking.rs | 50 +++++---- crates/pallet-domains/src/block_tree.rs | 34 ++---- crates/pallet-domains/src/lib.rs | 30 +++--- crates/pallet-domains/src/migrations.rs | 117 ++++++++++++++++++--- crates/pallet-domains/src/staking.rs | 54 ++++++---- crates/pallet-domains/src/staking_epoch.rs | 51 +++++---- 
crates/sp-domains/src/lib.rs | 15 --- domains/pallets/messenger/src/lib.rs | 10 +- 8 files changed, 230 insertions(+), 131 deletions(-) diff --git a/crates/pallet-domains/src/benchmarking.rs b/crates/pallet-domains/src/benchmarking.rs index aa1a604eb1..511e4ee828 100644 --- a/crates/pallet-domains/src/benchmarking.rs +++ b/crates/pallet-domains/src/benchmarking.rs @@ -28,8 +28,8 @@ use frame_system::{Pallet as System, RawOrigin}; use sp_core::crypto::{Ss58Codec, UncheckedFrom}; use sp_core::ByteArray; use sp_domains::{ - dummy_opaque_bundle, ConfirmedDomainBlock, DomainId, ExecutionReceipt, OperatorAllowList, - OperatorId, OperatorPublicKey, OperatorSignature, PermissionedActionAllowedBy, RuntimeType, + dummy_opaque_bundle, BlockFees, DomainId, ExecutionReceipt, OperatorAllowList, OperatorId, + OperatorPublicKey, OperatorSignature, PermissionedActionAllowedBy, RuntimeType, Transfers, }; use sp_domains_fraud_proof::fraud_proof::FraudProof; use sp_runtime::traits::{CheckedAdd, One, Zero}; @@ -753,19 +753,26 @@ mod benchmarks { do_finalize_domain_epoch_staking::(domain_id) .expect("finalize domain staking should success"); - // Update the `LatestConfirmedDomainBlock` so unlock can success + // Update the `LatestConfirmedDomainExecutionReceipt` so unlock can success let confirmed_domain_block_number = Pallet::::latest_confirmed_domain_block_number(domain_id) + T::StakeWithdrawalLockingPeriod::get() + One::one(); - LatestConfirmedDomainBlock::::insert( + LatestConfirmedDomainExecutionReceipt::::insert( domain_id, - ConfirmedDomainBlock { - block_number: confirmed_domain_block_number, - block_hash: Default::default(), - parent_block_receipt_hash: Default::default(), - state_root: Default::default(), - extrinsics_root: Default::default(), + ExecutionReceiptOf:: { + domain_block_number: confirmed_domain_block_number, + domain_block_hash: Default::default(), + domain_block_extrinsic_root: Default::default(), + parent_domain_block_receipt_hash: Default::default(), + 
consensus_block_number: Default::default(), + consensus_block_hash: Default::default(), + inboxed_bundles: vec![], + final_state_root: Default::default(), + execution_trace: vec![], + execution_trace_root: Default::default(), + block_fees: BlockFees::default(), + transfers: Transfers::default(), }, ); @@ -797,19 +804,26 @@ mod benchmarks { operator_id, )); - // Update the `LatestConfirmedDomainBlock` so unlock can success + // Update the `LatestConfirmedDomainExecutionReceipt` so unlock can success let confirmed_domain_block_number = Pallet::::latest_confirmed_domain_block_number(domain_id) + T::StakeWithdrawalLockingPeriod::get() + One::one(); - LatestConfirmedDomainBlock::::insert( + LatestConfirmedDomainExecutionReceipt::::insert( domain_id, - ConfirmedDomainBlock { - block_number: confirmed_domain_block_number, - block_hash: Default::default(), - parent_block_receipt_hash: Default::default(), - state_root: Default::default(), - extrinsics_root: Default::default(), + ExecutionReceiptOf:: { + domain_block_number: confirmed_domain_block_number, + domain_block_hash: Default::default(), + domain_block_extrinsic_root: Default::default(), + parent_domain_block_receipt_hash: Default::default(), + consensus_block_number: Default::default(), + consensus_block_hash: Default::default(), + inboxed_bundles: vec![], + final_state_root: Default::default(), + execution_trace: vec![], + execution_trace_root: Default::default(), + block_fees: BlockFees::default(), + transfers: Transfers::default(), }, ); diff --git a/crates/pallet-domains/src/block_tree.rs b/crates/pallet-domains/src/block_tree.rs index 5d57de1240..22e609a618 100644 --- a/crates/pallet-domains/src/block_tree.rs +++ b/crates/pallet-domains/src/block_tree.rs @@ -7,7 +7,7 @@ use crate::{ BalanceOf, BlockTree, BlockTreeNodeFor, BlockTreeNodes, Config, ConsensusBlockHash, DomainBlockNumberFor, DomainHashingFor, DomainRuntimeUpgradeRecords, ExecutionInbox, ExecutionReceiptOf, HeadReceiptExtended, HeadReceiptNumber, 
InboxedBundleAuthor, - LatestConfirmedDomainBlock, LatestSubmittedER, Pallet, ReceiptHashFor, + LatestConfirmedDomainExecutionReceipt, LatestSubmittedER, Pallet, ReceiptHashFor, }; #[cfg(not(feature = "std"))] use alloc::vec::Vec; @@ -18,8 +18,8 @@ use scale_info::TypeInfo; use sp_core::Get; use sp_domains::merkle_tree::MerkleTree; use sp_domains::{ - ChainId, ConfirmedDomainBlock, DomainId, DomainsTransfersTracker, ExecutionReceipt, - OnChainRewards, OperatorId, Transfers, + ChainId, DomainId, DomainsTransfersTracker, ExecutionReceipt, OnChainRewards, OperatorId, + Transfers, }; use sp_runtime::traits::{BlockNumberProvider, CheckedSub, One, Saturating, Zero}; use sp_std::cmp::Ordering; @@ -371,6 +371,11 @@ pub(crate) fn process_execution_receipt( // its receipt's `extrinsics_root` anymore. let _ = ExecutionInbox::::clear_prefix((domain_id, to_prune), u32::MAX, None); + LatestConfirmedDomainExecutionReceipt::::insert( + domain_id, + execution_receipt.clone(), + ); + ConsensusBlockHash::::remove( domain_id, execution_receipt.consensus_block_number, @@ -405,18 +410,6 @@ pub(crate) fn process_execution_receipt( T::OnChainRewards::on_chain_rewards(chain_id, reward) }); - LatestConfirmedDomainBlock::::insert( - domain_id, - ConfirmedDomainBlock { - block_number: to_prune, - block_hash: execution_receipt.domain_block_hash, - parent_block_receipt_hash: execution_receipt - .parent_domain_block_receipt_hash, - state_root: execution_receipt.final_state_root, - extrinsics_root: execution_receipt.domain_block_extrinsic_root, - }, - ); - return Ok(Some(ConfirmedDomainBlockInfo { domain_block_number: to_prune, operator_ids, @@ -559,16 +552,7 @@ pub(crate) fn import_genesis_receipt( let er_hash = genesis_receipt.hash::>(); let domain_block_number = genesis_receipt.domain_block_number; - LatestConfirmedDomainBlock::::insert( - domain_id, - ConfirmedDomainBlock { - block_number: domain_block_number, - block_hash: genesis_receipt.domain_block_hash, - parent_block_receipt_hash: 
Default::default(), - state_root: genesis_receipt.final_state_root, - extrinsics_root: genesis_receipt.domain_block_extrinsic_root, - }, - ); + LatestConfirmedDomainExecutionReceipt::::insert(domain_id, genesis_receipt.clone()); let block_tree_node = BlockTreeNode { execution_receipt: genesis_receipt, diff --git a/crates/pallet-domains/src/lib.rs b/crates/pallet-domains/src/lib.rs index b342a710ee..8a1610c5f6 100644 --- a/crates/pallet-domains/src/lib.rs +++ b/crates/pallet-domains/src/lib.rs @@ -203,8 +203,8 @@ mod pallet { use crate::MAX_NOMINATORS_TO_SLASH; use crate::{ BalanceOf, BlockSlot, BlockTreeNodeFor, DomainBlockNumberFor, ElectionVerificationParams, - FraudProofFor, HoldIdentifier, NominatorId, OpaqueBundleOf, ReceiptHashFor, StateRootOf, - MAX_BUNLDE_PER_BLOCK, STORAGE_VERSION, + ExecutionReceiptOf, FraudProofFor, HoldIdentifier, NominatorId, OpaqueBundleOf, + ReceiptHashFor, StateRootOf, MAX_BUNLDE_PER_BLOCK, STORAGE_VERSION, }; #[cfg(not(feature = "std"))] use alloc::string::String; @@ -223,10 +223,9 @@ mod pallet { use sp_core::H256; use sp_domains::bundle_producer_election::ProofOfElectionError; use sp_domains::{ - BundleDigest, ConfirmedDomainBlock, DomainBundleSubmitted, DomainId, DomainSudoCall, - DomainsTransfersTracker, EpochIndex, GenesisDomain, OnChainRewards, OnDomainInstantiated, - OperatorAllowList, OperatorId, OperatorPublicKey, OperatorSignature, RuntimeId, - RuntimeObject, RuntimeType, + BundleDigest, DomainBundleSubmitted, DomainId, DomainSudoCall, DomainsTransfersTracker, + EpochIndex, GenesisDomain, OnChainRewards, OnDomainInstantiated, OperatorAllowList, + OperatorId, OperatorPublicKey, OperatorSignature, RuntimeId, RuntimeObject, RuntimeType, }; use sp_domains_fraud_proof::fraud_proof_runtime_interface::domain_runtime_call; use sp_domains_fraud_proof::storage_proof::{self, FraudProofStorageKeyProvider}; @@ -662,13 +661,8 @@ mod pallet { /// Storage to hold all the domain's latest confirmed block. 
#[pallet::storage] - pub(super) type LatestConfirmedDomainBlock = StorageMap< - _, - Identity, - DomainId, - ConfirmedDomainBlock, T::DomainHash>, - OptionQuery, - >; + pub(super) type LatestConfirmedDomainExecutionReceipt = + StorageMap<_, Identity, DomainId, ExecutionReceiptOf, OptionQuery>; /// The latest ER submitted by the operator for a given domain. It is used to determine if the operator /// has submitted bad ER and is pending to slash. @@ -2328,16 +2322,16 @@ impl Pallet { /// Returns the latest confirmed domain block number for a given domain /// Zero block is always a default confirmed block. pub fn latest_confirmed_domain_block_number(domain_id: DomainId) -> DomainBlockNumberFor { - LatestConfirmedDomainBlock::::get(domain_id) - .map(|block| block.block_number) + LatestConfirmedDomainExecutionReceipt::::get(domain_id) + .map(|er| er.domain_block_number) .unwrap_or_default() } pub fn latest_confirmed_domain_block( domain_id: DomainId, ) -> Option<(DomainBlockNumberFor, T::DomainHash)> { - LatestConfirmedDomainBlock::::get(domain_id) - .map(|block| (block.block_number, block.block_hash)) + LatestConfirmedDomainExecutionReceipt::::get(domain_id) + .map(|er| (er.domain_block_number, er.domain_block_hash)) } /// Returns the domain block limit of the given domain. 
@@ -2407,7 +2401,7 @@ impl Pallet { } pub fn confirmed_domain_block_storage_key(domain_id: DomainId) -> Vec { - LatestConfirmedDomainBlock::::hashed_key_for(domain_id) + LatestConfirmedDomainExecutionReceipt::::hashed_key_for(domain_id) } pub fn is_bad_er_pending_to_prune( diff --git a/crates/pallet-domains/src/migrations.rs b/crates/pallet-domains/src/migrations.rs index fe047eff26..11a90ee95b 100644 --- a/crates/pallet-domains/src/migrations.rs +++ b/crates/pallet-domains/src/migrations.rs @@ -12,17 +12,24 @@ impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV0ToV1 { } pub(super) mod runtime_registry_instance_count_migration { - use crate::pallet::{DomainRegistry, RuntimeRegistry as RuntimeRegistryV1}; - use crate::{Config, DomainSudoCalls}; + use crate::pallet::{ + DomainRegistry, LatestConfirmedDomainExecutionReceipt, RuntimeRegistry as RuntimeRegistryV1, + }; + use crate::{Config, DomainBlockNumberFor, DomainSudoCalls, ExecutionReceiptOf}; #[cfg(not(feature = "std"))] use alloc::string::String; - use codec::{Decode, Encode}; + #[cfg(not(feature = "std"))] + use alloc::vec; + use codec::{Codec, Decode, Encode}; use frame_support::pallet_prelude::{OptionQuery, TypeInfo, Weight}; use frame_support::{storage_alias, Identity}; use frame_system::pallet_prelude::BlockNumberFor; use sp_core::Get; use sp_domains::storage::RawGenesis; - use sp_domains::{DomainSudoCall, RuntimeId, RuntimeObject as RuntimeObjectV1, RuntimeType}; + use sp_domains::{ + BlockFees, DomainId, DomainSudoCall, RuntimeId, RuntimeObject as RuntimeObjectV1, + RuntimeType, Transfers, + }; use sp_version::RuntimeVersion; #[derive(TypeInfo, Debug, Encode, Decode, Clone, PartialEq, Eq)] @@ -39,6 +46,21 @@ pub(super) mod runtime_registry_instance_count_migration { pub updated_at: Number, } + /// Type holding the block details of confirmed domain block. 
+ #[derive(TypeInfo, Encode, Decode, Debug, Clone, PartialEq, Eq)] + pub struct ConfirmedDomainBlock { + /// Block number of the confirmed domain block. + pub block_number: DomainBlockNumber, + /// Block hash of the confirmed domain block. + pub block_hash: DomainHash, + /// Parent block hash of the confirmed domain block. + pub parent_block_receipt_hash: DomainHash, + /// State root of the domain block. + pub state_root: DomainHash, + /// Extrinsic root of the domain block. + pub extrinsics_root: DomainHash, + } + #[storage_alias] pub type RuntimeRegistry = StorageMap< crate::Pallet, @@ -48,18 +70,58 @@ pub(super) mod runtime_registry_instance_count_migration { OptionQuery, >; + #[storage_alias] + pub(super) type LatestConfirmedDomainBlock = StorageMap< + crate::Pallet, + Identity, + DomainId, + ConfirmedDomainBlock, ::DomainHash>, + OptionQuery, + >; + // Return the number of domain instance that instantiated with the given runtime - fn domain_instance_count(runtime_id: RuntimeId) -> (u32, u64) { - let mut read_write_count = 0; + fn domain_instance_count(runtime_id: RuntimeId) -> (u32, (u64, u64)) { + let (mut read_count, mut write_count) = (0, 0); ( DomainRegistry::::iter() .filter(|(domain_id, domain_obj)| { - read_write_count += 1; + read_count += 1; + + // migrate domain sudo call DomainSudoCalls::::insert(domain_id, DomainSudoCall { maybe_call: None }); + write_count += 1; + + // migrate domain latest confirmed domain block + read_count += 1; + match LatestConfirmedDomainBlock::::take(domain_id) { + None => {} + Some(confirmed_domain_block) => { + write_count += 1; + LatestConfirmedDomainExecutionReceipt::::insert( + domain_id, + ExecutionReceiptOf:: { + domain_block_number: confirmed_domain_block.block_number, + domain_block_hash: confirmed_domain_block.block_hash, + domain_block_extrinsic_root: confirmed_domain_block + .extrinsics_root, + parent_domain_block_receipt_hash: confirmed_domain_block + .parent_block_receipt_hash, + consensus_block_number: 
Default::default(), + consensus_block_hash: Default::default(), + inboxed_bundles: vec![], + final_state_root: confirmed_domain_block.state_root, + execution_trace: vec![], + execution_trace_root: Default::default(), + block_fees: BlockFees::default(), + transfers: Transfers::default(), + }, + ) + } + } domain_obj.domain_config.runtime_id == runtime_id }) .count() as u32, - read_write_count, + (read_count, write_count), ) } @@ -67,7 +129,8 @@ pub(super) mod runtime_registry_instance_count_migration { pub(super) fn migrate_runtime_registry_storages() -> Weight { let (mut read_count, mut write_count) = (0, 0); RuntimeRegistry::::drain().for_each(|(runtime_id, runtime_obj)| { - let (instance_count, domain_read_write_count) = domain_instance_count::(runtime_id); + let (instance_count, (domain_read_count, domain_write_count)) = + domain_instance_count::(runtime_id); RuntimeRegistryV1::::set( runtime_id, Some(RuntimeObjectV1 { @@ -84,9 +147,9 @@ pub(super) mod runtime_registry_instance_count_migration { ); // domain_read_count + 1 since we read the old runtime registry as well - read_count += domain_read_write_count + 1; - // 1 write to new registry and 1 for old registry + domain_write_count to load Sudo Domain runtime call. - write_count += 2 + domain_read_write_count; + read_count += domain_read_count + 1; + // 1 write to new registry and 1 for old registry + domain_write_count. 
+ write_count += 2 + domain_write_count; }); T::DbWeight::get().reads_writes(read_count, write_count) @@ -97,9 +160,11 @@ pub(super) mod runtime_registry_instance_count_migration { mod tests { use crate::domain_registry::{do_instantiate_domain, DomainConfig}; use crate::migrations::runtime_registry_instance_count_migration::{ - RuntimeObject, RuntimeRegistry, + ConfirmedDomainBlock, LatestConfirmedDomainBlock, RuntimeObject, RuntimeRegistry, + }; + use crate::pallet::{ + LatestConfirmedDomainExecutionReceipt, RuntimeRegistry as RuntimeRegistryV1, }; - use crate::pallet::RuntimeRegistry as RuntimeRegistryV1; use crate::tests::{new_test_ext, Balances, Test}; use crate::{Config, DomainSudoCalls}; use domain_runtime_primitives::{AccountId20, AccountId20Converter}; @@ -132,7 +197,7 @@ mod tests { }; let creator = 1u128; - ext.execute_with(|| { + let expected_er = ext.execute_with(|| { // create old registry RuntimeRegistry::::insert( domain_config.runtime_id, @@ -186,6 +251,18 @@ mod tests { ); do_instantiate_domain::(domain_config.clone(), creator, 0u64).unwrap(); + let er = LatestConfirmedDomainExecutionReceipt::::take(DomainId::new(0)).unwrap(); + LatestConfirmedDomainBlock::::insert( + DomainId::new(0), + ConfirmedDomainBlock { + block_number: er.domain_block_number, + block_hash: er.domain_block_hash, + parent_block_receipt_hash: er.parent_domain_block_receipt_hash, + state_root: er.final_state_root, + extrinsics_root: er.domain_block_extrinsic_root, + }, + ); + er }); ext.commit_all().unwrap(); @@ -195,7 +272,7 @@ mod tests { crate::migrations::runtime_registry_instance_count_migration::migrate_runtime_registry_storages::(); assert_eq!( weights, - ::DbWeight::get().reads_writes(2, 3), + ::DbWeight::get().reads_writes(3, 4), ); assert_eq!( @@ -204,6 +281,14 @@ mod tests { ); assert!(DomainSudoCalls::::contains_key(DomainId::new(0))); + + assert!(LatestConfirmedDomainBlock::::get(DomainId::new(0)).is_none()); + let er = 
LatestConfirmedDomainExecutionReceipt::::get(DomainId::new(0)).unwrap(); + assert_eq!(er.domain_block_number, expected_er.domain_block_number); + assert_eq!(er.domain_block_hash, expected_er.domain_block_hash); + assert_eq!(er.domain_block_extrinsic_root, expected_er.domain_block_extrinsic_root); + assert_eq!(er.final_state_root, expected_er.final_state_root); + assert_eq!(er.parent_domain_block_receipt_hash, expected_er.parent_domain_block_receipt_hash); }); } } diff --git a/crates/pallet-domains/src/staking.rs b/crates/pallet-domains/src/staking.rs index 3b062bc89e..a310e618f1 100644 --- a/crates/pallet-domains/src/staking.rs +++ b/crates/pallet-domains/src/staking.rs @@ -1311,8 +1311,9 @@ pub(crate) fn do_mark_operators_as_slashed( pub(crate) mod tests { use crate::domain_registry::{DomainConfig, DomainObject}; use crate::pallet::{ - Config, Deposits, DomainRegistry, DomainStakingSummary, LatestConfirmedDomainBlock, - NextOperatorId, NominatorCount, OperatorIdOwner, Operators, PendingSlashes, Withdrawals, + Config, Deposits, DomainRegistry, DomainStakingSummary, + LatestConfirmedDomainExecutionReceipt, NextOperatorId, NominatorCount, OperatorIdOwner, + Operators, PendingSlashes, Withdrawals, }; use crate::staking::{ do_convert_previous_epoch_withdrawal, do_mark_operators_as_slashed, do_nominate_operator, @@ -1322,7 +1323,8 @@ pub(crate) mod tests { use crate::staking_epoch::{do_finalize_domain_current_epoch, do_slash_operator}; use crate::tests::{new_test_ext, ExistentialDeposit, RuntimeOrigin, Test}; use crate::{ - bundle_storage_fund, BalanceOf, Error, NominatorId, SlashedReason, MAX_NOMINATORS_TO_SLASH, + bundle_storage_fund, BalanceOf, Error, ExecutionReceiptOf, NominatorId, SlashedReason, + MAX_NOMINATORS_TO_SLASH, }; use codec::Encode; use frame_support::traits::fungible::Mutate; @@ -1332,8 +1334,8 @@ pub(crate) mod tests { use sp_core::crypto::UncheckedFrom; use sp_core::{sr25519, Pair, U256}; use sp_domains::{ - ConfirmedDomainBlock, DomainId, 
OperatorAllowList, OperatorId, OperatorPair, - OperatorPublicKey, OperatorSignature, + BlockFees, DomainId, OperatorAllowList, OperatorId, OperatorPair, OperatorPublicKey, + OperatorSignature, Transfers, }; use sp_runtime::traits::Zero; use sp_runtime::{PerThing, Perbill}; @@ -1889,14 +1891,21 @@ pub(crate) mod tests { let nominator_count = NominatorCount::::get(operator_id); let confirmed_domain_block = 100; - LatestConfirmedDomainBlock::::insert( + LatestConfirmedDomainExecutionReceipt::::insert( domain_id, - ConfirmedDomainBlock { - block_number: confirmed_domain_block, - block_hash: Default::default(), - parent_block_receipt_hash: Default::default(), - state_root: Default::default(), - extrinsics_root: Default::default(), + ExecutionReceiptOf:: { + domain_block_number: confirmed_domain_block, + domain_block_hash: Default::default(), + domain_block_extrinsic_root: Default::default(), + parent_domain_block_receipt_hash: Default::default(), + consensus_block_number: Default::default(), + consensus_block_hash: Default::default(), + inboxed_bundles: vec![], + final_state_root: Default::default(), + execution_trace: vec![], + execution_trace_root: Default::default(), + block_fees: BlockFees::default(), + transfers: Transfers::default(), }, ); @@ -1957,14 +1966,21 @@ pub(crate) mod tests { // staking withdrawal is 5 blocks // to unlock funds, confirmed block should be atleast 105 let confirmed_domain_block = 105; - LatestConfirmedDomainBlock::::insert( + LatestConfirmedDomainExecutionReceipt::::insert( domain_id, - ConfirmedDomainBlock { - block_number: confirmed_domain_block, - block_hash: Default::default(), - parent_block_receipt_hash: Default::default(), - state_root: Default::default(), - extrinsics_root: Default::default(), + ExecutionReceiptOf:: { + domain_block_number: confirmed_domain_block, + domain_block_hash: Default::default(), + domain_block_extrinsic_root: Default::default(), + parent_domain_block_receipt_hash: Default::default(), + 
consensus_block_number: Default::default(), + consensus_block_hash: Default::default(), + inboxed_bundles: vec![], + final_state_root: Default::default(), + execution_trace: vec![], + execution_trace_root: Default::default(), + block_fees: BlockFees::default(), + transfers: Transfers::default(), }, ); assert_ok!(do_unlock_funds::(operator_id, nominator_id)); diff --git a/crates/pallet-domains/src/staking_epoch.rs b/crates/pallet-domains/src/staking_epoch.rs index b76c6d4b1b..4010d66fb2 100644 --- a/crates/pallet-domains/src/staking_epoch.rs +++ b/crates/pallet-domains/src/staking_epoch.rs @@ -526,8 +526,9 @@ pub(crate) fn do_slash_operator( mod tests { use crate::bundle_storage_fund::STORAGE_FEE_RESERVE; use crate::pallet::{ - Deposits, DomainStakingSummary, LastEpochStakingDistribution, LatestConfirmedDomainBlock, - NominatorCount, OperatorIdOwner, OperatorSigningKey, Operators, Withdrawals, + Deposits, DomainStakingSummary, LastEpochStakingDistribution, + LatestConfirmedDomainExecutionReceipt, NominatorCount, OperatorIdOwner, OperatorSigningKey, + Operators, Withdrawals, }; use crate::staking::tests::{register_operator, Share}; use crate::staking::{ @@ -538,13 +539,13 @@ mod tests { do_finalize_domain_current_epoch, operator_take_reward_tax_and_stake, }; use crate::tests::{new_test_ext, Test}; - use crate::{BalanceOf, Config, HoldIdentifier, NominatorId}; + use crate::{BalanceOf, Config, ExecutionReceiptOf, HoldIdentifier, NominatorId}; use codec::Encode; use frame_support::assert_ok; use frame_support::traits::fungible::InspectHold; use sp_core::{Pair, U256}; use sp_domains::{ - ConfirmedDomainBlock, DomainId, OperatorPair, OperatorSigningKeyProofOfOwnershipData, + BlockFees, DomainId, OperatorPair, OperatorSigningKeyProofOfOwnershipData, Transfers, }; use sp_runtime::traits::Zero; use sp_runtime::{PerThing, Percent}; @@ -618,14 +619,21 @@ mod tests { // de-register operator let domain_block_number = 100; - LatestConfirmedDomainBlock::::insert( + 
LatestConfirmedDomainExecutionReceipt::::insert( domain_id, - ConfirmedDomainBlock { - block_number: domain_block_number, - block_hash: Default::default(), - parent_block_receipt_hash: Default::default(), - state_root: Default::default(), - extrinsics_root: Default::default(), + ExecutionReceiptOf:: { + domain_block_number, + domain_block_hash: Default::default(), + domain_block_extrinsic_root: Default::default(), + parent_domain_block_receipt_hash: Default::default(), + consensus_block_number: Default::default(), + consensus_block_hash: Default::default(), + inboxed_bundles: vec![], + final_state_root: Default::default(), + execution_trace: vec![], + execution_trace_root: Default::default(), + block_fees: BlockFees::default(), + transfers: Transfers::default(), }, ); do_deregister_operator::(operator_account, operator_id).unwrap(); @@ -636,14 +644,21 @@ mod tests { // staking withdrawal is 5 blocks, // to unlock funds, confirmed block should be atleast 105 let domain_block_number = 105; - LatestConfirmedDomainBlock::::insert( + LatestConfirmedDomainExecutionReceipt::::insert( domain_id, - ConfirmedDomainBlock { - block_number: domain_block_number, - block_hash: Default::default(), - parent_block_receipt_hash: Default::default(), - state_root: Default::default(), - extrinsics_root: Default::default(), + ExecutionReceiptOf:: { + domain_block_number, + domain_block_hash: Default::default(), + domain_block_extrinsic_root: Default::default(), + parent_domain_block_receipt_hash: Default::default(), + consensus_block_number: Default::default(), + consensus_block_hash: Default::default(), + inboxed_bundles: vec![], + final_state_root: Default::default(), + execution_trace: vec![], + execution_trace_root: Default::default(), + block_fees: BlockFees::default(), + transfers: Transfers::default(), }, ); diff --git a/crates/sp-domains/src/lib.rs b/crates/sp-domains/src/lib.rs index 31589dce3f..a6eaf713f3 100644 --- a/crates/sp-domains/src/lib.rs +++ 
b/crates/sp-domains/src/lib.rs @@ -776,21 +776,6 @@ impl ProofOfElection { } } -/// Type holding the block details of confirmed domain block. -#[derive(TypeInfo, Encode, Decode, Debug, Clone, PartialEq, Eq)] -pub struct ConfirmedDomainBlock { - /// Block number of the confirmed domain block. - pub block_number: DomainBlockNumber, - /// Block hash of the confirmed domain block. - pub block_hash: DomainHash, - /// Parent block hash of the confirmed domain block. - pub parent_block_receipt_hash: DomainHash, - /// State root of the domain block. - pub state_root: DomainHash, - /// Extrinsic root of the domain block. - pub extrinsics_root: DomainHash, -} - /// Type that represents an operator allow list for Domains. #[derive(TypeInfo, Debug, Encode, Decode, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum OperatorAllowList { diff --git a/domains/pallets/messenger/src/lib.rs b/domains/pallets/messenger/src/lib.rs index b82dca11bc..9a9fceb82c 100644 --- a/domains/pallets/messenger/src/lib.rs +++ b/domains/pallets/messenger/src/lib.rs @@ -1265,7 +1265,13 @@ mod pallet { .ok_or(UnknownTransaction::CannotLookup)?; StorageProofVerifier::::get_decoded_value::< - sp_domains::ConfirmedDomainBlock, T::Hash>, + sp_domains::ExecutionReceipt< + BlockNumberFor, + T::Hash, + BlockNumberFor, + T::Hash, + BalanceOf, + >, >( &consensus_state_root, domain_proof, @@ -1279,7 +1285,7 @@ mod pallet { ); TransactionValidityError::Invalid(InvalidTransaction::BadProof) })? 
- .state_root + .final_state_root } else { consensus_state_root }; From 25fbb2a8fa5d12c26f647121ec64e112fafb0094 Mon Sep 17 00:00:00 2001 From: linning Date: Fri, 12 Jul 2024 19:45:32 +0800 Subject: [PATCH 04/37] Introduce the auto-id domain test runtime It is mostly copy-paste from the production runtime with some minor changes to the runtime version and impl test runtime api Signed-off-by: linning --- domains/test/runtime/auto-id/Cargo.toml | 126 +++ domains/test/runtime/auto-id/build.rs | 13 + domains/test/runtime/auto-id/src/lib.rs | 1031 +++++++++++++++++++++++ 3 files changed, 1170 insertions(+) create mode 100644 domains/test/runtime/auto-id/Cargo.toml create mode 100644 domains/test/runtime/auto-id/build.rs create mode 100644 domains/test/runtime/auto-id/src/lib.rs diff --git a/domains/test/runtime/auto-id/Cargo.toml b/domains/test/runtime/auto-id/Cargo.toml new file mode 100644 index 0000000000..38a2e2dcb7 --- /dev/null +++ b/domains/test/runtime/auto-id/Cargo.toml @@ -0,0 +1,126 @@ +[package] +name = "auto-id-domain-test-runtime" +version = "0.1.0" +authors = ["Subspace Labs "] +license = "Apache-2.0" +homepage = "https://subspace.network" +repository = "https://github.com/subspace/subspace/" +edition = "2021" +description = "Subspace AutoId domain test runtime" +include = [ + "/src", + "/build.rs", + "/Cargo.toml", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +domain-pallet-executive = { version = "0.1.0", path = "../../../pallets/executive", default-features = false } +domain-runtime-primitives = { version = "0.1.0", path = "../../../primitives/runtime", default-features = false } +domain-test-primitives = { version = "0.1.0", path = "../../primitives", default-features = false } +frame-benchmarking = { default-features = false, optional = true, git = "https://github.com/subspace/polkadot-sdk", 
rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +frame-support = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +frame-system = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +frame-system-benchmarking = { default-features = false, optional = true, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +frame-system-rpc-runtime-api = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +pallet-auto-id = { version = "0.1.0", path = "../../../pallets/auto-id", default-features = false } +pallet-balances = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +pallet-block-fees = { version = "0.1.0", path = "../../../pallets/block-fees", default-features = false } +pallet-domain-id = { version = "0.1.0", path = "../../../pallets/domain-id", default-features = false } +pallet-domain-sudo = { version = "0.1.0", path = "../../../pallets/domain-sudo", default-features = false } +pallet-messenger = { version = "0.1.0", path = "../../../pallets/messenger", default-features = false } +pallet-timestamp = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +pallet-transaction-payment = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +pallet-transaction-payment-rpc-runtime-api = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +pallet-transporter = { version = "0.1.0", path = "../../../pallets/transporter", default-features = false } +scale-info = { version = "2.11.2", default-features = 
false, features = ["derive"] } +sp-api = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +sp-block-builder = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +sp-core = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +sp-domains = { version = "0.1.0", path = "../../../../crates/sp-domains", default-features = false } +sp-domain-sudo = { version = "0.1.0", path = "../../../primitives/domain-sudo", default-features = false } +sp-genesis-builder = { git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef", default-features = false } +sp-inherents = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +sp-messenger = { version = "0.1.0", default-features = false, path = "../../../primitives/messenger" } +sp-messenger-host-functions = { version = "0.1.0", default-features = false, path = "../../../primitives/messenger-host-functions" } +sp-mmr-primitives = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +sp-offchain = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +sp-runtime = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +sp-session = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +sp-std = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +sp-storage = { default-features = false, git = 
"https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef", optional = true } +sp-subspace-mmr = { version = "0.1.0", default-features = false, path = "../../../../crates/sp-subspace-mmr" } +sp-transaction-pool = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +sp-version = { default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef" } +subspace-core-primitives = { version = "0.1.0", path = "../../../../crates/subspace-core-primitives", default-features = false } +subspace-runtime-primitives = { version = "0.1.0", path = "../../../../crates/subspace-runtime-primitives", default-features = false } + +[dev-dependencies] +subspace-runtime-primitives = { version = "0.1.0", features = ["testing"], path = "../../../../crates/subspace-runtime-primitives" } + +[build-dependencies] +substrate-wasm-builder = { git = "https://github.com/subspace/polkadot-sdk", rev = "0cbfcb0232bbf71ac5b14cc8c99bf043cec420ef", optional = true } + +[features] +default = [ + "std", +] +std = [ + "codec/std", + "domain-pallet-executive/std", + "domain-runtime-primitives/std", + "domain-test-primitives/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "frame-system-rpc-runtime-api/std", + "pallet-auto-id/std", + "pallet-balances/std", + "pallet-block-fees/std", + "pallet-domain-id/std", + "pallet-domain-sudo/std", + "pallet-messenger/std", + "pallet-timestamp/std", + "pallet-transaction-payment/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transporter/std", + "scale-info/std", + "sp-api/std", + "sp-block-builder/std", + "sp-core/std", + "sp-domains/std", + "sp-domain-sudo/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-messenger/std", + "sp-messenger-host-functions/std", + "sp-mmr-primitives/std", + "sp-offchain/std", + "sp-runtime/std", + 
"sp-session/std", + "sp-std/std", + "sp-storage?/std", + "sp-subspace-mmr/std", + "sp-transaction-pool/std", + "sp-version/std", + "subspace-core-primitives/std", + "subspace-runtime-primitives/std", + "substrate-wasm-builder", +] +runtime-benchmarks = [ + "domain-pallet-executive/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-storage", + "frame-benchmarking", + "frame-system-benchmarking", + "frame-system-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-auto-id/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-messenger/runtime-benchmarks", + "pallet-domain-id/runtime-benchmarks" +] diff --git a/domains/test/runtime/auto-id/build.rs b/domains/test/runtime/auto-id/build.rs new file mode 100644 index 0000000000..c317f99958 --- /dev/null +++ b/domains/test/runtime/auto-id/build.rs @@ -0,0 +1,13 @@ +fn main() { + #[cfg(feature = "std")] + { + // TODO: Workaround for https://github.com/paritytech/polkadot-sdk/issues/3192 + std::env::set_var("CFLAGS", "-mcpu=mvp"); + std::env::set_var("WASM_BUILD_TYPE", "release"); + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build(); + } +} diff --git a/domains/test/runtime/auto-id/src/lib.rs b/domains/test/runtime/auto-id/src/lib.rs new file mode 100644 index 0000000000..167d024d74 --- /dev/null +++ b/domains/test/runtime/auto-id/src/lib.rs @@ -0,0 +1,1031 @@ +#![feature(variant_count)] +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. +#![recursion_limit = "256"] + +// Make the WASM binary available. 
+#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +#[cfg(not(feature = "std"))] +extern crate alloc; + +#[cfg(not(feature = "std"))] +use alloc::format; +use codec::{Decode, Encode, MaxEncodedLen}; +use domain_runtime_primitives::opaque::Header; +pub use domain_runtime_primitives::{ + block_weights, maximum_block_length, opaque, AccountId, Address, Balance, BlockNumber, Hash, + Nonce, Signature, EXISTENTIAL_DEPOSIT, +}; +use domain_runtime_primitives::{ + CheckExtrinsicsValidityError, DecodeExtrinsicError, ERR_BALANCE_OVERFLOW, SLOT_DURATION, +}; +use frame_support::dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo}; +use frame_support::genesis_builder_helper::{build_state, get_preset}; +use frame_support::inherent::ProvideInherent; +use frame_support::pallet_prelude::TypeInfo; +use frame_support::traits::fungible::Credit; +use frame_support::traits::{ + ConstU16, ConstU32, ConstU64, Everything, Imbalance, OnUnbalanced, VariantCount, +}; +use frame_support::weights::constants::ParityDbWeight; +use frame_support::weights::{ConstantMultiplier, IdentityFee, Weight}; +use frame_support::{construct_runtime, parameter_types}; +use frame_system::limits::{BlockLength, BlockWeights}; +use pallet_block_fees::fees::OnChargeDomainTransaction; +use pallet_transporter::EndpointHandler; +use sp_api::impl_runtime_apis; +use sp_core::crypto::KeyTypeId; +use sp_core::{Get, OpaqueMetadata}; +use sp_domains::{ChannelId, DomainAllowlistUpdates, DomainId, MessengerHoldIdentifier, Transfers}; +use sp_messenger::endpoint::{Endpoint, EndpointHandler as EndpointHandlerT, EndpointId}; +use sp_messenger::messages::{ + BlockMessagesWithStorageKey, ChainId, CrossDomainMessage, FeeModel, MessageId, MessageKey, +}; +use sp_messenger_host_functions::{get_storage_key, StorageKeyRequest}; +use sp_mmr_primitives::EncodableOpaqueLeaf; +use sp_runtime::generic::Era; +use sp_runtime::traits::{ + AccountIdLookup, BlakeTwo256, Block as BlockT, Checkable, Keccak256, 
NumberFor, One, + SignedExtension, ValidateUnsigned, Zero, +}; +use sp_runtime::transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, +}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, ApplyExtrinsicResult, Digest, + ExtrinsicInclusionMode, +}; +pub use sp_runtime::{MultiAddress, Perbill, Permill}; +use sp_std::collections::btree_set::BTreeSet; +use sp_std::marker::PhantomData; +use sp_std::prelude::*; +use sp_subspace_mmr::domain_mmr_runtime_interface::{ + is_consensus_block_finalized, verify_mmr_proof, +}; +use sp_subspace_mmr::{ConsensusChainMmrLeafProof, MmrLeaf}; +use sp_version::RuntimeVersion; +use subspace_runtime_primitives::{ + BlockNumber as ConsensusBlockNumber, Hash as ConsensusBlockHash, Moment, + SlowAdjustingFeeUpdate, SSC, +}; + +/// Block type as expected by this runtime. +pub type Block = generic::Block; + +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; + +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; + +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); + +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; + +/// Extrinsic type that has already been checked. +pub type CheckedExtrinsic = generic::CheckedExtrinsic; + +/// Executive: handles dispatch to the various modules. +pub type Executive = domain_pallet_executive::Executive< + Runtime, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, +>; + +impl_opaque_keys! 
{ + pub struct SessionKeys { + /// Primarily used for adding the operator signing key into the Keystore. + pub operator: sp_domains::OperatorKey, + } +} + +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("subspace-auto-id-domain"), + impl_name: create_runtime_str!("subspace-auto-id-domain"), + authoring_version: 0, + spec_version: 1, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 0, + state_version: 0, + extrinsic_state_version: 1, +}; + +parameter_types! { + pub const Version: RuntimeVersion = VERSION; + pub const BlockHashCount: BlockNumber = 2400; + pub RuntimeBlockLength: BlockLength = maximum_block_length(); + pub RuntimeBlockWeights: BlockWeights = block_weights(); +} + +impl frame_system::Config for Runtime { + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The aggregated dispatch type that is available for extrinsics. + type RuntimeCall = RuntimeCall; + /// The aggregated `RuntimeTask` type. + type RuntimeTask = RuntimeTask; + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. + type Lookup = AccountIdLookup; + /// The type for storing how many extrinsics an account has signed. + type Nonce = Nonce; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The hashing algorithm used. + type Hashing = BlakeTwo256; + /// The block type. + type Block = Block; + /// The ubiquitous event type. + type RuntimeEvent = RuntimeEvent; + /// The ubiquitous origin type. + type RuntimeOrigin = RuntimeOrigin; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// Runtime version. + type Version = Version; + /// Converts a module to an index of this module in the runtime. + type PalletInfo = PalletInfo; + /// The data to be stored in an account. 
+ type AccountData = pallet_balances::AccountData; + /// What to do if a new account is created. + type OnNewAccount = (); + /// What to do if an account is fully reaped from the system. + type OnKilledAccount = (); + /// The weight of database operations that the runtime can invoke. + type DbWeight = ParityDbWeight; + /// The basic call filter to use in dispatchable. + type BaseCallFilter = Everything; + /// Weight information for the extrinsics of this pallet. + type SystemWeightInfo = (); + /// Block & extrinsics weights: base values and limits. + type BlockWeights = RuntimeBlockWeights; + /// The maximum length of a block (in bytes). + type BlockLength = RuntimeBlockLength; + type SS58Prefix = ConstU16<2254>; + /// The action to take on a Runtime Upgrade + type OnSetCode = (); + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); + type MaxConsumers = ConstU32<16>; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = Moment; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = (); +} + +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +/// `DustRemovalHandler` used to collect all the SSC dust left when the account is reaped. +pub struct DustRemovalHandler; + +impl OnUnbalanced> for DustRemovalHandler { + fn on_nonzero_unbalanced(dusted_amount: Credit) { + BlockFees::note_burned_balance(dusted_amount.peek()); + } +} + +impl pallet_balances::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type MaxLocks = MaxLocks; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. 
+ type RuntimeEvent = RuntimeEvent; + type DustRemoval = DustRemovalHandler; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = pallet_balances::weights::SubstrateWeight; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = HoldIdentifier; +} + +parameter_types! { + pub const OperationalFeeMultiplier: u8 = 5; + pub const DomainChainByteFee: Balance = 1; +} + +impl pallet_block_fees::Config for Runtime { + type Balance = Balance; + type DomainChainByteFee = DomainChainByteFee; +} + +pub struct FinalDomainTransactionByteFee; + +impl Get for FinalDomainTransactionByteFee { + fn get() -> Balance { + BlockFees::final_domain_transaction_byte_fee() + } +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = OnChargeDomainTransaction; + type WeightToFee = IdentityFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type OperationalFeeMultiplier = OperationalFeeMultiplier; +} + +impl pallet_auto_id::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Time = Timestamp; + type Weights = pallet_auto_id::weights::SubstrateWeight; +} + +pub struct ExtrinsicStorageFees; + +impl domain_pallet_executive::ExtrinsicStorageFees for ExtrinsicStorageFees { + fn extract_signer(xt: UncheckedExtrinsic) -> (Option, DispatchInfo) { + let dispatch_info = xt.get_dispatch_info(); + let lookup = frame_system::ChainContext::::default(); + let maybe_signer = extract_signer_inner(&xt, &lookup).and_then(|res| res.ok()); + (maybe_signer, dispatch_info) + } + + fn on_storage_fees_charged( + charged_fees: Balance, + tx_size: u32, + ) -> Result<(), TransactionValidityError> { + let consensus_storage_fee = BlockFees::consensus_chain_byte_fee() + .checked_mul(Balance::from(tx_size)) + 
.ok_or(InvalidTransaction::Custom(ERR_BALANCE_OVERFLOW))?; + + let (paid_consensus_storage_fee, paid_domain_fee) = if charged_fees <= consensus_storage_fee + { + (charged_fees, Zero::zero()) + } else { + (consensus_storage_fee, charged_fees - consensus_storage_fee) + }; + + BlockFees::note_consensus_storage_fee(paid_consensus_storage_fee); + BlockFees::note_domain_execution_fee(paid_domain_fee); + Ok(()) + } +} + +impl domain_pallet_executive::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = domain_pallet_executive::weights::SubstrateWeight; + type Currency = Balances; + type LengthToFee = ::LengthToFee; + type ExtrinsicStorageFees = ExtrinsicStorageFees; +} + +parameter_types! { + pub SelfChainId: ChainId = SelfDomainId::self_domain_id().into(); +} + +pub struct OnXDMRewards; + +impl sp_messenger::OnXDMRewards for OnXDMRewards { + fn on_xdm_rewards(rewards: Balance) { + BlockFees::note_domain_execution_fee(rewards) + } + fn on_chain_protocol_fees(chain_id: ChainId, fees: Balance) { + // note the burned balance from this chain + BlockFees::note_burned_balance(fees); + // note the chain rewards + BlockFees::note_chain_rewards(chain_id, fees); + } +} + +type MmrHash = ::Output; + +pub struct MmrProofVerifier; + +impl sp_subspace_mmr::MmrProofVerifier, Hash> for MmrProofVerifier { + fn verify_proof_and_extract_leaf( + mmr_leaf_proof: ConsensusChainMmrLeafProof, Hash, MmrHash>, + ) -> Option> { + let ConsensusChainMmrLeafProof { + consensus_block_number, + opaque_mmr_leaf: opaque_leaf, + proof, + .. 
+ } = mmr_leaf_proof; + + if !is_consensus_block_finalized(consensus_block_number) { + return None; + } + + let leaf: MmrLeaf = + opaque_leaf.into_opaque_leaf().try_decode()?; + + verify_mmr_proof(vec![EncodableOpaqueLeaf::from_leaf(&leaf)], proof.encode()) + .then_some(leaf) + } +} + +pub struct StorageKeys; + +impl sp_messenger::StorageKeys for StorageKeys { + fn confirmed_domain_block_storage_key(domain_id: DomainId) -> Option> { + get_storage_key(StorageKeyRequest::ConfirmedDomainBlockStorageKey(domain_id)) + } + + fn outbox_storage_key(chain_id: ChainId, message_key: MessageKey) -> Option> { + get_storage_key(StorageKeyRequest::OutboxStorageKey { + chain_id, + message_key, + }) + } + + fn inbox_responses_storage_key(chain_id: ChainId, message_key: MessageKey) -> Option> { + get_storage_key(StorageKeyRequest::InboxResponseStorageKey { + chain_id, + message_key, + }) + } +} + +/// Hold identifier for balances for this runtime. +#[derive( + PartialEq, Eq, Clone, Encode, Decode, TypeInfo, MaxEncodedLen, Ord, PartialOrd, Copy, Debug, +)] +pub enum HoldIdentifier { + Messenger(MessengerHoldIdentifier), +} + +impl VariantCount for HoldIdentifier { + // TODO: revist this value, it is used as the max number of hold an account can + // create. Currently, opening an XDM channel will create 1 hold, so this value + // also used as the limit of how many channel an account can open. + // + // TODO: HACK this is not the actual variant count but it is required see + // https://github.com/subspace/subspace/issues/2674 for more details. It + // will be resolved as https://github.com/paritytech/polkadot-sdk/issues/4033. + const VARIANT_COUNT: u32 = 100; +} + +impl pallet_messenger::HoldIdentifier for HoldIdentifier { + fn messenger_channel(dst_chain_id: ChainId, channel_id: ChannelId) -> Self { + Self::Messenger(MessengerHoldIdentifier::Channel((dst_chain_id, channel_id))) + } +} + +parameter_types! 
{ + pub const ChannelReserveFee: Balance = 100 * SSC; + pub const ChannelInitReservePortion: Perbill = Perbill::from_percent(20); + // TODO update the fee model + pub const ChannelFeeModel: FeeModel = FeeModel{relay_fee: SSC}; +} + +impl pallet_messenger::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SelfChainId = SelfChainId; + + fn get_endpoint_handler(endpoint: &Endpoint) -> Option>> { + if endpoint == &Endpoint::Id(TransporterEndpointId::get()) { + Some(Box::new(EndpointHandler(PhantomData::))) + } else { + None + } + } + + type Currency = Balances; + type WeightInfo = pallet_messenger::weights::SubstrateWeight; + type WeightToFee = IdentityFee; + type OnXDMRewards = OnXDMRewards; + type MmrHash = MmrHash; + type MmrProofVerifier = MmrProofVerifier; + type StorageKeys = StorageKeys; + type DomainOwner = (); + type HoldIdentifier = HoldIdentifier; + type ChannelReserveFee = ChannelReserveFee; + type ChannelInitReservePortion = ChannelInitReservePortion; + type DomainRegistration = (); + type ChannelFeeModel = ChannelFeeModel; +} + +impl frame_system::offchain::SendTransactionTypes for Runtime +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type OverarchingCall = RuntimeCall; +} + +parameter_types! 
{ + pub const TransporterEndpointId: EndpointId = 1; +} + +impl pallet_transporter::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SelfChainId = SelfChainId; + type SelfEndpointId = TransporterEndpointId; + type Currency = Balances; + type Sender = Messenger; + type AccountIdConverter = domain_runtime_primitives::AccountIdConverter; + type WeightInfo = pallet_transporter::weights::SubstrateWeight; +} + +impl pallet_domain_id::Config for Runtime {} + +pub struct IntoRuntimeCall; + +impl sp_domain_sudo::IntoRuntimeCall for IntoRuntimeCall { + fn runtime_call(call: Vec) -> RuntimeCall { + UncheckedExtrinsic::decode(&mut call.as_slice()) + .expect("must always be a valid extrinsic as checked by consensus chain; qed") + .function + } +} + +impl pallet_domain_sudo::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type IntoRuntimeCall = IntoRuntimeCall; +} + +// Create the runtime by composing the FRAME pallets that were previously configured. +// +// NOTE: Currently domain runtime does not naturally support the pallets with inherent extrinsics. +construct_runtime!( + pub struct Runtime { + // System support stuff. + System: frame_system = 0, + // Note: Ensure index of the timestamp matches with the index of timestamp on Consensus + // so that consensus can construct encoded extrinsic that matches with Domain encoded + // extrinsic. 
+ Timestamp: pallet_timestamp = 1, + ExecutivePallet: domain_pallet_executive = 2, + + // monetary stuff + Balances: pallet_balances = 20, + TransactionPayment: pallet_transaction_payment = 21, + + // AutoId + AutoId: pallet_auto_id = 40, + + // messenger stuff + // Note: Indexes should match with indexes on other chains and domains + Messenger: pallet_messenger = 60, + Transporter: pallet_transporter = 61, + + // domain instance stuff + SelfDomainId: pallet_domain_id = 90, + BlockFees: pallet_block_fees = 91, + + // Sudo account + Sudo: pallet_domain_sudo = 100, + } +); + +fn is_xdm_mmr_proof_valid(ext: &::Extrinsic) -> Option { + match &ext.function { + RuntimeCall::Messenger(pallet_messenger::Call::relay_message { msg }) + | RuntimeCall::Messenger(pallet_messenger::Call::relay_message_response { msg }) => { + let ConsensusChainMmrLeafProof { + consensus_block_number, + opaque_mmr_leaf, + proof, + .. + } = msg.proof.consensus_mmr_proof(); + + if !is_consensus_block_finalized(consensus_block_number) { + return Some(false); + } + + Some(verify_mmr_proof(vec![opaque_mmr_leaf], proof.encode())) + } + _ => None, + } +} + +/// Returns a valid Sudo call. +/// Should extend this function to limit specific calls Sudo can make when needed. 
+fn is_valid_sudo_call(encoded_ext: Vec) -> bool { + UncheckedExtrinsic::decode(&mut encoded_ext.as_slice()).is_ok() +} + +fn construct_sudo_call_extrinsic(encoded_ext: Vec) -> ::Extrinsic { + let ext = UncheckedExtrinsic::decode(&mut encoded_ext.as_slice()) + .expect("must always be an valid extrinsic due to the check above; qed"); + UncheckedExtrinsic::new_unsigned( + pallet_domain_sudo::Call::sudo { + call: Box::new(ext.function), + } + .into(), + ) +} + +fn extract_signer_inner( + ext: &UncheckedExtrinsic, + lookup: &Lookup, +) -> Option> +where + Lookup: sp_runtime::traits::Lookup, +{ + ext.signature + .as_ref() + .map(|(signed, _, _)| lookup.lookup(signed.clone()).map_err(|e| e.into())) +} + +pub fn extract_signer( + extrinsics: Vec, +) -> Vec<(Option, UncheckedExtrinsic)> { + let lookup = frame_system::ChainContext::::default(); + + extrinsics + .into_iter() + .map(|extrinsic| { + let maybe_signer = + extract_signer_inner(&extrinsic, &lookup).and_then(|account_result| { + account_result.ok().map(|account_id| account_id.encode()) + }); + (maybe_signer, extrinsic) + }) + .collect() +} + +fn extrinsic_era(extrinsic: &::Extrinsic) -> Option { + extrinsic.signature.as_ref().map(|(_, _, extra)| extra.4 .0) +} + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + frame_benchmarking::define_benchmarks!( + [frame_benchmarking, BaselineBench::] + [frame_system, SystemBench::] + [domain_pallet_executive, ExecutivePallet] + [pallet_messenger, Messenger] + [pallet_auto_id, AutoId] + ); +} + +fn check_transaction_and_do_pre_dispatch_inner( + uxt: &::Extrinsic, +) -> Result<(), TransactionValidityError> { + let lookup = frame_system::ChainContext::::default(); + + let xt = uxt.clone().check(&lookup)?; + + let dispatch_info = xt.get_dispatch_info(); + + if dispatch_info.class == DispatchClass::Mandatory { + return Err(InvalidTransaction::MandatoryValidation.into()); + } + + let encoded_len = uxt.encoded_size(); + + // We invoke `pre_dispatch` in addition to 
`validate_transaction`(even though the validation is almost same) + // as that will add the side effect of SignedExtension in the storage buffer + // which would help to maintain context across multiple transaction validity check against same + // runtime instance. + match xt.signed { + // signed transaction + Some((account_id, extra)) => extra + .pre_dispatch(&account_id, &xt.function, &dispatch_info, encoded_len) + .map(|_| ()), + // unsigned transaction + None => { + if let RuntimeCall::Messenger(call) = &xt.function { + Messenger::pre_dispatch_with_trusted_mmr_proof(call)?; + } else { + Runtime::pre_dispatch(&xt.function).map(|_| ())?; + } + SignedExtra::pre_dispatch_unsigned(&xt.function, &dispatch_info, encoded_len) + .map(|_| ()) + } + } +} + +#[cfg(feature = "runtime-benchmarks")] +impl frame_system_benchmarking::Config for Runtime {} + +#[cfg(feature = "runtime-benchmarks")] +impl frame_benchmarking::baseline::Config for Runtime {} + +impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) -> ExtrinsicInclusionMode { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> 
sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl sp_domains::core_api::DomainCoreApi for Runtime { + fn extract_signer( + extrinsics: Vec<::Extrinsic>, + ) -> Vec<(Option, ::Extrinsic)> { + extract_signer(extrinsics) + } + + fn is_within_tx_range( + extrinsic: &::Extrinsic, + bundle_vrf_hash: &subspace_core_primitives::U256, + tx_range: &subspace_core_primitives::U256 + ) -> bool { + use subspace_core_primitives::U256; + use subspace_core_primitives::crypto::blake3_hash; + + let lookup = 
frame_system::ChainContext::::default(); + if let Some(signer) = extract_signer_inner(extrinsic, &lookup).and_then(|account_result| { + account_result.ok().map(|account_id| account_id.encode()) + }) { + // Check if the signer Id hash is within the tx range + let signer_id_hash = U256::from_be_bytes(blake3_hash(&signer.encode())); + sp_domains::signer_in_tx_range(bundle_vrf_hash, &signer_id_hash, tx_range) + } else { + // Unsigned transactions are always in the range. + true + } + } + + fn intermediate_roots() -> Vec<[u8; 32]> { + ExecutivePallet::intermediate_roots() + } + + fn initialize_block_with_post_state_root(header: &::Header) -> Vec { + Executive::initialize_block(header); + Executive::storage_root() + } + + fn apply_extrinsic_with_post_state_root(extrinsic: ::Extrinsic) -> Vec { + let _ = Executive::apply_extrinsic(extrinsic); + Executive::storage_root() + } + + fn construct_set_code_extrinsic(code: Vec) -> Vec { + UncheckedExtrinsic::new_unsigned( + domain_pallet_executive::Call::set_code { + code + }.into() + ).encode() + } + + fn construct_timestamp_extrinsic(moment: Moment) -> ::Extrinsic { + UncheckedExtrinsic::new_unsigned( + pallet_timestamp::Call::set{ now: moment }.into() + ) + } + + fn is_inherent_extrinsic(extrinsic: &::Extrinsic) -> bool { + match &extrinsic.function { + RuntimeCall::Timestamp(call) => Timestamp::is_inherent(call), + RuntimeCall::ExecutivePallet(call) => ExecutivePallet::is_inherent(call), + RuntimeCall::Messenger(call) => Messenger::is_inherent(call), + RuntimeCall::Sudo(call) => Sudo::is_inherent(call), + _ => false, + } + } + + fn check_extrinsics_and_do_pre_dispatch(uxts: Vec<::Extrinsic>, block_number: BlockNumber, + block_hash: ::Hash) -> Result<(), CheckExtrinsicsValidityError> { + // Initializing block related storage required for validation + System::initialize( + &(block_number + BlockNumber::one()), + &block_hash, + &Default::default(), + ); + + for (extrinsic_index, uxt) in uxts.iter().enumerate() { + 
check_transaction_and_do_pre_dispatch_inner(uxt).map_err(|e| { + CheckExtrinsicsValidityError { + extrinsic_index: extrinsic_index as u32, + transaction_validity_error: e + } + })?; + } + + Ok(()) + } + + fn decode_extrinsic( + opaque_extrinsic: sp_runtime::OpaqueExtrinsic, + ) -> Result<::Extrinsic, DecodeExtrinsicError> { + let encoded = opaque_extrinsic.encode(); + UncheckedExtrinsic::decode(&mut encoded.as_slice()) + .map_err(|err| DecodeExtrinsicError(format!("{}", err))) + } + + fn extrinsic_era( + extrinsic: &::Extrinsic + ) -> Option { + extrinsic_era(extrinsic) + } + + fn extrinsic_weight(ext: &::Extrinsic) -> Weight { + ext.get_dispatch_info().weight + } + + fn block_fees() -> sp_domains::BlockFees { + BlockFees::collected_block_fees() + } + + fn block_digest() -> Digest { + System::digest() + } + + fn block_weight() -> Weight { + System::block_weight().total() + } + + fn construct_consensus_chain_byte_fee_extrinsic(transaction_byte_fee: Balance) -> ::Extrinsic { + UncheckedExtrinsic::new_unsigned( + pallet_block_fees::Call::set_next_consensus_chain_byte_fee{ transaction_byte_fee }.into() + ) + } + + fn construct_domain_update_chain_allowlist_extrinsic(updates: DomainAllowlistUpdates) -> ::Extrinsic { + UncheckedExtrinsic::new_unsigned( + pallet_messenger::Call::update_domain_allowlist{ updates }.into() + ) + } + + fn transfers() -> Transfers { + Transporter::chain_transfers() + } + + fn transfers_storage_key() -> Vec { + Transporter::transfers_storage_key() + } + + fn block_fees_storage_key() -> Vec { + BlockFees::block_fees_storage_key() + } + } + + impl sp_messenger::MessengerApi for Runtime { + fn is_xdm_mmr_proof_valid( + extrinsic: &::Extrinsic, + ) -> Option { + is_xdm_mmr_proof_valid(extrinsic) + } + + fn extract_xdm_mmr_proof(ext: &::Extrinsic) -> Option> { + match &ext.function { + RuntimeCall::Messenger(pallet_messenger::Call::relay_message { msg }) + | RuntimeCall::Messenger(pallet_messenger::Call::relay_message_response { msg }) => { + 
Some(msg.proof.consensus_mmr_proof()) + } + _ => None, + } + } + + fn confirmed_domain_block_storage_key(_domain_id: DomainId) -> Vec { + // invalid call from Domain runtime + vec![] + } + + fn outbox_storage_key(message_key: MessageKey) -> Vec { + Messenger::outbox_storage_key(message_key) + } + + fn inbox_response_storage_key(message_key: MessageKey) -> Vec { + Messenger::inbox_response_storage_key(message_key) + } + + fn domain_chains_allowlist_update(_domain_id: DomainId) -> Option{ + // not valid call on domains + None + } + } + + impl sp_messenger::RelayerApi for Runtime { + fn block_messages() -> BlockMessagesWithStorageKey { + Messenger::get_block_messages() + } + + fn outbox_message_unsigned(msg: CrossDomainMessage, ::Hash, ::Hash>) -> Option<::Extrinsic> { + Messenger::outbox_message_unsigned(msg) + } + + fn inbox_response_message_unsigned(msg: CrossDomainMessage, ::Hash, ::Hash>) -> Option<::Extrinsic> { + Messenger::inbox_response_message_unsigned(msg) + } + + fn should_relay_outbox_message(dst_chain_id: ChainId, msg_id: MessageId) -> bool { + Messenger::should_relay_outbox_message(dst_chain_id, msg_id) + } + + fn should_relay_inbox_message_response(dst_chain_id: ChainId, msg_id: MessageId) -> bool { + Messenger::should_relay_inbox_message_response(dst_chain_id, msg_id) + } + + fn updated_channels() -> BTreeSet<(ChainId, ChannelId)> { + Messenger::updated_channels() + } + + fn channel_storage_key(chain_id: ChainId, channel_id: ChannelId) -> Vec { + Messenger::channel_storage_key(chain_id, channel_id) + } + } + + impl sp_domain_sudo::DomainSudoApi for Runtime { + fn is_valid_sudo_call(extrinsic: Vec) -> bool { + is_valid_sudo_call(extrinsic) + } + + fn construct_domain_sudo_extrinsic(inner: Vec) -> ::Extrinsic { + construct_sudo_call_extrinsic(inner) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn build_state(config: Vec) -> sp_genesis_builder::Result { + build_state::(config) + } + + fn get_preset(id: &Option) -> Option> { + 
get_preset::(id, |_| None) + } + + fn preset_names() -> Vec { + vec![] + } + } + + impl domain_test_primitives::OnchainStateApi for Runtime { + fn free_balance(account_id: AccountId) -> Balance { + Balances::free_balance(account_id) + } + + fn get_open_channel_for_chain(dst_chain_id: ChainId) -> Option { + Messenger::get_open_channel_for_chain(dst_chain_id).map(|(c, _)| c) + } + + fn consensus_chain_byte_fee() -> Balance { + BlockFees::consensus_chain_byte_fee() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{baseline, Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; + + let mut list = Vec::::new(); + + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch}; + use sp_storage::TrackedStorageKey; + use frame_system_benchmarking::Pallet as SystemBench; + use frame_support::traits::WhitelistedStorageKeys; + use baseline::Pallet as BaselineBench; + + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + + add_benchmarks!(params, batches); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } +} + +#[cfg(test)] +mod tests { + use crate::{Runtime, RuntimeBlockWeights as BlockWeights}; + use subspace_runtime_primitives::tests_utils::FeeMultiplierUtils; + + #[test] + fn multiplier_can_grow_from_zero() { + FeeMultiplierUtils::::multiplier_can_grow_from_zero() + } +} From d5c68048cfcc003cd18b55230025f46b172bbef1 Mon Sep 
17 00:00:00 2001 From: linning Date: Fri, 12 Jul 2024 19:49:03 +0800 Subject: [PATCH 05/37] Add auto-id test chain spec and instantiate it during genesis build Signed-off-by: linning --- test/subspace-test-client/Cargo.toml | 1 + .../src/auto_id_domain_chain_spec.rs | 102 ++++++++++++++++++ test/subspace-test-client/src/chain_spec.rs | 77 +++---------- ...chain_spec.rs => evm_domain_chain_spec.rs} | 60 ++++++++++- test/subspace-test-client/src/lib.rs | 5 +- 5 files changed, 176 insertions(+), 69 deletions(-) create mode 100644 test/subspace-test-client/src/auto_id_domain_chain_spec.rs rename test/subspace-test-client/src/{domain_chain_spec.rs => evm_domain_chain_spec.rs} (57%) diff --git a/test/subspace-test-client/Cargo.toml b/test/subspace-test-client/Cargo.toml index 7d205ad031..7eedf537bc 100644 --- a/test/subspace-test-client/Cargo.toml +++ b/test/subspace-test-client/Cargo.toml @@ -15,6 +15,7 @@ include = [ targets = ["x86_64-unknown-linux-gnu"] [dependencies] +auto-id-domain-test-runtime = { version = "0.1.0", path = "../../domains/test/runtime/auto-id" } codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } domain-runtime-primitives = { version = "0.1.0", path = "../../domains/primitives/runtime" } evm-domain-test-runtime = { version = "0.1.0", path = "../../domains/test/runtime/evm" } diff --git a/test/subspace-test-client/src/auto_id_domain_chain_spec.rs b/test/subspace-test-client/src/auto_id_domain_chain_spec.rs new file mode 100644 index 0000000000..cd69340645 --- /dev/null +++ b/test/subspace-test-client/src/auto_id_domain_chain_spec.rs @@ -0,0 +1,102 @@ +//! Chain specification for the auto-id domain. 
+ +use crate::chain_spec::get_from_seed; +use auto_id_domain_test_runtime::{BalancesConfig, RuntimeGenesisConfig, SystemConfig}; +use codec::Encode; +use domain_runtime_primitives::AccountIdConverter; +use sc_chain_spec::{ChainType, GenericChainSpec, NoExtension}; +use sp_core::crypto::AccountId32; +use sp_core::{sr25519, Pair, Public}; +use sp_domains::storage::RawGenesis; +use sp_domains::{GenesisDomain, OperatorAllowList, OperatorPublicKey, RuntimeType}; +use sp_runtime::traits::{Convert, IdentifyAccount}; +use sp_runtime::{BuildStorage, MultiSigner, Percent}; +use subspace_runtime_primitives::{AccountId, Balance, SSC}; +use subspace_test_runtime::{MaxDomainBlockSize, MaxDomainBlockWeight}; + +/// Get public key from keypair seed. +pub(crate) fn get_public_key_from_seed( + seed: &'static str, +) -> ::Public { + TPublic::Pair::from_string(&format!("//{seed}"), None) + .expect("Static values are valid; qed") + .public() +} + +/// Generate an account ID from seed. +pub(crate) fn get_account_id_from_seed(seed: &'static str) -> AccountId32 { + MultiSigner::from(get_public_key_from_seed::(seed)).into_account() +} + +pub(crate) fn endowed_accounts() -> Vec { + vec![ + get_account_id_from_seed("Alice"), + get_account_id_from_seed("Bob"), + get_account_id_from_seed("Charlie"), + get_account_id_from_seed("Dave"), + get_account_id_from_seed("Eve"), + get_account_id_from_seed("Ferdie"), + get_account_id_from_seed("Alice//stash"), + get_account_id_from_seed("Bob//stash"), + get_account_id_from_seed("Charlie//stash"), + get_account_id_from_seed("Dave//stash"), + get_account_id_from_seed("Eve//stash"), + get_account_id_from_seed("Ferdie//stash"), + ] +} + +fn testnet_auto_id_genesis() -> RuntimeGenesisConfig { + RuntimeGenesisConfig { + system: SystemConfig::default(), + balances: BalancesConfig::default(), + ..Default::default() + } +} + +pub fn get_genesis_domain( + sudo_account: subspace_runtime_primitives::AccountId, +) -> Result, String> { + let raw_genesis_storage = { + 
let domain_chain_spec = GenericChainSpec::::builder( + auto_id_domain_test_runtime::WASM_BINARY + .ok_or_else(|| "Development wasm not available".to_string())?, + None, + ) + .with_chain_type(ChainType::Development) + .with_genesis_config( + serde_json::to_value(testnet_auto_id_genesis()) + .map_err(|error| format!("Failed to serialize genesis config: {error}"))?, + ) + .build(); + let storage = domain_chain_spec + .build_storage() + .expect("Failed to build genesis storage from genesis runtime config"); + let raw_genesis = RawGenesis::from_storage(storage); + raw_genesis.encode() + }; + + Ok(GenesisDomain { + runtime_name: "auto-id".to_owned(), + runtime_type: RuntimeType::AutoId, + runtime_version: auto_id_domain_test_runtime::VERSION, + raw_genesis_storage, + + // Domain config, mainly for placeholder the concrete value TBD + owner_account_id: sudo_account, + domain_name: "auto-id-domain".to_owned(), + max_block_size: MaxDomainBlockSize::get(), + max_block_weight: MaxDomainBlockWeight::get(), + bundle_slot_probability: (1, 1), + target_bundles_per_block: 10, + operator_allow_list: OperatorAllowList::Anyone, + + signing_key: get_from_seed::("Bob"), + minimum_nominator_stake: 100 * SSC, + nomination_tax: Percent::from_percent(5), + initial_balances: endowed_accounts() + .iter() + .cloned() + .map(|k| (AccountIdConverter::convert(k), 2_000_000 * SSC)) + .collect(), + }) +} diff --git a/test/subspace-test-client/src/chain_spec.rs b/test/subspace-test-client/src/chain_spec.rs index 8208967b95..3320cfe154 100644 --- a/test/subspace-test-client/src/chain_spec.rs +++ b/test/subspace-test-client/src/chain_spec.rs @@ -1,21 +1,15 @@ //! Chain specification for the test runtime. 
-use crate::domain_chain_spec::testnet_evm_genesis; -use codec::Encode; -use domain_runtime_primitives::AccountId20Converter; -use sc_chain_spec::{ChainType, GenericChainSpec, NoExtension}; +use sc_chain_spec::{ChainType, GenericChainSpec}; use sp_core::{sr25519, Pair, Public}; -use sp_domains::storage::RawGenesis; -use sp_domains::{GenesisDomain, OperatorAllowList, OperatorPublicKey, RuntimeType}; -use sp_runtime::traits::{Convert, IdentifyAccount, Verify}; -use sp_runtime::{BuildStorage, Percent}; +use sp_runtime::traits::{IdentifyAccount, Verify}; use std::marker::PhantomData; use std::num::NonZeroU32; use subspace_runtime_primitives::{AccountId, Balance, BlockNumber, Signature}; use subspace_test_runtime::{ - AllowAuthoringBy, BalancesConfig, DomainsConfig, EnableRewardsAt, MaxDomainBlockSize, - MaxDomainBlockWeight, RewardsConfig, RuntimeGenesisConfig, SubspaceConfig, SudoConfig, - SystemConfig, VestingConfig, SSC, WASM_BINARY, + AllowAuthoringBy, BalancesConfig, DomainsConfig, EnableRewardsAt, RewardsConfig, + RuntimeGenesisConfig, SubspaceConfig, SudoConfig, SystemConfig, VestingConfig, SSC, + WASM_BINARY, }; /// Generate a crypto pair from seed. 
@@ -48,13 +42,7 @@ pub fn subspace_local_testnet_config() -> Result { // Pre-funded accounts // Alice also get more funds that are used during the domain instantiation vec![ - ( - get_account_id_from_seed("Alice"), - (5_000 - + crate::domain_chain_spec::endowed_accounts().len() as Balance - * 2_000_000) - * SSC, - ), + (get_account_id_from_seed("Alice"), 1_000_000_000 * SSC), (get_account_id_from_seed("Bob"), 1_000 * SSC), (get_account_id_from_seed("Charlie"), 1_000 * SSC), (get_account_id_from_seed("Dave"), 1_000 * SSC), @@ -82,24 +70,6 @@ fn create_genesis_config( // who, start, period, period_count, per_period vesting: Vec<(AccountId, BlockNumber, BlockNumber, u32, Balance)>, ) -> Result { - let raw_genesis_storage = { - let domain_chain_spec = GenericChainSpec::::builder( - evm_domain_test_runtime::WASM_BINARY - .ok_or_else(|| "Development wasm not available".to_string())?, - None, - ) - .with_chain_type(ChainType::Development) - .with_genesis_config( - serde_json::to_value(testnet_evm_genesis()) - .map_err(|error| format!("Failed to serialize genesis config: {error}"))?, - ) - .build(); - let storage = domain_chain_spec - .build_storage() - .expect("Failed to build genesis storage from genesis runtime config"); - let raw_genesis = RawGenesis::from_storage(storage); - raw_genesis.encode() - }; Ok(RuntimeGenesisConfig { system: SystemConfig::default(), balances: BalancesConfig { balances }, @@ -122,35 +92,12 @@ fn create_genesis_config( vesting: VestingConfig { vesting }, domains: DomainsConfig { permissioned_action_allowed_by: Some(sp_domains::PermissionedActionAllowedBy::Anyone), - genesis_domains: vec![GenesisDomain { - runtime_name: "evm".to_owned(), - runtime_type: RuntimeType::Evm, - runtime_version: evm_domain_test_runtime::VERSION, - raw_genesis_storage, - - // Domain config, mainly for placeholder the concrete value TBD - owner_account_id: sudo_account, - domain_name: "evm-domain".to_owned(), - max_block_size: MaxDomainBlockSize::get(), - 
max_block_weight: MaxDomainBlockWeight::get(), - bundle_slot_probability: (1, 1), - target_bundles_per_block: 10, - operator_allow_list: OperatorAllowList::Anyone, - - signing_key: get_from_seed::("Alice"), - minimum_nominator_stake: 100 * SSC, - nomination_tax: Percent::from_percent(5), - initial_balances: crate::domain_chain_spec::endowed_accounts() - .iter() - .cloned() - .map(|k| { - ( - AccountId20Converter::convert(k), - 2_000_000 * subspace_runtime_primitives::SSC, - ) - }) - .collect(), - }], + genesis_domains: vec![ + crate::evm_domain_chain_spec::get_genesis_domain(sudo_account.clone()) + .expect("Must success"), + crate::auto_id_domain_chain_spec::get_genesis_domain(sudo_account) + .expect("Must success"), + ], }, runtime_configs: Default::default(), }) diff --git a/test/subspace-test-client/src/domain_chain_spec.rs b/test/subspace-test-client/src/evm_domain_chain_spec.rs similarity index 57% rename from test/subspace-test-client/src/domain_chain_spec.rs rename to test/subspace-test-client/src/evm_domain_chain_spec.rs index f0e13445ec..dac2f89f63 100644 --- a/test/subspace-test-client/src/domain_chain_spec.rs +++ b/test/subspace-test-client/src/evm_domain_chain_spec.rs @@ -1,11 +1,19 @@ //! Chain specification for the evm domain. 
+use crate::chain_spec::get_from_seed; +use codec::Encode; +use domain_runtime_primitives::AccountId20Converter; use evm_domain_test_runtime::{ AccountId as AccountId20, Precompiles, RuntimeGenesisConfig, Signature, }; +use sc_chain_spec::{ChainType, GenericChainSpec, NoExtension}; use sp_core::{ecdsa, Pair, Public}; -use sp_domains::DomainId; -use sp_runtime::traits::{IdentifyAccount, Verify}; +use sp_domains::storage::RawGenesis; +use sp_domains::{DomainId, GenesisDomain, OperatorAllowList, OperatorPublicKey, RuntimeType}; +use sp_runtime::traits::{Convert, IdentifyAccount, Verify}; +use sp_runtime::{BuildStorage, Percent}; +use subspace_runtime_primitives::{AccountId, Balance, SSC}; +use subspace_test_runtime::{MaxDomainBlockSize, MaxDomainBlockWeight}; type AccountPublic = ::Signer; @@ -82,3 +90,51 @@ pub fn testnet_evm_genesis() -> RuntimeGenesisConfig { ..Default::default() } } + +pub fn get_genesis_domain( + sudo_account: subspace_runtime_primitives::AccountId, +) -> Result, String> { + let raw_genesis_storage = { + let domain_chain_spec = GenericChainSpec::::builder( + evm_domain_test_runtime::WASM_BINARY + .ok_or_else(|| "Development wasm not available".to_string())?, + None, + ) + .with_chain_type(ChainType::Development) + .with_genesis_config( + serde_json::to_value(testnet_evm_genesis()) + .map_err(|error| format!("Failed to serialize genesis config: {error}"))?, + ) + .build(); + let storage = domain_chain_spec + .build_storage() + .expect("Failed to build genesis storage from genesis runtime config"); + let raw_genesis = RawGenesis::from_storage(storage); + raw_genesis.encode() + }; + + Ok(GenesisDomain { + runtime_name: "evm".to_owned(), + runtime_type: RuntimeType::Evm, + runtime_version: evm_domain_test_runtime::VERSION, + raw_genesis_storage, + + // Domain config, mainly for placeholder the concrete value TBD + owner_account_id: sudo_account, + domain_name: "evm-domain".to_owned(), + max_block_size: MaxDomainBlockSize::get(), + max_block_weight: 
MaxDomainBlockWeight::get(), + bundle_slot_probability: (1, 1), + target_bundles_per_block: 10, + operator_allow_list: OperatorAllowList::Anyone, + + signing_key: get_from_seed::("Alice"), + minimum_nominator_stake: 100 * SSC, + nomination_tax: Percent::from_percent(5), + initial_balances: endowed_accounts() + .iter() + .cloned() + .map(|k| (AccountId20Converter::convert(k), 2_000_000 * SSC)) + .collect(), + }) +} diff --git a/test/subspace-test-client/src/lib.rs b/test/subspace-test-client/src/lib.rs index 0059f148f0..2b6d13f903 100644 --- a/test/subspace-test-client/src/lib.rs +++ b/test/subspace-test-client/src/lib.rs @@ -16,10 +16,11 @@ //! Subspace test client only. -#![warn(missing_docs, unused_crate_dependencies)] +#![warn(unused_crate_dependencies)] +pub mod auto_id_domain_chain_spec; pub mod chain_spec; -pub mod domain_chain_spec; +pub mod evm_domain_chain_spec; use futures::executor::block_on; use futures::StreamExt; From d4e4eef33d198a3f7038aa645b50bc95d577ea3b Mon Sep 17 00:00:00 2001 From: linning Date: Fri, 12 Jul 2024 19:50:16 +0800 Subject: [PATCH 06/37] Add infra to support running auto-id node in integration test Signed-off-by: linning --- crates/sp-domains-fraud-proof/src/tests.rs | 32 +++-- domains/test/service/Cargo.toml | 1 + domains/test/service/src/domain.rs | 121 +++++++++-------- domains/test/service/src/lib.rs | 147 +++++++++++++-------- 4 files changed, 180 insertions(+), 121 deletions(-) diff --git a/crates/sp-domains-fraud-proof/src/tests.rs b/crates/sp-domains-fraud-proof/src/tests.rs index 457767f80a..a6739b178b 100644 --- a/crates/sp-domains-fraud-proof/src/tests.rs +++ b/crates/sp-domains-fraud-proof/src/tests.rs @@ -4,13 +4,11 @@ use crate::test_ethereum_tx::{ use codec::Encode; use domain_runtime_primitives::{Balance, CheckExtrinsicsValidityError}; use domain_test_service::evm_domain_test_runtime::{ - Runtime as TestRuntime, RuntimeCall, UncheckedExtrinsic as RuntimeUncheckedExtrinsic, + Runtime as TestRuntime, RuntimeCall, 
Signature, UncheckedExtrinsic as RuntimeUncheckedExtrinsic, }; use domain_test_service::EcdsaKeyring::{Alice, Charlie}; use domain_test_service::Sr25519Keyring::Ferdie; -use domain_test_service::{ - construct_extrinsic_generic_with_custom_key, EvmDomainNode, GENESIS_DOMAIN_ID, -}; +use domain_test_service::{construct_extrinsic_raw_payload, EvmDomainNode, GENESIS_DOMAIN_ID}; use ethereum::TransactionV2 as Transaction; use fp_rpc::EthereumRuntimeRPCApi; use frame_support::pallet_prelude::DispatchClass; @@ -224,17 +222,23 @@ async fn benchmark_bundle_with_evm_tx( } 3 => { let ecdsa_key = Pair::from_seed_slice(&account_info.private_key.0).unwrap(); + let function: RuntimeCall = pallet_balances::Call::transfer_allow_death { + dest: account_info.address.0.into(), + value: other_accounts_balance, + } + .into(); + let (raw_payload, extra) = + construct_extrinsic_raw_payload(&alice.client, function.clone(), false, 0, 1); + let signature = raw_payload.using_encoded(|e| { + let msg = keccak_256(e); + ecdsa_key.sign_prehashed(&msg) + }); fp_self_contained::UncheckedExtrinsic( - construct_extrinsic_generic_with_custom_key::( - &alice.client, - pallet_balances::Call::transfer_allow_death { - dest: account_info.address.0.into(), - value: other_accounts_balance, - }, - ecdsa_key, - false, - 0, - 1, + sp_runtime::generic::UncheckedExtrinsic::new_signed( + function.clone(), + ecdsa_key.public().into(), + Signature::new(signature), + extra, ), ) } diff --git a/domains/test/service/Cargo.toml b/domains/test/service/Cargo.toml index 4837e06b25..d7bdcef5ef 100644 --- a/domains/test/service/Cargo.toml +++ b/domains/test/service/Cargo.toml @@ -12,6 +12,7 @@ include = [ ] [dependencies] +auto-id-domain-test-runtime = { version = "0.1.0", path = "../runtime/auto-id" } cross-domain-message-gossip = { version = "0.1.0", path = "../../client/cross-domain-message-gossip" } domain-client-operator = { version = "0.1.0", path = "../../client/domain-operator" } domain-service = { version = "0.1.0", 
path = "../../service" } diff --git a/domains/test/service/src/domain.rs b/domains/test/service/src/domain.rs index 8d5d5a06d4..307fa10759 100644 --- a/domains/test/service/src/domain.rs +++ b/domains/test/service/src/domain.rs @@ -3,7 +3,8 @@ use crate::chain_spec::create_domain_spec; use crate::{ - construct_extrinsic_generic, node_config, BalanceOf, EcdsaKeyring, UncheckedExtrinsicFor, + construct_extrinsic_generic, node_config, BalanceOf, DomainRuntime, EcdsaKeyring, + Sr25519Keyring, UncheckedExtrinsicFor, AUTO_ID_DOMAIN_ID, EVM_DOMAIN_ID, }; use cross_domain_message_gossip::ChainMsg; use domain_client_operator::{fetch_domain_bootstrap_info, BootstrapResult, OperatorStreams}; @@ -12,8 +13,6 @@ use domain_runtime_primitives::Balance; use domain_service::providers::DefaultProvider; use domain_service::FullClient; use domain_test_primitives::OnchainStateApi; -use evm_domain_test_runtime::AccountId as AccountId20; -use fp_rpc::EthereumRuntimeRPCApi; use frame_support::dispatch::{DispatchInfo, PostDispatchInfo}; use frame_system::pallet_prelude::BlockNumberFor; use pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi; @@ -25,11 +24,10 @@ use sc_service::config::MultiaddrWithPeerId; use sc_service::{BasePath, PruningMode, Role, RpcHandlers, TFullBackend, TaskManager}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; -use serde::de::DeserializeOwned; use sp_api::{ApiExt, ConstructRuntimeApi, Metadata, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder; use sp_consensus_subspace::SubspaceApi; -use sp_core::{Decode, Encode, H256}; +use sp_core::{Encode, H256}; use sp_domains::core_api::DomainCoreApi; use sp_domains::DomainId; use sp_messenger::messages::{ChainId, ChannelId}; @@ -39,10 +37,7 @@ use sp_runtime::traits::{Block as BlockT, Dispatchable, NumberFor}; use sp_runtime::OpaqueExtrinsic; use sp_session::SessionKeys; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; 
-use std::fmt::{Debug, Display}; use std::future::Future; -use std::marker::PhantomData; -use std::str::FromStr; use std::sync::Arc; use subspace_runtime_primitives::opaque::Block as CBlock; use subspace_runtime_primitives::Nonce; @@ -52,18 +47,6 @@ use substrate_test_client::{ BlockchainEventsExt, RpcHandlersExt, RpcTransactionError, RpcTransactionOutput, }; -/// Trait for convert keyring to account id -pub trait FromKeyring { - /// Convert keyring to account id - fn from_keyring(key: EcdsaKeyring) -> Self; -} - -impl FromKeyring for AccountId20 { - fn from_keyring(key: EcdsaKeyring) -> Self { - key.to_account_id() - } -} - /// The backend type used by the test service. pub type Backend = TFullBackend; @@ -74,8 +57,9 @@ pub type DomainOperator = domain_service::DomainOperator; /// A generic domain node instance used for testing. -pub struct DomainNode +pub struct DomainNode where + Runtime: DomainRuntime, RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, RuntimeApi::RuntimeApi: ApiExt + Metadata @@ -85,17 +69,14 @@ where + DomainCoreApi + MessengerApi, ::Hash> + TaggedTransactionQueue - + AccountNonceApi + + AccountNonceApi::AccountId, Nonce> + TransactionPaymentRuntimeApi + RelayerApi, NumberFor, ::Hash>, - AccountId: Encode + Decode + FromKeyring, { /// The domain id pub domain_id: DomainId, - // TODO: Make the signing scheme generic over domains, because Ecdsa only used in the EVM domain, - // other (incoming) domains may use Sr25519 /// The node's account key - pub key: EcdsaKeyring, + pub key: ::Keyring, /// TaskManager's instance. pub task_manager: TaskManager, /// Client's instance. 
@@ -117,12 +98,15 @@ where pub operator: DomainOperator, /// Sink to the node's tx pool pub tx_pool_sink: TracingUnboundedSender, - _phantom_data: PhantomData<(Runtime, AccountId)>, } -impl DomainNode +impl DomainNode where - Runtime: frame_system::Config + pallet_transaction_payment::Config + Send + Sync, + Runtime: frame_system::Config + + pallet_transaction_payment::Config + + DomainRuntime + + Send + + Sync, Runtime::RuntimeCall: Dispatchable + Send + Sync, crate::BalanceOf: Send + Sync + From + sp_runtime::FixedPointOperand, @@ -135,29 +119,17 @@ where + SessionKeys + DomainCoreApi + TaggedTransactionQueue - + AccountNonceApi + + AccountNonceApi::AccountId, Nonce> + TransactionPaymentRuntimeApi + MessengerApi, ::Hash> + RelayerApi, NumberFor, ::Hash> - + OnchainStateApi - + EthereumRuntimeRPCApi, - AccountId: DeserializeOwned - + Encode - + Decode - + Clone - + Debug - + Display - + FromStr - + Sync - + Send - + FromKeyring - + 'static, + + OnchainStateApi::AccountId, Balance>, { #[allow(clippy::too_many_arguments)] async fn build( domain_id: DomainId, tokio_handle: tokio::runtime::Handle, - key: EcdsaKeyring, + key: ::Keyring, base_path: BasePath, domain_nodes: Vec, domain_nodes_exclusive: bool, @@ -173,10 +145,11 @@ where .await .expect("Failed to get domain instance data"); let chain_spec = create_domain_spec(domain_instance_data.raw_genesis); + let key_seed = ::to_seed(key); let domain_config = node_config( domain_id, tokio_handle.clone(), - key, + key_seed, domain_nodes, domain_nodes_exclusive, role.clone(), @@ -211,7 +184,9 @@ where .xdm_gossip_worker_builder() .gossip_msg_sink(); - let maybe_operator_id = role.is_authority().then_some(0); + let maybe_operator_id = role + .is_authority() + .then_some(if domain_id == EVM_DOMAIN_ID { 0 } else { 1 }); let consensus_best_hash = mock_consensus_node.client.info().best_hash; let chain_constants = mock_consensus_node @@ -241,10 +216,19 @@ where confirmation_depth_k: chain_constants.confirmation_depth_k(), }; - 
let domain_node = - domain_service::new_full::<_, _, _, _, _, _, RuntimeApi, AccountId, _>(domain_params) - .await - .expect("failed to build domain node"); + let domain_node = domain_service::new_full::< + _, + _, + _, + _, + _, + _, + RuntimeApi, + ::AccountId, + _, + >(domain_params) + .await + .expect("failed to build domain node"); let domain_service::NewFull { task_manager, @@ -285,7 +269,7 @@ where rpc_handlers, operator, tx_pool_sink: domain_message_sink, - _phantom_data: Default::default(), + // _phantom_data: Default::default(), } } @@ -302,7 +286,7 @@ where .runtime_api() .account_nonce( self.client.info().best_hash, - ::from_keyring(self.key), + ::account_id(self.key), ) .expect("Fail to get account nonce") } @@ -387,7 +371,7 @@ where } /// Get the free balance of the given account - pub fn free_balance(&self, account_id: AccountId) -> Balance { + pub fn free_balance(&self, account_id: ::AccountId) -> Balance { self.client .runtime_api() .free_balance(self.client.info().best_hash, account_id) @@ -433,7 +417,6 @@ impl DomainNodeBuilder { /// Create a new instance of `Self`. /// /// `tokio_handle` - The tokio handler to use. - /// `key` - The key that will be used to generate the name. /// `base_path` - Where databases will be stored. 
pub fn new( tokio_handle: tokio::runtime::Handle, @@ -493,11 +476,39 @@ impl DomainNodeBuilder { ) .await } + + /// Build a evm domain node + pub async fn build_auto_id_node( + self, + key: Sr25519Keyring, + role: Role, + mock_consensus_node: &mut MockConsensusNode, + ) -> AutoIdDomainNode { + DomainNode::build( + AUTO_ID_DOMAIN_ID, + self.tokio_handle, + key, + self.base_path, + self.domain_nodes, + self.domain_nodes_exclusive, + self.skip_empty_bundle_production, + role, + mock_consensus_node, + ) + .await + } } /// The evm domain node pub type EvmDomainNode = - DomainNode; + DomainNode; /// The evm domain client pub type EvmDomainClient = Client; + +/// The auto-id domain node +pub type AutoIdDomainNode = + DomainNode; + +/// The auto-id domain client +pub type AutoIdDomainClient = Client; diff --git a/domains/test/service/src/lib.rs b/domains/test/service/src/lib.rs index 6e4c8596de..899d8642f0 100644 --- a/domains/test/service/src/lib.rs +++ b/domains/test/service/src/lib.rs @@ -17,19 +17,15 @@ //! Crate used for testing with Domain. 
#![feature(trait_upcasting)] -#![warn(missing_docs)] pub mod chain_spec; pub mod domain; pub mod keyring; -pub use keyring::Keyring as EcdsaKeyring; -pub use sp_keyring::Sr25519Keyring; - use domain_runtime_primitives::opaque::Block; -use evm_domain_test_runtime::{Address, Signature}; use frame_support::dispatch::{DispatchInfo, PostDispatchInfo}; use frame_system::pallet_prelude::BlockNumberFor; +pub use keyring::Keyring as EcdsaKeyring; use sc_network::config::{NonReservedPeerMode, TransportConfig}; use sc_network::multiaddr; use sc_service::config::{ @@ -41,14 +37,18 @@ use sc_service::{ BasePath, BlocksPruning, ChainSpec, Configuration as ServiceConfiguration, Error as ServiceError, Role, }; +use serde::de::DeserializeOwned; use sp_arithmetic::traits::SaturatedConversion; use sp_blockchain::HeaderBackend; -use sp_core::{ecdsa, keccak_256, Get, Pair, H256}; +use sp_core::{Get, H256}; use sp_domains::DomainId; -use sp_runtime::codec::Encode; +pub use sp_keyring::Sr25519Keyring; +use sp_runtime::codec::{Decode, Encode}; use sp_runtime::generic; use sp_runtime::generic::SignedPayload; use sp_runtime::traits::Dispatchable; +use std::fmt::{Debug, Display}; +use std::str::FromStr; pub use domain::*; pub use evm_domain_test_runtime; @@ -56,6 +56,12 @@ pub use evm_domain_test_runtime; /// The domain id of the genesis domain pub const GENESIS_DOMAIN_ID: DomainId = DomainId::new(0u32); +/// The domain id of the evm domain +pub const EVM_DOMAIN_ID: DomainId = DomainId::new(0u32); + +/// The domain id of the auto-id domain +pub const AUTO_ID_DOMAIN_ID: DomainId = DomainId::new(1u32); + /// Create a domain node `Configuration`. 
/// /// By default an in-memory socket will be used, therefore you need to provide nodes if you want the @@ -65,7 +71,7 @@ pub const GENESIS_DOMAIN_ID: DomainId = DomainId::new(0u32); pub fn node_config( domain_id: DomainId, tokio_handle: tokio::runtime::Handle, - key: EcdsaKeyring, + key_seed: String, nodes: Vec, nodes_exclusive: bool, role: Role, @@ -73,7 +79,6 @@ pub fn node_config( chain_spec: Box, ) -> Result { let root = base_path.path().to_path_buf(); - let key_seed = key.to_seed(); let domain_name = format!("{domain_id:?}"); @@ -170,15 +175,15 @@ type SignedExtraFor = ( ); type UncheckedExtrinsicFor = generic::UncheckedExtrinsic< - Address, + ::Address, ::RuntimeCall, - Signature, + ::Signature, SignedExtraFor, >; type BalanceOf = <::OnChargeTransaction as pallet_transaction_payment::OnChargeTransaction>::Balance; -fn construct_extrinsic_raw_payload( +pub fn construct_extrinsic_raw_payload( client: impl AsRef, function: ::RuntimeCall, immortal: bool, @@ -230,49 +235,87 @@ where ) } -/// Construct a generic extrinsic signed by custom key -pub fn construct_extrinsic_generic_with_custom_key( - client: impl AsRef, - function: impl Into<::RuntimeCall>, - caller: ecdsa::Pair, - immortal: bool, - nonce: u32, - tip: BalanceOf, -) -> UncheckedExtrinsicFor -where - Runtime: frame_system::Config + pallet_transaction_payment::Config + Send + Sync, - Runtime::RuntimeCall: - Dispatchable + Send + Sync, - BalanceOf: Send + Sync + From + sp_runtime::FixedPointOperand, - u64: From>, - Client: HeaderBackend, -{ - let function = function.into(); - let (raw_payload, extra) = - construct_extrinsic_raw_payload(client, function.clone(), immortal, nonce, tip); - let signature = raw_payload.using_encoded(|e| { - let msg = keccak_256(e); - caller.sign_prehashed(&msg) - }); - UncheckedExtrinsicFor::::new_signed( - function.clone(), - caller.public().into(), - Signature::new(signature), - extra, - ) +pub trait DomainRuntime { + type Keyring: Copy; + type AccountId: DeserializeOwned + 
+ Encode + + Decode + + Clone + + Debug + + Display + + FromStr + + Sync + + Send + + 'static; + type Address: Encode + Decode; + type Signature: Encode + Decode; + fn sign(key: Self::Keyring, payload: &[u8]) -> Self::Signature; + fn account_id(key: Self::Keyring) -> Self::AccountId; + fn address(key: Self::Keyring) -> Self::Address; + fn to_seed(key: Self::Keyring) -> String; +} + +impl DomainRuntime for evm_domain_test_runtime::Runtime { + type Keyring = EcdsaKeyring; + type AccountId = evm_domain_test_runtime::AccountId; + type Address = evm_domain_test_runtime::Address; + type Signature = evm_domain_test_runtime::Signature; + + fn sign(key: Self::Keyring, payload: &[u8]) -> Self::Signature { + evm_domain_test_runtime::Signature::new(key.sign(payload)) + } + + fn account_id(key: Self::Keyring) -> Self::AccountId { + key.to_account_id() + } + + fn address(key: Self::Keyring) -> Self::Address { + key.to_account_id() + } + + fn to_seed(key: Self::Keyring) -> String { + key.to_seed() + } +} + +impl DomainRuntime for auto_id_domain_test_runtime::Runtime { + type Keyring = Sr25519Keyring; + type AccountId = auto_id_domain_test_runtime::AccountId; + type Address = auto_id_domain_test_runtime::Address; + type Signature = auto_id_domain_test_runtime::Signature; + + fn sign(key: Self::Keyring, payload: &[u8]) -> Self::Signature { + key.sign(payload).into() + } + + fn account_id(key: Self::Keyring) -> Self::AccountId { + key.to_account_id() + } + + fn address(key: Self::Keyring) -> Self::Address { + sp_runtime::MultiAddress::Id(key.to_account_id()) + } + + fn to_seed(key: Self::Keyring) -> String { + key.to_seed() + } } /// Construct an extrinsic that can be applied to the test runtime. 
pub fn construct_extrinsic_generic( client: impl AsRef, function: impl Into<::RuntimeCall>, - caller: EcdsaKeyring, + caller: ::Keyring, immortal: bool, nonce: u32, tip: BalanceOf, ) -> UncheckedExtrinsicFor where - Runtime: frame_system::Config + pallet_transaction_payment::Config + Send + Sync, + Runtime: frame_system::Config + + pallet_transaction_payment::Config + + DomainRuntime + + Send + + Sync, Runtime::RuntimeCall: Dispatchable + Send + Sync, BalanceOf: Send + Sync + From + sp_runtime::FixedPointOperand, @@ -282,13 +325,9 @@ where let function = function.into(); let (raw_payload, extra) = construct_extrinsic_raw_payload(client, function.clone(), immortal, nonce, tip); - let signature = raw_payload.using_encoded(|e| caller.sign(e)); - UncheckedExtrinsicFor::::new_signed( - function, - caller.to_account_id(), - Signature::new(signature), - extra, - ) + let signature = raw_payload.using_encoded(|e| ::sign(caller, e)); + let address = ::address(caller); + UncheckedExtrinsicFor::::new_signed(function, address, signature, extra) } /// Construct an unsigned extrinsic that can be applied to the test runtime. 
@@ -296,7 +335,11 @@ pub fn construct_unsigned_extrinsic( function: impl Into<::RuntimeCall>, ) -> UncheckedExtrinsicFor where - Runtime: frame_system::Config + pallet_transaction_payment::Config + Send + Sync, + Runtime: frame_system::Config + + pallet_transaction_payment::Config + + DomainRuntime + + Send + + Sync, Runtime::RuntimeCall: Dispatchable + Send + Sync, BalanceOf: Send + Sync + From + sp_runtime::FixedPointOperand, From e6a23ca7fff84f3753d3eb346d1da7c110a6ad28 Mon Sep 17 00:00:00 2001 From: linning Date: Fri, 12 Jul 2024 19:51:13 +0800 Subject: [PATCH 07/37] Enable/rework xdm tests Signed-off-by: linning --- .../src/gossip_worker.rs | 5 + domains/client/domain-operator/Cargo.toml | 1 + domains/client/domain-operator/src/tests.rs | 661 ++++++++---------- 3 files changed, 285 insertions(+), 382 deletions(-) diff --git a/domains/client/cross-domain-message-gossip/src/gossip_worker.rs b/domains/client/cross-domain-message-gossip/src/gossip_worker.rs index 669a87b3b2..c365c34fe5 100644 --- a/domains/client/cross-domain-message-gossip/src/gossip_worker.rs +++ b/domains/client/cross-domain-message-gossip/src/gossip_worker.rs @@ -85,6 +85,11 @@ impl GossipWorkerBuilder { self.chain_sinks.insert(chain_id, sink); } + // Remove the chain sink + pub fn remove_chain_sink(&mut self, chain_id: &ChainId) -> Option { + self.chain_sinks.remove(chain_id) + } + /// Get the gossip message sink pub fn gossip_msg_sink(&self) -> TracingUnboundedSender { self.gossip_msg_sink.clone() diff --git a/domains/client/domain-operator/Cargo.toml b/domains/client/domain-operator/Cargo.toml index 1280e50423..464454e845 100644 --- a/domains/client/domain-operator/Cargo.toml +++ b/domains/client/domain-operator/Cargo.toml @@ -44,6 +44,7 @@ thiserror = "1.0.61" tokio = { version = "1.38.0", features = ["macros"] } [dev-dependencies] +auto-id-domain-test-runtime = { version = "0.1.0", path = "../../test/runtime/auto-id" } cross-domain-message-gossip = { path = 
"../../client/cross-domain-message-gossip" } domain-test-service = { version = "0.1.0", path = "../../test/service" } domain-test-primitives = { version = "0.1.0", path = "../../test/primitives" } diff --git a/domains/client/domain-operator/src/tests.rs b/domains/client/domain-operator/src/tests.rs index 95864dfb27..6f0116431a 100644 --- a/domains/client/domain-operator/src/tests.rs +++ b/domains/client/domain-operator/src/tests.rs @@ -11,7 +11,9 @@ use domain_test_primitives::{OnchainStateApi, TimestampApi}; use domain_test_service::evm_domain_test_runtime::{Header, UncheckedExtrinsic}; use domain_test_service::EcdsaKeyring::{Alice, Bob, Charlie, Eve}; use domain_test_service::Sr25519Keyring::{self, Alice as Sr25519Alice, Ferdie}; -use domain_test_service::{construct_extrinsic_generic, GENESIS_DOMAIN_ID}; +use domain_test_service::{ + construct_extrinsic_generic, AUTO_ID_DOMAIN_ID, EVM_DOMAIN_ID, GENESIS_DOMAIN_ID, +}; use futures::StreamExt; use pallet_messenger::ChainAllowlistUpdate; use sc_client_api::{Backend, BlockBackend, BlockchainEvents, HeaderBackend}; @@ -21,10 +23,11 @@ use sc_service::{BasePath, Role}; use sc_transaction_pool::error::Error as PoolError; use sc_transaction_pool_api::error::Error as TxPoolError; use sc_transaction_pool_api::TransactionPool; +use sc_utils::mpsc::tracing_unbounded; use sp_api::{ProvideRuntimeApi, StorageProof}; use sp_consensus::SyncOracle; use sp_core::storage::StateVersion; -use sp_core::traits::FetchRuntimeCode; +use sp_core::traits::{FetchRuntimeCode, SpawnEssentialNamed}; use sp_core::{Pair, H256}; use sp_domain_digests::AsPredigest; use sp_domains::core_api::DomainCoreApi; @@ -54,7 +57,7 @@ use sp_state_machine::backend::AsTrieBackend; use sp_subspace_mmr::ConsensusChainMmrLeafProof; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; use sp_weights::Weight; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, VecDeque}; use std::sync::Arc; use subspace_core_primitives::PotOutput; use 
subspace_runtime_primitives::opaque::Block as CBlock; @@ -3541,7 +3544,7 @@ async fn test_domain_sudo_calls() { } #[tokio::test(flavor = "multi_thread")] -async fn test_cross_domains_messages_should_work() { +async fn test_xdm_between_consensus_and_domain_should_work() { let directory = TempDir::new().expect("Must be able to create temporary directory"); let mut builder = sc_cli::LoggerBuilder::new(""); @@ -3745,384 +3748,278 @@ async fn test_cross_domains_messages_should_work() { ); } -// TODO: Unlock test when multiple domains are supported in DecEx v2. -// #[tokio::test(flavor = "multi_thread")] -// async fn test_cross_domains_message_should_work() { -// let directory = TempDir::new().expect("Must be able to create temporary directory"); -// -// let mut builder = sc_cli::LoggerBuilder::new(""); -// builder.with_colors(false); -// let _ = builder.init(); -// -// let tokio_handle = tokio::runtime::Handle::current(); -// -// // Start Ferdie -// let mut ferdie = MockConsensusNode::run( -// tokio_handle.clone(), -// Ferdie, -// BasePath::new(directory.path().join("ferdie")), -// ); -// -// // Run Alice (a system domain authority node) -// let mut alice = domain_test_service::DomainNodeBuilder::new( -// tokio_handle.clone(), -// Alice, -// BasePath::new(directory.path().join("alice")), -// ) -// .run_relayer() -// .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) -// .await; -// -// // Run Bob (a core payments domain authority node) -// let mut bob = domain_test_service::DomainNodeBuilder::new( -// tokio_handle.clone(), -// Bob, -// BasePath::new(directory.path().join("bob")), -// ) -// .run_relayer() -// .build_core_payments_node(Role::Authority, &mut ferdie, &alice) -// .await; -// -// // Run Charlie (a core eth relay domain authority node) -// let mut charlie = domain_test_service::DomainNodeBuilder::new( -// tokio_handle.clone(), -// Charlie, -// BasePath::new(directory.path().join("charlie")), -// ) -// .run_relayer() -// 
.build_core_eth_relay_node(Role::Authority, &mut ferdie, &alice) -// .await; -// -// // Run the cross domain gossip message worker -// ferdie.start_cross_domain_gossip_message_worker(); -// -// produce_blocks!(ferdie, alice, bob, charlie, 3) -// .await -// .unwrap(); -// -// // Open channel between the system domain and the core payments domain -// let fee_model = FeeModel { -// outbox_fee: ExecutionFee { -// relayer_pool_fee: 2, -// compute_fee: 0, -// }, -// inbox_fee: ExecutionFee { -// relayer_pool_fee: 0, -// compute_fee: 5, -// }, -// }; -// bob.construct_and_send_extrinsic(pallet_sudo::Call::sudo { -// call: Box::new(core_payments_domain_test_runtime::RuntimeCall::Messenger( -// pallet_messenger::Call::initiate_channel { -// dst_domain_id: DomainId::SYSTEM, -// params: InitiateChannelParams { -// max_outgoing_messages: 100, -// fee_model, -// }, -// }, -// )), -// }) -// .await -// .expect("Failed to construct and send extrinsic"); -// // Wait until channel open -// produce_blocks_until!(ferdie, alice, bob, { -// alice -// .get_open_channel_for_domain(DomainId::CORE_PAYMENTS) -// .is_some() -// && bob.get_open_channel_for_domain(DomainId::SYSTEM).is_some() -// }) -// .await -// .unwrap(); -// -// // Transfer balance cross the system domain and the core payments domain -// let pre_alice_free_balance = alice.free_balance(alice.key.to_account_id()); -// let pre_bob_free_balance = bob.free_balance(bob.key.to_account_id()); -// let transfer_amount = 10; -// alice -// .construct_and_send_extrinsic(pallet_transporter::Call::transfer { -// dst_location: pallet_transporter::Location { -// domain_id: DomainId::CORE_PAYMENTS, -// account_id: AccountIdConverter::convert(Bob.into()), -// }, -// amount: transfer_amount, -// }) -// .await -// .expect("Failed to construct and send extrinsic"); -// // Wait until transfer succeed -// produce_blocks_until!(ferdie, alice, bob, charlie, { -// let post_alice_free_balance = alice.free_balance(alice.key.to_account_id()); -// let 
post_bob_free_balance = bob.free_balance(bob.key.to_account_id()); -// -// post_alice_free_balance -// == pre_alice_free_balance -// - transfer_amount -// - fee_model.outbox_fee().unwrap() -// - fee_model.inbox_fee().unwrap() -// && post_bob_free_balance == pre_bob_free_balance + transfer_amount -// }) -// .await -// .unwrap(); -// -// // Open channel between the core payments domain and the core eth relay domain -// let fee_model = FeeModel { -// outbox_fee: ExecutionFee { -// relayer_pool_fee: 1, -// compute_fee: 5, -// }, -// inbox_fee: ExecutionFee { -// relayer_pool_fee: 2, -// compute_fee: 3, -// }, -// }; -// charlie -// .construct_and_send_extrinsic(pallet_sudo::Call::sudo { -// call: Box::new(core_eth_relay_domain_test_runtime::RuntimeCall::Messenger( -// pallet_messenger::Call::initiate_channel { -// dst_domain_id: DomainId::CORE_PAYMENTS, -// params: InitiateChannelParams { -// max_outgoing_messages: 100, -// fee_model, -// }, -// }, -// )), -// }) -// .await -// .expect("Failed to construct and send extrinsic"); -// // Wait until channel open -// produce_blocks_until!(ferdie, alice, bob, charlie, { -// bob.get_open_channel_for_domain(DomainId::CORE_ETH_RELAY) -// .is_some() -// && charlie -// .get_open_channel_for_domain(DomainId::CORE_PAYMENTS) -// .is_some() -// }) -// .await -// .unwrap(); -// -// // Transfer balance cross the core payments domain and the core eth relay domain -// let pre_bob_free_balance = bob.free_balance(bob.key.to_account_id()); -// let pre_charlie_free_balance = charlie.free_balance(charlie.key.to_account_id()); -// let transfer_amount = 10; -// bob.construct_and_send_extrinsic(pallet_transporter::Call::transfer { -// dst_location: pallet_transporter::Location { -// domain_id: DomainId::CORE_ETH_RELAY, -// account_id: AccountIdConverter::convert(Charlie.into()), -// }, -// amount: transfer_amount, -// }) -// .await -// .expect("Failed to construct and send extrinsic"); -// // Wait until transfer succeed -// 
produce_blocks_until!(ferdie, alice, bob, charlie, { -// let post_bob_free_balance = bob.free_balance(bob.key.to_account_id()); -// let post_charlie_free_balance = charlie.free_balance(charlie.key.to_account_id()); -// -// post_bob_free_balance -// == pre_bob_free_balance -// - transfer_amount -// - fee_model.outbox_fee().unwrap() -// - fee_model.inbox_fee().unwrap() -// && post_charlie_free_balance == pre_charlie_free_balance + transfer_amount -// }) -// .await -// .unwrap(); -// } - -// TODO: Unlock test when multiple domains are supported in DecEx v2. -// #[tokio::test(flavor = "multi_thread")] -// async fn test_unordered_cross_domains_message_should_work() { -// let directory = TempDir::new().expect("Must be able to create temporary directory"); - -// let mut builder = sc_cli::LoggerBuilder::new(""); -// builder.with_colors(false); -// let _ = builder.init(); - -// let tokio_handle = tokio::runtime::Handle::current(); - -// // Start Ferdie -// let mut ferdie = MockConsensusNode::run( -// tokio_handle.clone(), -// Ferdie, -// BasePath::new(directory.path().join("ferdie")), -// ); - -// // Run Alice (a system domain authority node) -// let mut alice = domain_test_service::DomainNodeBuilder::new( -// tokio_handle.clone(), -// Alice, -// BasePath::new(directory.path().join("alice")), -// ) -// .run_relayer() -// .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) -// .await; - -// // Run Bob (a core payments domain authority node) -// let mut bob = domain_test_service::DomainNodeBuilder::new( -// tokio_handle.clone(), -// Bob, -// BasePath::new(directory.path().join("bob")), -// ) -// .run_relayer() -// .build_core_payments_node(Role::Authority, &mut ferdie, &alice) -// .await; - -// // Run Charlie (a core eth relay domain full node) and don't its relayer worker -// let charlie = domain_test_service::DomainNodeBuilder::new( -// tokio_handle.clone(), -// Charlie, -// BasePath::new(directory.path().join("charlie")), -// ) -// 
.build_core_payments_node(Role::Full, &mut ferdie, &alice) -// .await; -// let gossip_msg_sink = ferdie.xdm_gossip_worker_builder().gossip_msg_sink(); - -// // Run the cross domain gossip message worker -// ferdie.start_cross_domain_gossip_message_worker(); - -// produce_blocks!(ferdie, alice, bob, 3).await.unwrap(); - -// // Open channel between the system domain and the core payments domain -// let fee_model = FeeModel { -// outbox_fee: ExecutionFee { -// relayer_pool_fee: 2, -// compute_fee: 0, -// }, -// inbox_fee: ExecutionFee { -// relayer_pool_fee: 0, -// compute_fee: 5, -// }, -// }; -// bob.construct_and_send_extrinsic(pallet_sudo::Call::sudo { -// call: Box::new(core_payments_domain_test_runtime::RuntimeCall::Messenger( -// pallet_messenger::Call::initiate_channel { -// dst_domain_id: DomainId::SYSTEM, -// params: InitiateChannelParams { -// max_outgoing_messages: 1000, -// fee_model, -// }, -// }, -// )), -// }) -// .await -// .expect("Failed to construct and send extrinsic"); -// // Wait until channel open -// produce_blocks_until!(ferdie, alice, bob, charlie, { -// alice -// .get_open_channel_for_domain(DomainId::CORE_PAYMENTS) -// .is_some() -// && bob.get_open_channel_for_domain(DomainId::SYSTEM).is_some() -// }) -// .await -// .unwrap(); - -// // Register `charlie` as relayer such that message will assign to it, but as its relayer -// // is not started these massage won't be relayed. -// bob.construct_and_send_extrinsic(pallet_messenger::Call::join_relayer_set { -// relayer_id: Charlie.into(), -// }) -// .await -// .expect("Failed to construct and send extrinsic"); -// produce_blocks!(ferdie, alice, bob, charlie, 3) -// .await -// .unwrap(); - -// // Create cross domain message, only message assigned to `alice` and `bob` will be relayed -// // and send to tx pool, and these message is unordered because the message assigned to `charlie` -// // is not relayed. 
-// let relayer_id: AccountId = Charlie.into(); -// let alice_transfer_amount = 1; -// let bob_transfer_amount = 2; -// let pre_alice_free_balance = alice.free_balance(alice.key.to_account_id()); -// let pre_bob_free_balance = bob.free_balance(bob.key.to_account_id()); -// let mut alice_account_nonce = alice.account_nonce(); -// let mut bob_account_nonce = bob.account_nonce(); -// // Assigne `inbox_response` message to `charlie` -// for _ in 0..10 { -// let tx = alice.construct_extrinsic( -// alice_account_nonce, -// pallet_transporter::Call::transfer { -// dst_location: pallet_transporter::Location { -// domain_id: DomainId::CORE_PAYMENTS, -// account_id: AccountIdConverter::convert(Bob.into()), -// }, -// amount: alice_transfer_amount, -// }, -// ); -// alice -// .send_extrinsic(tx) -// .await -// .expect("Failed to send extrinsic"); -// alice_account_nonce += 1; - -// produce_blocks!(ferdie, alice, bob, charlie, 1) -// .await -// .unwrap(); -// } -// // Assigne `outbox` message to `charlie` -// for _ in 0..10 { -// let tx = bob.construct_extrinsic( -// bob_account_nonce, -// pallet_transporter::Call::transfer { -// dst_location: pallet_transporter::Location { -// domain_id: DomainId::SYSTEM, -// account_id: AccountIdConverter::convert(Alice.into()), -// }, -// amount: bob_transfer_amount, -// }, -// ); -// bob.send_extrinsic(tx) -// .await -// .expect("Failed to send extrinsic"); -// bob_account_nonce += 1; - -// produce_blocks!(ferdie, alice, bob, charlie, 1) -// .await -// .unwrap(); -// } - -// // Run charlie's relayer worker, the message assigned to `charlie` will be relayed -// // and send to tx pool now -// let relayer_worker = domain_client_message_relayer::worker::relay_core_domain_messages::< -// _, -// _, -// PBlock, -// _, -// _, -// _, -// _, -// _, -// >( -// relayer_id, -// charlie.client.clone(), -// alice.client.clone(), -// alice.sync_service.clone(), -// charlie.sync_service.clone(), -// gossip_msg_sink, -// ); -// bob.task_manager -// 
.spawn_essential_handle() -// .spawn_essential_blocking( -// "core-domain-relayer-charlie", -// None, -// Box::pin(relayer_worker), -// ); - -// // Wait until all message are relayed and handled -// let fee = fee_model.outbox_fee().unwrap() + fee_model.inbox_fee().unwrap(); -// produce_blocks_until!(ferdie, alice, bob, { -// let post_alice_free_balance = alice.free_balance(alice.key.to_account_id()); -// let post_bob_free_balance = bob.free_balance(bob.key.to_account_id()); - -// post_alice_free_balance -// == pre_alice_free_balance - alice_transfer_amount * 10 + bob_transfer_amount * 10 -// - fee * 10 -// && post_bob_free_balance -// == pre_bob_free_balance - bob_transfer_amount * 10 + alice_transfer_amount * 10 -// - fee * 10 -// }) -// .await -// .unwrap(); -// } +#[tokio::test(flavor = "multi_thread")] +async fn test_xdm_between_domains_should_work() { + let directory = TempDir::new().expect("Must be able to create temporary directory"); + + let mut builder = sc_cli::LoggerBuilder::new(""); + builder.with_colors(false); + let _ = builder.init(); + + let tokio_handle = tokio::runtime::Handle::current(); + + // Start Ferdie + let mut ferdie = MockConsensusNode::run( + tokio_handle.clone(), + Sr25519Alice, + BasePath::new(directory.path().join("ferdie")), + ); + + // Run Alice (a system domain authority node) + let mut alice = domain_test_service::DomainNodeBuilder::new( + tokio_handle.clone(), + Alice, + BasePath::new(directory.path().join("alice")), + ) + .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .await; + + // Run Bob (a auto-id domain authority node) + let mut bob = domain_test_service::DomainNodeBuilder::new( + tokio_handle.clone(), + Bob, + BasePath::new(directory.path().join("bob")), + ) + .build_auto_id_node(Sr25519Keyring::Bob, Role::Authority, &mut ferdie) + .await; + + // Run the cross domain gossip message worker + ferdie.start_cross_domain_gossip_message_worker(); + + produce_blocks!(ferdie, alice, 3, bob).await.unwrap(); + + 
// add the auto-id domain to the evm domain's chain allowlist
+    ferdie
+        .construct_and_send_extrinsic_with(subspace_test_runtime::RuntimeCall::Messenger(
+            pallet_messenger::Call::initiate_domain_update_chain_allowlist {
+                domain_id: EVM_DOMAIN_ID,
+                update: ChainAllowlistUpdate::Add(ChainId::Domain(AUTO_ID_DOMAIN_ID)),
+            },
+        ))
+        .await
+        .expect("Failed to construct and send domain chain allowlist update");
+
+    // produce another block so the allowlist on the domain is updated
+    produce_blocks!(ferdie, alice, 1, bob).await.unwrap();
+
+    // add the evm domain to the auto-id domain's chain allowlist
+    ferdie
+        .construct_and_send_extrinsic_with(subspace_test_runtime::RuntimeCall::Messenger(
+            pallet_messenger::Call::initiate_domain_update_chain_allowlist {
+                domain_id: AUTO_ID_DOMAIN_ID,
+                update: ChainAllowlistUpdate::Add(ChainId::Domain(EVM_DOMAIN_ID)),
+            },
+        ))
+        .await
+        .expect("Failed to construct and send domain chain allowlist update");
+
+    // produce another block so the allowlist on the domain is updated
+    produce_blocks!(ferdie, alice, 1, bob).await.unwrap();
+
+    // Open channel between the evm domain and the auto-id domain
+    bob.construct_and_send_extrinsic(auto_id_domain_test_runtime::RuntimeCall::Messenger(
+        pallet_messenger::Call::initiate_channel {
+            dst_chain_id: ChainId::Domain(EVM_DOMAIN_ID),
+            params: pallet_messenger::InitiateChannelParams {
+                max_outgoing_messages: 100,
+            },
+        },
+    ))
+    .await
+    .expect("Failed to construct and send extrinsic");
+
+    // Wait until the channel is open on both domains
+    produce_blocks_until!(
+        ferdie,
+        alice,
+        {
+            alice
+                .get_open_channel_for_chain(ChainId::Domain(AUTO_ID_DOMAIN_ID))
+                .is_some()
+                && bob
+                    .get_open_channel_for_chain(ChainId::Domain(EVM_DOMAIN_ID))
+                    .is_some()
+        },
+        bob
+    )
+    .await
+    .unwrap();
+
+    // Transfer balance across the evm domain and the auto-id domain
+    let pre_alice_free_balance = alice.free_balance(alice.key.to_account_id());
+    let pre_bob_free_balance = bob.free_balance(bob.key.to_account_id());
+    let transfer_amount = 10;
+    alice
+        .construct_and_send_extrinsic(pallet_transporter::Call::transfer {
+            dst_location: pallet_transporter::Location {
+                chain_id: ChainId::Domain(AUTO_ID_DOMAIN_ID),
+                account_id: AccountIdConverter::convert(Sr25519Keyring::Bob.into()),
+            },
+            amount: transfer_amount,
+        })
+        .await
+        .expect("Failed to construct and send extrinsic");
+    // Wait until the transfer succeeds
+    produce_blocks_until!(
+        ferdie,
+        alice,
+        {
+            let post_alice_free_balance = alice.free_balance(alice.key.to_account_id());
+            let post_bob_free_balance = bob.free_balance(bob.key.to_account_id());
+
+            post_alice_free_balance <= pre_alice_free_balance - transfer_amount
+                && post_bob_free_balance == pre_bob_free_balance + transfer_amount
+        },
+        bob
+    )
+    .await
+    .unwrap();
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn test_unordered_cross_domains_message_should_work() {
+    let directory = TempDir::new().expect("Must be able to create temporary directory");
+
+    let mut builder = sc_cli::LoggerBuilder::new("");
+    builder.with_colors(false);
+    let _ = builder.init();
+
+    let tokio_handle = tokio::runtime::Handle::current();
+
+    // Start Ferdie
+    let mut ferdie = MockConsensusNode::run(
+        tokio_handle.clone(),
+        Sr25519Alice,
+        BasePath::new(directory.path().join("ferdie")),
+    );
+
+    // Run Alice (an evm domain authority node)
+    let mut alice = domain_test_service::DomainNodeBuilder::new(
+        tokio_handle.clone(),
+        Alice,
+        BasePath::new(directory.path().join("alice")),
+    )
+    .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie)
+    .await;
+
+    let evm_domain_tx_pool_sink = ferdie
+        .xdm_gossip_worker_builder()
+        .remove_chain_sink(&ChainId::Domain(EVM_DOMAIN_ID))
+        .unwrap();
+    let (reorder_xdm_sink, mut reorder_xdm_receiver) = tracing_unbounded("reorder_xdm", 100);
+    ferdie
+        .xdm_gossip_worker_builder()
+        .push_chain_sink(ChainId::Domain(EVM_DOMAIN_ID), reorder_xdm_sink);
+
+    alice
+        .task_manager
+        .spawn_essential_handle()
+        .spawn_essential_blocking(
+            "reordering-xdm",
None, + Box::pin(async move { + let mut i = 0; + let mut msg_buffer = VecDeque::new(); + while let Some(xdm) = reorder_xdm_receiver.next().await { + if i % 3 == 0 { + msg_buffer.push_back(xdm); + if let Some(xdm) = msg_buffer.pop_front() { + if i % 2 == 0 { + evm_domain_tx_pool_sink.unbounded_send(xdm).unwrap(); + } + } + } else { + evm_domain_tx_pool_sink.unbounded_send(xdm).unwrap(); + } + i += 1; + } + }), + ); + + // Run the cross domain gossip message worker + ferdie.start_cross_domain_gossip_message_worker(); + + produce_blocks!(ferdie, alice, 3).await.unwrap(); + + // add domain to consensus chain allowlist + ferdie + .construct_and_send_extrinsic_with(pallet_sudo::Call::sudo { + call: Box::new(subspace_test_runtime::RuntimeCall::Messenger( + pallet_messenger::Call::update_consensus_chain_allowlist { + update: ChainAllowlistUpdate::Add(ChainId::Domain(GENESIS_DOMAIN_ID)), + }, + )), + }) + .await + .expect("Failed to construct and send consensus chain allowlist update"); + + // produce another block so allowlist on consensus is updated + produce_blocks!(ferdie, alice, 1).await.unwrap(); + + // add consensus chain to domain chain allow list + ferdie + .construct_and_send_extrinsic_with(subspace_test_runtime::RuntimeCall::Messenger( + pallet_messenger::Call::initiate_domain_update_chain_allowlist { + domain_id: GENESIS_DOMAIN_ID, + update: ChainAllowlistUpdate::Add(ChainId::Consensus), + }, + )) + .await + .expect("Failed to construct and send domain chain allowlist update"); + + // produce another block so allowlist on domain are updated + produce_blocks!(ferdie, alice, 1).await.unwrap(); + + // Open channel between the evm domain and the auto-id domain + alice + .construct_and_send_extrinsic(evm_domain_test_runtime::RuntimeCall::Messenger( + pallet_messenger::Call::initiate_channel { + dst_chain_id: ChainId::Consensus, + params: pallet_messenger::InitiateChannelParams { + max_outgoing_messages: 100, + }, + }, + )) + .await + .expect("Failed to construct and 
send extrinsic"); + + // Wait until channel open + produce_blocks_until!(ferdie, alice, { + alice + .get_open_channel_for_chain(ChainId::Consensus) + .is_some() + }) + .await + .unwrap(); + + // Transfer balance cross the system domain and the core payments domain + let pre_alice_free_balance = alice.free_balance(alice.key.to_account_id()); + let pre_ferdie_free_balance = ferdie.free_balance(ferdie.key.to_account_id()); + let transfer_amount = 10; + for _ in 0..20 { + ferdie + .construct_and_send_extrinsic_with(pallet_transporter::Call::transfer { + dst_location: pallet_transporter::Location { + chain_id: ChainId::Domain(GENESIS_DOMAIN_ID), + account_id: AccountId20Converter::convert(Alice.to_account_id()), + }, + amount: transfer_amount, + }) + .await + .expect("Failed to construct and send extrinsic"); + produce_blocks!(ferdie, alice, 1).await.unwrap(); + } + // Wait until transfer succeed + produce_blocks_until!(ferdie, alice, { + let post_alice_free_balance = alice.free_balance(alice.key.to_account_id()); + let post_ferdie_free_balance = ferdie.free_balance(ferdie.key.to_account_id()); + + post_alice_free_balance == pre_alice_free_balance + transfer_amount * 20 + && post_ferdie_free_balance <= pre_ferdie_free_balance - transfer_amount * 20 + }) + .await + .unwrap(); +} #[tokio::test(flavor = "multi_thread")] // TODO: https://github.com/subspace/subspace/pull/1954 broke this on Windows, we suspect the test From c9fb510807b06364e334a284c721c0fbf1b39b07 Mon Sep 17 00:00:00 2001 From: linning Date: Fri, 12 Jul 2024 20:08:45 +0800 Subject: [PATCH 08/37] Minor refactoring/clean up to the domain integration tests Signed-off-by: linning --- Cargo.lock | 50 +++++ crates/sp-domains-fraud-proof/src/tests.rs | 28 ++- domains/client/domain-operator/src/tests.rs | 220 ++++++++------------ domains/test/service/src/domain.rs | 16 +- domains/test/service/src/lib.rs | 3 - 5 files changed, 150 insertions(+), 167 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
06c836a87a..be42f65e86 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1158,6 +1158,53 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "auto-id-domain-test-runtime" +version = "0.1.0" +dependencies = [ + "domain-pallet-executive", + "domain-runtime-primitives", + "domain-test-primitives", + "frame-benchmarking", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "pallet-auto-id", + "pallet-balances", + "pallet-block-fees", + "pallet-domain-id", + "pallet-domain-sudo", + "pallet-messenger", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-transporter", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-core", + "sp-domain-sudo", + "sp-domains", + "sp-genesis-builder", + "sp-inherents", + "sp-messenger", + "sp-messenger-host-functions", + "sp-mmr-primitives", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-storage", + "sp-subspace-mmr", + "sp-transaction-pool", + "sp-version", + "subspace-core-primitives", + "subspace-runtime-primitives", + "substrate-wasm-builder", +] + [[package]] name = "auto_impl" version = "1.2.0" @@ -2728,6 +2775,7 @@ dependencies = [ name = "domain-client-operator" version = "0.1.0" dependencies = [ + "auto-id-domain-test-runtime", "cross-domain-message-gossip", "domain-block-builder", "domain-block-preprocessor", @@ -2936,6 +2984,7 @@ dependencies = [ name = "domain-test-service" version = "0.1.0" dependencies = [ + "auto-id-domain-test-runtime", "cross-domain-message-gossip", "domain-client-operator", "domain-runtime-primitives", @@ -13171,6 +13220,7 @@ dependencies = [ name = "subspace-test-client" version = "0.1.0" dependencies = [ + "auto-id-domain-test-runtime", "domain-runtime-primitives", "evm-domain-test-runtime", "fp-evm", diff --git a/crates/sp-domains-fraud-proof/src/tests.rs b/crates/sp-domains-fraud-proof/src/tests.rs index a6739b178b..a7b13067d1 
100644 --- a/crates/sp-domains-fraud-proof/src/tests.rs +++ b/crates/sp-domains-fraud-proof/src/tests.rs @@ -8,7 +8,7 @@ use domain_test_service::evm_domain_test_runtime::{ }; use domain_test_service::EcdsaKeyring::{Alice, Charlie}; use domain_test_service::Sr25519Keyring::Ferdie; -use domain_test_service::{construct_extrinsic_raw_payload, EvmDomainNode, GENESIS_DOMAIN_ID}; +use domain_test_service::{construct_extrinsic_raw_payload, EvmDomainNode}; use ethereum::TransactionV2 as Transaction; use fp_rpc::EthereumRuntimeRPCApi; use frame_support::pallet_prelude::DispatchClass; @@ -295,10 +295,9 @@ async fn domain_bundle_storage_proof_benchmark() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let (recorded_keys, storage_proof) = benchmark_bundle_with_evm_tx(400, alice, ferdie).await; @@ -335,10 +334,9 @@ async fn storage_change_of_the_same_runtime_instance_should_perserved_cross_runt // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let best_hash = alice.client.as_ref().info().best_hash; @@ -529,10 +527,9 @@ async fn check_bundle_validity_runtime_api_should_work() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let mut alice_nonce = alice.account_nonce(); @@ -687,10 +684,9 @@ async fn 
test_evm_domain_block_fee() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 3).await.unwrap(); @@ -807,10 +803,10 @@ async fn test_evm_domain_block_fee() { // // Run Alice (a evm domain authority node) // let mut alice = domain_test_service::DomainNodeBuilder::new( // tokio_handle.clone(), -// Alice, +// // BasePath::new(directory.path().join("alice")), // ) -// .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) +// .build_evm_node(Role::Authority, Alice, &mut ferdie) // .await; // // // Run Bob (a evm domain full node) @@ -819,7 +815,7 @@ async fn test_evm_domain_block_fee() { // Bob, // BasePath::new(directory.path().join("bob")), // ) -// .build_evm_node(Role::Full, GENESIS_DOMAIN_ID, &mut ferdie) +// .build_evm_node(Role::Full, Alice, &mut ferdie) // .await; // // // Bob is able to sync blocks. @@ -1101,10 +1097,10 @@ async fn test_evm_domain_block_fee() { // // Run Alice (a evm domain authority node) // let mut alice = domain_test_service::DomainNodeBuilder::new( // tokio_handle.clone(), -// Alice, +// // BasePath::new(directory.path().join("alice")), // ) -// .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) +// .build_evm_node(Role::Authority, Alice, &mut ferdie) // .await; // // // Run Bob (a evm domain full node) @@ -1113,7 +1109,7 @@ async fn test_evm_domain_block_fee() { // Bob, // BasePath::new(directory.path().join("bob")), // ) -// .build_evm_node(Role::Full, GENESIS_DOMAIN_ID, &mut ferdie) +// .build_evm_node(Role::Full, Alice, &mut ferdie) // .await; // // // Bob is able to sync blocks. 
@@ -1294,7 +1290,7 @@ async fn test_evm_domain_block_fee() { // // Run Alice (a system domain authority node) // let mut alice = domain_test_service::DomainNodeBuilder::new( // tokio_handle.clone(), -// Alice, +// // BasePath::new(directory.path().join("alice")), // ) // .build_evm_node(Role::Authority, &mut ferdie) diff --git a/domains/client/domain-operator/src/tests.rs b/domains/client/domain-operator/src/tests.rs index 6f0116431a..bfb9c3e573 100644 --- a/domains/client/domain-operator/src/tests.rs +++ b/domains/client/domain-operator/src/tests.rs @@ -11,9 +11,7 @@ use domain_test_primitives::{OnchainStateApi, TimestampApi}; use domain_test_service::evm_domain_test_runtime::{Header, UncheckedExtrinsic}; use domain_test_service::EcdsaKeyring::{Alice, Bob, Charlie, Eve}; use domain_test_service::Sr25519Keyring::{self, Alice as Sr25519Alice, Ferdie}; -use domain_test_service::{ - construct_extrinsic_generic, AUTO_ID_DOMAIN_ID, EVM_DOMAIN_ID, GENESIS_DOMAIN_ID, -}; +use domain_test_service::{construct_extrinsic_generic, AUTO_ID_DOMAIN_ID, EVM_DOMAIN_ID}; use futures::StreamExt; use pallet_messenger::ChainAllowlistUpdate; use sc_client_api::{Backend, BlockBackend, BlockchainEvents, HeaderBackend}; @@ -95,17 +93,16 @@ async fn test_domain_instance_bootstrapper() { let expected_genesis_state_root = ferdie .client .runtime_api() - .genesis_state_root(ferdie.client.info().best_hash, GENESIS_DOMAIN_ID) + .genesis_state_root(ferdie.client.info().best_hash, EVM_DOMAIN_ID) .unwrap() .unwrap(); // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let genesis_state_root = *alice @@ -142,10 +139,9 @@ async fn test_domain_chain_fork_choice() { // Run Alice (a evm domain authority node) let alice = 
domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 3).await.unwrap(); @@ -216,10 +212,9 @@ async fn test_domain_block_production() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; for i in 0..50 { @@ -323,10 +318,9 @@ async fn test_processing_empty_consensus_block() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let domain_block_processor = DomainBlockProcessor { @@ -410,10 +404,9 @@ async fn test_domain_block_deriving_from_multiple_bundles() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 3).await.unwrap(); @@ -484,10 +477,9 @@ async fn collected_receipts_should_be_on_the_same_branch_with_current_best_block // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut consensus_node) + .build_evm_node(Role::Authority, Alice, &mut consensus_node) .await; 
produce_blocks!(consensus_node, alice, 3) @@ -656,20 +648,18 @@ async fn test_domain_tx_propagate() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; // Run Bob (a evm domain full node) let mut bob = domain_test_service::DomainNodeBuilder::new( tokio_handle, - Bob, BasePath::new(directory.path().join("bob")), ) .connect_to_domain_node(alice.addr.clone()) - .build_evm_node(Role::Full, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Full, Bob, &mut ferdie) .await; produce_blocks!(ferdie, alice, 5, bob).await.unwrap(); @@ -730,19 +720,17 @@ async fn test_executor_full_node_catching_up() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; // Run Bob (a evm domain full node) let bob = domain_test_service::DomainNodeBuilder::new( tokio_handle, - Bob, BasePath::new(directory.path().join("bob")), ) - .build_evm_node(Role::Full, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Full, Bob, &mut ferdie) .await; // Bob is able to sync blocks. 
@@ -782,19 +770,17 @@ async fn test_executor_inherent_timestamp_is_set() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; // Run Bob who runs the authority node for core domain let bob = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Bob, BasePath::new(directory.path().join("bob")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Bob, &mut ferdie) .await; produce_blocks!(ferdie, alice, 1, bob).await.unwrap(); @@ -837,10 +823,9 @@ async fn test_bad_invalid_state_transition_proof_is_rejected() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let fraud_proof_generator = FraudProofGenerator::new( @@ -930,7 +915,7 @@ async fn test_bad_invalid_state_transition_proof_is_rejected() { let mut fraud_proof = fraud_proof_generator .generate_invalid_state_transition_proof( - GENESIS_DOMAIN_ID, + EVM_DOMAIN_ID, execution_phase, &valid_receipt, dummy_execution_trace.len(), @@ -1098,10 +1083,9 @@ async fn test_invalid_state_transition_proof_creation_and_verification( // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -1274,10 +1258,9 @@ async fn 
test_true_invalid_bundles_inherent_extrinsic_proof_creation_and_verific // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -1418,10 +1401,9 @@ async fn test_false_invalid_bundles_inherent_extrinsic_proof_creation_and_verifi // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -1531,10 +1513,9 @@ async fn test_true_invalid_bundles_illegal_xdm_proof_creation_and_verification() // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -1560,7 +1541,7 @@ async fn test_true_invalid_bundles_illegal_xdm_proof_creation_and_verification() pallet_messenger::Call::relay_message { msg: CrossDomainMessage { src_chain_id: ChainId::Consensus, - dst_chain_id: ChainId::Domain(GENESIS_DOMAIN_ID), + dst_chain_id: ChainId::Domain(EVM_DOMAIN_ID), channel_id: Default::default(), nonce: Default::default(), proof: Proof::Domain { @@ -1686,10 +1667,9 @@ async fn test_true_invalid_bundles_illegal_extrinsic_proof_creation_and_verifica // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), 
) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -1850,10 +1830,9 @@ async fn test_false_invalid_bundles_illegal_extrinsic_proof_creation_and_verific // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -1983,10 +1962,9 @@ async fn test_true_invalid_bundle_weight_proof_creation_and_verification() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -2103,10 +2081,9 @@ async fn test_false_invalid_bundle_weight_proof_creation_and_verification() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -2214,10 +2191,9 @@ async fn test_false_invalid_bundles_non_exist_extrinsic_proof_creation_and_verif // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -2326,10 
+2302,9 @@ async fn test_invalid_block_fees_proof_creation() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -2408,10 +2383,9 @@ async fn test_invalid_transfers_fraud_proof() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -2510,10 +2484,9 @@ async fn test_invalid_domain_block_hash_proof_creation() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -2607,10 +2580,9 @@ async fn test_invalid_domain_extrinsics_root_proof_creation() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -2704,10 +2676,9 @@ async fn test_domain_block_builder_include_ext_with_failed_execution() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - 
.build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 1).await.unwrap(); @@ -2788,10 +2759,9 @@ async fn test_domain_block_builder_include_ext_with_failed_predispatch() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 1).await.unwrap(); @@ -2893,10 +2863,9 @@ async fn test_valid_bundle_proof_generation_and_verification() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; for i in 0..3 { @@ -3047,10 +3016,9 @@ async fn set_new_code_should_work() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 1).await.unwrap(); @@ -3118,19 +3086,17 @@ async fn pallet_domains_unsigned_extrinsics_should_work() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; // Run Bob (a evm domain full node) let bob = domain_test_service::DomainNodeBuilder::new( tokio_handle, - Bob, 
BasePath::new(directory.path().join("bob")), ) - .build_evm_node(Role::Full, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Full, Bob, &mut ferdie) .await; produce_blocks!(ferdie, alice, 1).await.unwrap(); @@ -3215,10 +3181,9 @@ async fn stale_and_in_future_bundle_should_be_rejected() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 10).await.unwrap(); @@ -3235,7 +3200,7 @@ async fn stale_and_in_future_bundle_should_be_rejected() { let operator_id = 0; let mut bundle_producer = { let domain_bundle_proposer = DomainBundleProposer::new( - GENESIS_DOMAIN_ID, + EVM_DOMAIN_ID, alice.client.clone(), ferdie.client.clone(), alice.operator.transaction_pool.clone(), @@ -3243,7 +3208,7 @@ async fn stale_and_in_future_bundle_should_be_rejected() { let (bundle_sender, _bundle_receiver) = sc_utils::mpsc::tracing_unbounded("domain_bundle_stream", 100); DomainBundleProducer::new( - GENESIS_DOMAIN_ID, + EVM_DOMAIN_ID, ferdie.client.clone(), alice.client.clone(), domain_bundle_proposer, @@ -3378,10 +3343,9 @@ async fn existing_bundle_can_be_resubmitted_to_new_fork() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 3).await.unwrap(); @@ -3438,10 +3402,9 @@ async fn test_domain_sudo_calls() { // Run Alice (an evm domain) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, 
GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; // Run the cross domain gossip message worker @@ -3454,7 +3417,7 @@ async fn test_domain_sudo_calls() { .construct_and_send_extrinsic_with(pallet_sudo::Call::sudo { call: Box::new(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::update_consensus_chain_allowlist { - update: ChainAllowlistUpdate::Add(ChainId::Domain(GENESIS_DOMAIN_ID)), + update: ChainAllowlistUpdate::Add(ChainId::Domain(EVM_DOMAIN_ID)), }, )), }) @@ -3468,7 +3431,7 @@ async fn test_domain_sudo_calls() { ferdie .construct_and_send_extrinsic_with(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::initiate_domain_update_chain_allowlist { - domain_id: GENESIS_DOMAIN_ID, + domain_id: EVM_DOMAIN_ID, update: ChainAllowlistUpdate::Add(ChainId::Consensus), }, )) @@ -3522,7 +3485,7 @@ async fn test_domain_sudo_calls() { .construct_and_send_extrinsic_with(pallet_sudo::Call::sudo { call: Box::new(subspace_test_runtime::RuntimeCall::Domains( pallet_domains::Call::send_domain_sudo_call { - domain_id: GENESIS_DOMAIN_ID, + domain_id: EVM_DOMAIN_ID, call: sudo_unsigned_extrinsic, }, )), @@ -3563,10 +3526,9 @@ async fn test_xdm_between_consensus_and_domain_should_work() { // Run Alice (an evm domain) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; // Run the cross domain gossip message worker @@ -3579,7 +3541,7 @@ async fn test_xdm_between_consensus_and_domain_should_work() { .construct_and_send_extrinsic_with(pallet_sudo::Call::sudo { call: Box::new(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::update_consensus_chain_allowlist { - update: ChainAllowlistUpdate::Add(ChainId::Domain(GENESIS_DOMAIN_ID)), + update: 
ChainAllowlistUpdate::Add(ChainId::Domain(EVM_DOMAIN_ID)), }, )), }) @@ -3593,7 +3555,7 @@ async fn test_xdm_between_consensus_and_domain_should_work() { ferdie .construct_and_send_extrinsic_with(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::initiate_domain_update_chain_allowlist { - domain_id: GENESIS_DOMAIN_ID, + domain_id: EVM_DOMAIN_ID, update: ChainAllowlistUpdate::Add(ChainId::Consensus), }, )) @@ -3606,11 +3568,11 @@ async fn test_xdm_between_consensus_and_domain_should_work() { // there should be zero channel updates on both consensus and domain chain let consensus_channel_storage = ChannelStorage::new(ChainId::Consensus); assert!(consensus_channel_storage - .get_channel_state_for(&*ferdie.client, GENESIS_DOMAIN_ID.into(), ChannelId::zero()) + .get_channel_state_for(&*ferdie.client, EVM_DOMAIN_ID.into(), ChannelId::zero()) .unwrap() .is_none()); - let domain_channel_storage = ChannelStorage::new(GENESIS_DOMAIN_ID.into()); + let domain_channel_storage = ChannelStorage::new(EVM_DOMAIN_ID.into()); assert!(domain_channel_storage .get_channel_state_for(&*ferdie.client, ChainId::Consensus, ChannelId::zero(),) .unwrap() @@ -3643,7 +3605,7 @@ async fn test_xdm_between_consensus_and_domain_should_work() { // consensus channel update let channel_update = consensus_channel_storage - .get_channel_state_for(&*ferdie.client, GENESIS_DOMAIN_ID.into(), ChannelId::zero()) + .get_channel_state_for(&*ferdie.client, EVM_DOMAIN_ID.into(), ChannelId::zero()) .unwrap() .unwrap(); @@ -3695,7 +3657,7 @@ async fn test_xdm_between_consensus_and_domain_should_work() { // consensus channel update let channel_update = consensus_channel_storage - .get_channel_state_for(&*ferdie.client, GENESIS_DOMAIN_ID.into(), ChannelId::zero()) + .get_channel_state_for(&*ferdie.client, EVM_DOMAIN_ID.into(), ChannelId::zero()) .unwrap() .unwrap(); @@ -3711,7 +3673,7 @@ async fn test_xdm_between_consensus_and_domain_should_work() { 
.construct_and_send_extrinsic_with(pallet_sudo::Call::sudo { call: Box::new(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::close_channel { - chain_id: ChainId::Domain(GENESIS_DOMAIN_ID), + chain_id: ChainId::Domain(EVM_DOMAIN_ID), channel_id, }, )), @@ -3768,19 +3730,17 @@ async fn test_xdm_between_domains_should_work() { // Run Alice (a system domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; // Run Bob (a auto-id domain authority node) let mut bob = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Bob, BasePath::new(directory.path().join("bob")), ) - .build_auto_id_node(Sr25519Keyring::Bob, Role::Authority, &mut ferdie) + .build_auto_id_node(Role::Authority, Sr25519Keyring::Bob, &mut ferdie) .await; // Run the cross domain gossip message worker @@ -3896,10 +3856,9 @@ async fn test_unordered_cross_domains_message_should_work() { // Run Alice (a system domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let evm_domain_tx_pool_sink = ferdie @@ -3946,7 +3905,7 @@ async fn test_unordered_cross_domains_message_should_work() { .construct_and_send_extrinsic_with(pallet_sudo::Call::sudo { call: Box::new(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::update_consensus_chain_allowlist { - update: ChainAllowlistUpdate::Add(ChainId::Domain(GENESIS_DOMAIN_ID)), + update: ChainAllowlistUpdate::Add(ChainId::Domain(EVM_DOMAIN_ID)), }, )), }) @@ -3960,7 +3919,7 @@ async fn test_unordered_cross_domains_message_should_work() { ferdie 
.construct_and_send_extrinsic_with(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::initiate_domain_update_chain_allowlist { - domain_id: GENESIS_DOMAIN_ID, + domain_id: EVM_DOMAIN_ID, update: ChainAllowlistUpdate::Add(ChainId::Consensus), }, )) @@ -4000,7 +3959,7 @@ async fn test_unordered_cross_domains_message_should_work() { ferdie .construct_and_send_extrinsic_with(pallet_transporter::Call::transfer { dst_location: pallet_transporter::Location { - chain_id: ChainId::Domain(GENESIS_DOMAIN_ID), + chain_id: ChainId::Domain(EVM_DOMAIN_ID), account_id: AccountId20Converter::convert(Alice.to_account_id()), }, amount: transfer_amount, @@ -4045,10 +4004,9 @@ async fn test_restart_domain_operator() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 5).await.unwrap(); @@ -4061,7 +4019,7 @@ async fn test_restart_domain_operator() { std::fs::remove_file( directory .path() - .join(format!("alice/domain-{GENESIS_DOMAIN_ID:?}")) + .join(format!("alice/domain-{EVM_DOMAIN_ID:?}")) .as_path() .join("paritydb/lock"), ) @@ -4078,10 +4036,9 @@ async fn test_restart_domain_operator() { // Restart Alice let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 5).await.unwrap(); @@ -4111,10 +4068,9 @@ async fn test_domain_transaction_fee_and_operator_reward() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), 
) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 3).await.unwrap(); @@ -4187,10 +4143,9 @@ async fn test_multiple_consensus_blocks_derive_similar_domain_block() { // Run Alice (a evm domain authority node) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 3).await.unwrap(); @@ -4326,11 +4281,10 @@ async fn test_skip_empty_bundle_production() { // Run Alice (a evm domain authority node) with `skip_empty_bundle_production` set to `true` let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) .skip_empty_bundle() - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; // Wait for `BlockTreePruningDepth + 1` blocks which is 10 + 1 in test @@ -4378,10 +4332,9 @@ async fn test_bad_receipt_chain() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -4393,7 +4346,7 @@ async fn test_bad_receipt_chain() { let mut bundle_producer = { let domain_bundle_proposer = DomainBundleProposer::new( - GENESIS_DOMAIN_ID, + EVM_DOMAIN_ID, alice.client.clone(), ferdie.client.clone(), alice.operator.transaction_pool.clone(), @@ -4401,7 +4354,7 @@ async fn test_bad_receipt_chain() { let (bundle_sender, _bundle_receiver) = sc_utils::mpsc::tracing_unbounded("domain_bundle_stream", 
100); DomainBundleProducer::new( - GENESIS_DOMAIN_ID, + EVM_DOMAIN_ID, ferdie.client.clone(), alice.client.clone(), domain_bundle_proposer, @@ -4513,7 +4466,7 @@ async fn test_bad_receipt_chain() { for receipt_hash in bad_receipt_descendants { assert!(ferdie.does_receipt_exist(receipt_hash).unwrap()); assert!(runtime_api - .is_bad_er_pending_to_prune(ferdie_best_hash, GENESIS_DOMAIN_ID, receipt_hash) + .is_bad_er_pending_to_prune(ferdie_best_hash, EVM_DOMAIN_ID, receipt_hash) .unwrap()); } } @@ -4538,10 +4491,9 @@ async fn test_domain_chain_storage_price_should_be_aligned_with_the_consensus_ch // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; // The domain transaction byte is non-zero on the consensus chain genesis but @@ -4596,10 +4548,9 @@ async fn test_skip_duplicated_tx_in_previous_bundle() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bob_pre_balance = alice.free_balance(Bob.to_account_id()); @@ -4672,10 +4623,9 @@ async fn test_handle_duplicated_tx_with_diff_nonce_in_previous_bundle() { // Run Alice (a evm domain authority node) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let nonce = alice.account_nonce(); @@ -4757,10 +4707,9 @@ async fn test_verify_mmr_proof_stateless() { // Run Alice (an evm domain) let alice = 
domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 3).await.unwrap(); @@ -4834,10 +4783,9 @@ async fn test_equivocated_bundle_check() { // Run Alice (an evm domain) let alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; produce_blocks!(ferdie, alice, 3).await.unwrap(); @@ -4892,7 +4840,7 @@ async fn test_equivocated_bundle_check() { let bundles = ferdie .client .runtime_api() - .extract_successful_bundles(block_hash, GENESIS_DOMAIN_ID, block_body) + .extract_successful_bundles(block_hash, EVM_DOMAIN_ID, block_body) .unwrap(); assert_eq!(bundles, vec![opaque_bundle]); @@ -4911,7 +4859,7 @@ async fn test_equivocated_bundle_check() { let bundles = ferdie .client .runtime_api() - .extract_successful_bundles(block_hash, GENESIS_DOMAIN_ID, block_body) + .extract_successful_bundles(block_hash, EVM_DOMAIN_ID, block_body) .unwrap(); assert_eq!(bundles, vec![opaque_bundle]); @@ -4967,10 +4915,9 @@ async fn test_xdm_false_invalid_fraud_proof() { // Run Alice (an evm domain) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -4990,7 +4937,7 @@ async fn test_xdm_false_invalid_fraud_proof() { .construct_and_send_extrinsic_with(pallet_sudo::Call::sudo { call: Box::new(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::update_consensus_chain_allowlist { - update: 
ChainAllowlistUpdate::Add(ChainId::Domain(GENESIS_DOMAIN_ID)), + update: ChainAllowlistUpdate::Add(ChainId::Domain(EVM_DOMAIN_ID)), }, )), }) @@ -5004,7 +4951,7 @@ async fn test_xdm_false_invalid_fraud_proof() { ferdie .construct_and_send_extrinsic_with(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::initiate_domain_update_chain_allowlist { - domain_id: GENESIS_DOMAIN_ID, + domain_id: EVM_DOMAIN_ID, update: ChainAllowlistUpdate::Add(ChainId::Consensus), }, )) @@ -5039,7 +4986,7 @@ async fn test_xdm_false_invalid_fraud_proof() { ferdie .construct_and_send_extrinsic_with(pallet_transporter::Call::transfer { dst_location: pallet_transporter::Location { - chain_id: ChainId::Domain(GENESIS_DOMAIN_ID), + chain_id: ChainId::Domain(EVM_DOMAIN_ID), account_id: AccountId20Converter::convert(Alice.to_account_id()), }, amount: 10, @@ -5158,10 +5105,9 @@ async fn test_stale_fork_xdm_true_invalid_fraud_proof() { // Run Alice (an evm domain) let mut alice = domain_test_service::DomainNodeBuilder::new( tokio_handle.clone(), - Alice, BasePath::new(directory.path().join("alice")), ) - .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .build_evm_node(Role::Authority, Alice, &mut ferdie) .await; let bundle_to_tx = |opaque_bundle| { @@ -5181,7 +5127,7 @@ async fn test_stale_fork_xdm_true_invalid_fraud_proof() { .construct_and_send_extrinsic_with(pallet_sudo::Call::sudo { call: Box::new(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::update_consensus_chain_allowlist { - update: ChainAllowlistUpdate::Add(ChainId::Domain(GENESIS_DOMAIN_ID)), + update: ChainAllowlistUpdate::Add(ChainId::Domain(EVM_DOMAIN_ID)), }, )), }) @@ -5195,7 +5141,7 @@ async fn test_stale_fork_xdm_true_invalid_fraud_proof() { ferdie .construct_and_send_extrinsic_with(subspace_test_runtime::RuntimeCall::Messenger( pallet_messenger::Call::initiate_domain_update_chain_allowlist { - domain_id: GENESIS_DOMAIN_ID, + domain_id: EVM_DOMAIN_ID, update: 
ChainAllowlistUpdate::Add(ChainId::Consensus), }, )) @@ -5230,7 +5176,7 @@ async fn test_stale_fork_xdm_true_invalid_fraud_proof() { ferdie .construct_and_send_extrinsic_with(pallet_transporter::Call::transfer { dst_location: pallet_transporter::Location { - chain_id: ChainId::Domain(GENESIS_DOMAIN_ID), + chain_id: ChainId::Domain(EVM_DOMAIN_ID), account_id: AccountId20Converter::convert(Alice.to_account_id()), }, amount: 10, diff --git a/domains/test/service/src/domain.rs b/domains/test/service/src/domain.rs index 307fa10759..628afae048 100644 --- a/domains/test/service/src/domain.rs +++ b/domains/test/service/src/domain.rs @@ -406,7 +406,6 @@ where /// A builder to create a [`DomainNode`]. pub struct DomainNodeBuilder { tokio_handle: tokio::runtime::Handle, - key: EcdsaKeyring, domain_nodes: Vec, domain_nodes_exclusive: bool, skip_empty_bundle_production: bool, @@ -418,13 +417,8 @@ impl DomainNodeBuilder { /// /// `tokio_handle` - The tokio handler to use. /// `base_path` - Where databases will be stored. 
- pub fn new( - tokio_handle: tokio::runtime::Handle, - key: EcdsaKeyring, - base_path: BasePath, - ) -> Self { + pub fn new(tokio_handle: tokio::runtime::Handle, base_path: BasePath) -> Self { DomainNodeBuilder { - key, tokio_handle, domain_nodes: Vec::new(), domain_nodes_exclusive: false, @@ -460,13 +454,13 @@ impl DomainNodeBuilder { pub async fn build_evm_node( self, role: Role, - domain_id: DomainId, + key: EcdsaKeyring, mock_consensus_node: &mut MockConsensusNode, ) -> EvmDomainNode { DomainNode::build( - domain_id, + EVM_DOMAIN_ID, self.tokio_handle, - self.key, + key, self.base_path, self.domain_nodes, self.domain_nodes_exclusive, @@ -480,8 +474,8 @@ impl DomainNodeBuilder { /// Build a evm domain node pub async fn build_auto_id_node( self, - key: Sr25519Keyring, role: Role, + key: Sr25519Keyring, mock_consensus_node: &mut MockConsensusNode, ) -> AutoIdDomainNode { DomainNode::build( diff --git a/domains/test/service/src/lib.rs b/domains/test/service/src/lib.rs index 899d8642f0..98e6f5b73c 100644 --- a/domains/test/service/src/lib.rs +++ b/domains/test/service/src/lib.rs @@ -53,9 +53,6 @@ use std::str::FromStr; pub use domain::*; pub use evm_domain_test_runtime; -/// The domain id of the genesis domain -pub const GENESIS_DOMAIN_ID: DomainId = DomainId::new(0u32); - /// The domain id of the evm domain pub const EVM_DOMAIN_ID: DomainId = DomainId::new(0u32); From 4f9f5edf7e6dc53beb82eed66e083e57da8ce9e6 Mon Sep 17 00:00:00 2001 From: linning Date: Fri, 12 Jul 2024 20:14:03 +0800 Subject: [PATCH 09/37] Add new auto-id bootstrap node to chain spec Signed-off-by: linning --- .../sc-subspace-chain-specs/res/chain-spec-raw-gemini-3h.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/sc-subspace-chain-specs/res/chain-spec-raw-gemini-3h.json b/crates/sc-subspace-chain-specs/res/chain-spec-raw-gemini-3h.json index f0b76c0f8d..4811f9593e 100644 --- a/crates/sc-subspace-chain-specs/res/chain-spec-raw-gemini-3h.json +++ 
b/crates/sc-subspace-chain-specs/res/chain-spec-raw-gemini-3h.json @@ -23,7 +23,8 @@ "/dns/bootstrap-2.nova.gemini-3h.subspace.network/tcp/30334/p2p/12D3KooWEYs5yikat5NanzN7c2Sb4ngxJoCro9vXMULM2ZYVWW9H" ], "1": [ - "/dns/bootstrap-0.autoid.gemini-3h.subspace.network/tcp/30334/p2p/12D3KooWFoiz2iTkmnnSqiL2oQRhGzaqgtUjYNz2jyWKQqgPXgx9" + "/dns/bootstrap-0.autoid.gemini-3h.subspace.network/tcp/30334/p2p/12D3KooWFoiz2iTkmnnSqiL2oQRhGzaqgtUjYNz2jyWKQqgPXgx9", + "/dns/bootstrap-1.autoid.gemini-3h.subspace.network/tcp/30334/p2p/12D3KooWDoPp1RLHUDX7YpKLc3yhymNGykV5f8ZdYWc1sjoBUrCX" ] }, "dsnBootstrapNodes": [ From 0028ee67d9bde28508309ca0a8992defb73aece5 Mon Sep 17 00:00:00 2001 From: linning Date: Mon, 15 Jul 2024 19:09:47 +0800 Subject: [PATCH 10/37] Remove unneeded comment Signed-off-by: linning --- domains/test/service/src/domain.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/domains/test/service/src/domain.rs b/domains/test/service/src/domain.rs index 628afae048..bd485e0612 100644 --- a/domains/test/service/src/domain.rs +++ b/domains/test/service/src/domain.rs @@ -269,7 +269,6 @@ where rpc_handlers, operator, tx_pool_sink: domain_message_sink, - // _phantom_data: Default::default(), } } From 5f5fd027eb2d5e728610fc78fd726a56d15d4169 Mon Sep 17 00:00:00 2001 From: linning Date: Mon, 15 Jul 2024 19:18:46 +0800 Subject: [PATCH 11/37] Disable flaky test test_stale_fork_xdm_true_invalid_fraud_proof Signed-off-by: linning --- domains/client/domain-operator/src/tests.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/domains/client/domain-operator/src/tests.rs b/domains/client/domain-operator/src/tests.rs index 95864dfb27..7e55cd6211 100644 --- a/domains/client/domain-operator/src/tests.rs +++ b/domains/client/domain-operator/src/tests.rs @@ -5241,7 +5241,10 @@ async fn test_xdm_false_invalid_fraud_proof() { assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); } +// TODO: this test is flaky and sometime hang forever in CI thus disable it temporary, +// do 
investigate and fix it #[tokio::test(flavor = "multi_thread")] +#[ignore] async fn test_stale_fork_xdm_true_invalid_fraud_proof() { let directory = TempDir::new().expect("Must be able to create temporary directory"); From 9f12887517cf03fb464a35aace4684989191826e Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Sat, 13 Jul 2024 08:43:32 +0300 Subject: [PATCH 12/37] Improve farm metrics API --- .../commands/cluster/farmer.rs | 6 +- .../src/bin/subspace-farmer/commands/farm.rs | 5 +- .../subspace-farmer/src/single_disk_farm.rs | 143 +++++++-------- .../src/single_disk_farm/metrics.rs | 163 +++++------------- 4 files changed, 124 insertions(+), 193 deletions(-) diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/farmer.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/farmer.rs index a44ee180ca..bf286452c8 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/farmer.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/farmer.rs @@ -8,6 +8,7 @@ use bytesize::ByteSize; use clap::Parser; use futures::stream::FuturesUnordered; use futures::{select, FutureExt, StreamExt}; +use parking_lot::Mutex; use prometheus_client::registry::Registry; use std::fs; use std::future::Future; @@ -25,7 +26,6 @@ use subspace_farmer::cluster::plotter::ClusterPlotter; use subspace_farmer::farm::Farm; use subspace_farmer::node_client::caching_proxy_node_client::CachingProxyNodeClient; use subspace_farmer::node_client::NodeClient; -use subspace_farmer::single_disk_farm::metrics::SingleDiskFarmMetrics; use subspace_farmer::single_disk_farm::{ SingleDiskFarm, SingleDiskFarmError, SingleDiskFarmOptions, }; @@ -241,7 +241,7 @@ where let faster_read_sector_record_chunks_mode_barrier = Arc::new(Barrier::new(disk_farms.len())); let faster_read_sector_record_chunks_mode_concurrency = Arc::new(Semaphore::new(1)); - let metrics = &SingleDiskFarmMetrics::new(registry); + let registry = &Mutex::new(registry); let mut 
farms = Vec::with_capacity(disk_farms.len()); let mut farms_stream = disk_farms @@ -283,7 +283,7 @@ where .read_sector_record_chunks_mode, faster_read_sector_record_chunks_mode_barrier, faster_read_sector_record_chunks_mode_concurrency, - metrics: Some(metrics.clone()), + registry: Some(registry), create, }, farm_index, diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs index 464c8aa677..ab7e5c6d71 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs @@ -31,7 +31,6 @@ use subspace_farmer::node_client::rpc_node_client::RpcNodeClient; use subspace_farmer::node_client::NodeClient; use subspace_farmer::plotter::cpu::CpuPlotter; use subspace_farmer::single_disk_farm::identity::Identity; -use subspace_farmer::single_disk_farm::metrics::SingleDiskFarmMetrics; use subspace_farmer::single_disk_farm::{ SingleDiskFarm, SingleDiskFarmError, SingleDiskFarmOptions, }; @@ -526,7 +525,7 @@ where let (plotting_delay_senders, plotting_delay_receivers) = (0..disk_farms.len()) .map(|_| oneshot::channel()) .unzip::<_, _, Vec<_>, Vec<_>>(); - let metrics = &SingleDiskFarmMetrics::new(&mut prometheus_metrics_registry); + let registry = &Mutex::new(&mut prometheus_metrics_registry); let mut farms = Vec::with_capacity(disk_farms.len()); let mut farms_stream = disk_farms @@ -568,7 +567,7 @@ where .read_sector_record_chunks_mode, faster_read_sector_record_chunks_mode_barrier, faster_read_sector_record_chunks_mode_concurrency, - metrics: Some(metrics.clone()), + registry: Some(registry), create, }, farm_index, diff --git a/crates/subspace-farmer/src/single_disk_farm.rs b/crates/subspace-farmer/src/single_disk_farm.rs index f0dc157883..281a152304 100644 --- a/crates/subspace-farmer/src/single_disk_farm.rs +++ b/crates/subspace-farmer/src/single_disk_farm.rs @@ -6,7 +6,7 @@ pub mod farming; pub mod identity; 
-pub mod metrics; +mod metrics; pub mod piece_cache; pub mod piece_reader; pub mod plot_cache; @@ -50,6 +50,7 @@ use futures::stream::FuturesUnordered; use futures::{select, FutureExt, StreamExt}; use parity_scale_codec::{Decode, Encode}; use parking_lot::Mutex; +use prometheus_client::registry::Registry; use rand::prelude::*; use rayon::prelude::*; use rayon::{ThreadPoolBuildError, ThreadPoolBuilder}; @@ -296,7 +297,7 @@ impl PlotMetadataHeader { /// Options used to open single disk farm #[derive(Debug)] -pub struct SingleDiskFarmOptions +pub struct SingleDiskFarmOptions<'a, NC> where NC: Clone, { @@ -341,8 +342,8 @@ where pub faster_read_sector_record_chunks_mode_barrier: Arc, /// Limit concurrency of internal benchmarking between different farms pub faster_read_sector_record_chunks_mode_concurrency: Arc, - /// Single disk farm metrics - pub metrics: Option, + /// Prometheus registry + pub registry: Option<&'a Mutex<&'a mut Registry>>, /// Whether to create a farm if it doesn't yet exist pub create: bool, } @@ -774,7 +775,7 @@ impl SingleDiskFarm { /// Create new single disk farm instance pub async fn new( - options: SingleDiskFarmOptions, + options: SingleDiskFarmOptions<'_, NC>, farm_index: usize, ) -> Result where @@ -784,16 +785,49 @@ impl SingleDiskFarm { { let span = Span::current(); + let SingleDiskFarmOptions { + directory, + farmer_app_info, + allocated_space, + max_pieces_in_sector, + node_client, + reward_address, + plotter_legacy, + plotter, + kzg, + erasure_coding, + cache_percentage, + farming_thread_pool_size, + plotting_delay, + global_mutex, + disable_farm_locking, + read_sector_record_chunks_mode, + faster_read_sector_record_chunks_mode_barrier, + faster_read_sector_record_chunks_mode_concurrency, + registry, + create, + } = options; + let single_disk_farm_init_fut = tokio::task::spawn_blocking({ + let directory = directory.clone(); + let farmer_app_info = farmer_app_info.clone(); let span = span.clone(); move || { let _span_guard = 
span.enter(); - Self::init(&options).map(|single_disk_farm_init| (single_disk_farm_init, options)) + Self::init( + &directory, + &farmer_app_info, + allocated_space, + max_pieces_in_sector, + cache_percentage, + disable_farm_locking, + create, + ) } }); - let (single_disk_farm_init, options) = + let single_disk_farm_init = AsyncJoinOnDrop::new(single_disk_farm_init_fut, false).await??; let SingleDiskFarmInit { @@ -813,25 +847,6 @@ impl SingleDiskFarm { let pieces_in_sector = single_disk_farm_info.pieces_in_sector(); let sector_size = sector_size(pieces_in_sector); - let SingleDiskFarmOptions { - directory, - farmer_app_info, - node_client, - reward_address, - plotter_legacy, - plotter, - kzg, - erasure_coding, - farming_thread_pool_size, - plotting_delay, - global_mutex, - read_sector_record_chunks_mode, - faster_read_sector_record_chunks_mode_barrier, - faster_read_sector_record_chunks_mode_concurrency, - metrics, - .. - } = options; - let (error_sender, error_receiver) = oneshot::channel(); let error_sender = Arc::new(Mutex::new(Some(error_sender))); @@ -1213,36 +1228,25 @@ impl SingleDiskFarm { _single_disk_farm_info_lock: single_disk_farm_info_lock, }; - if let Some(metrics) = metrics { - farm.register_metrics(metrics); + if let Some(registry) = registry { + farm.register_metrics(SingleDiskFarmMetrics::new(*registry.lock(), farm.id())); } Ok(farm) } - fn init( - options: &SingleDiskFarmOptions, - ) -> Result - where - NC: Clone, - { - let SingleDiskFarmOptions { - directory, - farmer_app_info, - allocated_space, - max_pieces_in_sector, - cache_percentage, - disable_farm_locking, - create, - .. 
- } = options; - - let allocated_space = *allocated_space; - let max_pieces_in_sector = *max_pieces_in_sector; - + fn init( + directory: &PathBuf, + farmer_app_info: &FarmerAppInfo, + allocated_space: u64, + max_pieces_in_sector: u16, + cache_percentage: u8, + disable_farm_locking: bool, + create: bool, + ) -> Result { fs::create_dir_all(directory)?; - let identity = if *create { + let identity = if create { Identity::open_or_create(directory)? } else { Identity::open(directory)?.ok_or_else(|| { @@ -1334,7 +1338,7 @@ impl SingleDiskFarm { } }; - let single_disk_farm_info_lock = if *disable_farm_locking { + let single_disk_farm_info_lock = if disable_farm_locking { None } else { Some( @@ -1356,7 +1360,7 @@ impl SingleDiskFarm { let target_sector_count = { let potentially_plottable_space = allocated_space.saturating_sub(fixed_space_usage) / 100 - * (100 - u64::from(*cache_percentage)); + * (100 - u64::from(cache_percentage)); // Do the rounding to make sure we have exactly as much space as fits whole number of // sectors, account for disk sector size just in case (potentially_plottable_space - DISK_SECTOR_SIZE as u64) / single_sector_overhead @@ -1364,7 +1368,7 @@ impl SingleDiskFarm { if target_sector_count == 0 { let mut single_plot_with_cache_space = - single_sector_overhead.div_ceil(100 - u64::from(*cache_percentage)) * 100; + single_sector_overhead.div_ceil(100 - u64::from(cache_percentage)) * 100; // Cache must not be empty, ensure it contains at least one element even if // percentage-wise it will use more space if single_plot_with_cache_space - single_sector_overhead @@ -1385,7 +1389,7 @@ impl SingleDiskFarm { plot_file_size.div_ceil(DISK_SECTOR_SIZE as u64) * DISK_SECTOR_SIZE as u64; // Remaining space will be used for caching purposes - let cache_capacity = if *cache_percentage > 0 { + let cache_capacity = if cache_percentage > 0 { let cache_space = allocated_space - fixed_space_usage - plot_file_size @@ -1571,18 +1575,17 @@ impl SingleDiskFarm { } fn 
register_metrics(&self, metrics: SingleDiskFarmMetrics) { - let farm_id = *self.id(); + let metrics = Arc::new(metrics); let total_sector_count = self.total_sectors_count; let plotted_sectors_count = self.sectors_metadata.read_blocking().len() as SectorIndex; metrics.update_sectors_total( - &farm_id, total_sector_count - plotted_sectors_count, SectorState::NotPlotted, ); - metrics.update_sectors_total(&farm_id, plotted_sectors_count, SectorState::Plotted); + metrics.update_sectors_total(plotted_sectors_count, SectorState::Plotted); self.on_sector_update(Arc::new({ - let metrics = metrics.clone(); + let metrics = Arc::clone(&metrics); move |(_sector_index, sector_state)| match sector_state { SectorUpdate::Plotting(SectorPlottingDetails::Starting { .. }) => { @@ -1592,36 +1595,36 @@ impl SingleDiskFarm { metrics.sector_downloading.inc(); } SectorUpdate::Plotting(SectorPlottingDetails::Downloaded(time)) => { - metrics.observe_sector_downloading_time(&farm_id, time); + metrics.sector_downloading_time.observe(time.as_secs_f64()); metrics.sector_downloaded.inc(); } SectorUpdate::Plotting(SectorPlottingDetails::Encoding) => { metrics.sector_encoding.inc(); } SectorUpdate::Plotting(SectorPlottingDetails::Encoded(time)) => { - metrics.observe_sector_encoding_time(&farm_id, time); + metrics.sector_encoding_time.observe(time.as_secs_f64()); metrics.sector_encoded.inc(); } SectorUpdate::Plotting(SectorPlottingDetails::Writing) => { metrics.sector_writing.inc(); } SectorUpdate::Plotting(SectorPlottingDetails::Written(time)) => { - metrics.observe_sector_writing_time(&farm_id, time); + metrics.sector_writing_time.observe(time.as_secs_f64()); metrics.sector_written.inc(); } SectorUpdate::Plotting(SectorPlottingDetails::Finished { time, .. 
}) => { - metrics.observe_sector_plotting_time(&farm_id, time); + metrics.sector_plotting_time.observe(time.as_secs_f64()); metrics.sector_plotted.inc(); - metrics.update_sector_state(&farm_id, SectorState::Plotted); + metrics.update_sector_state(SectorState::Plotted); } SectorUpdate::Plotting(SectorPlottingDetails::Error(_)) => { metrics.sector_plotting_error.inc(); } SectorUpdate::Expiration(SectorExpirationDetails::AboutToExpire) => { - metrics.update_sector_state(&farm_id, SectorState::AboutToExpire); + metrics.update_sector_state(SectorState::AboutToExpire); } SectorUpdate::Expiration(SectorExpirationDetails::Expired) => { - metrics.update_sector_state(&farm_id, SectorState::Expired); + metrics.update_sector_state(SectorState::Expired); } SectorUpdate::Expiration(SectorExpirationDetails::Determined { .. }) => { // Not interested in here @@ -1633,17 +1636,15 @@ impl SingleDiskFarm { self.on_farming_notification(Arc::new( move |farming_notification| match farming_notification { FarmingNotification::Auditing(auditing_details) => { - metrics.observe_auditing_time(&farm_id, &auditing_details.time); + metrics + .auditing_time + .observe(auditing_details.time.as_secs_f64()); } FarmingNotification::Proving(proving_details) => { - metrics.observe_proving_time( - &farm_id, - &proving_details.time, - proving_details.result, - ); + metrics.observe_proving_time(&proving_details.time, proving_details.result); } FarmingNotification::NonFatalError(error) => { - metrics.note_farming_error(&farm_id, error); + metrics.note_farming_error(error); } }, )) diff --git a/crates/subspace-farmer/src/single_disk_farm/metrics.rs b/crates/subspace-farmer/src/single_disk_farm/metrics.rs index 020899fd83..1e79ffbcc6 100644 --- a/crates/subspace-farmer/src/single_disk_farm/metrics.rs +++ b/crates/subspace-farmer/src/single_disk_farm/metrics.rs @@ -32,16 +32,16 @@ impl fmt::Display for SectorState { } /// Metrics for single disk farm -#[derive(Debug, Clone)] -pub struct SingleDiskFarmMetrics { 
- auditing_time: Family, Histogram>, - proving_time: Family, Histogram>, - farming_errors: Family, Counter>, - sector_downloading_time: Family, Histogram>, - sector_encoding_time: Family, Histogram>, - sector_writing_time: Family, Histogram>, - sector_plotting_time: Family, Histogram>, - sectors_total: Family, Gauge>, +#[derive(Debug)] +pub(super) struct SingleDiskFarmMetrics { + pub(super) auditing_time: Histogram, + proving_time: Family, Histogram>, + farming_errors: Family, Counter>, + pub(super) sector_downloading_time: Histogram, + pub(super) sector_encoding_time: Histogram, + pub(super) sector_writing_time: Histogram, + pub(super) sector_plotting_time: Histogram, + sectors_total: Family, Gauge>, pub(super) sector_downloading: Counter, pub(super) sector_downloaded: Counter, pub(super) sector_encoding: Counter, @@ -54,14 +54,13 @@ pub struct SingleDiskFarmMetrics { } impl SingleDiskFarmMetrics { - /// Create new instance (note that a single instance must be created and cloned instead of - /// creating multiple separate instances for different farm) - pub fn new(registry: &mut Registry) -> Self { - let sub_registry = registry.sub_registry_with_prefix("subspace_farmer"); + /// Create new instance for specified farm + pub(super) fn new(registry: &mut Registry, farm_id: &FarmId) -> Self { + let sub_registry = registry + .sub_registry_with_prefix("farm") + .sub_registry_with_label(("farm_id".into(), farm_id.to_string().into())); - let auditing_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.0002, 2.0, 15)) - }); + let auditing_time = Histogram::new(exponential_buckets(0.0002, 2.0, 15)); sub_registry.register_with_unit( "auditing_time", @@ -81,7 +80,7 @@ impl SingleDiskFarmMetrics { proving_time.clone(), ); - let farming_errors = Family::<_, _>::new_with_constructor(Counter::<_, _>::default); + let farming_errors = Family::default(); sub_registry.register( "farming_errors", @@ -89,9 +88,7 @@ impl SingleDiskFarmMetrics { 
farming_errors.clone(), ); - let sector_downloading_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.1, 2.0, 15)) - }); + let sector_downloading_time = Histogram::new(exponential_buckets(0.1, 2.0, 15)); sub_registry.register_with_unit( "sector_downloading_time", @@ -100,9 +97,7 @@ impl SingleDiskFarmMetrics { sector_downloading_time.clone(), ); - let sector_encoding_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.1, 2.0, 15)) - }); + let sector_encoding_time = Histogram::new(exponential_buckets(0.1, 2.0, 15)); sub_registry.register_with_unit( "sector_encoding_time", @@ -111,9 +106,7 @@ impl SingleDiskFarmMetrics { sector_encoding_time.clone(), ); - let sector_writing_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.0002, 2.0, 15)) - }); + let sector_writing_time = Histogram::new(exponential_buckets(0.0002, 2.0, 15)); sub_registry.register_with_unit( "sector_writing_time", @@ -122,9 +115,7 @@ impl SingleDiskFarmMetrics { sector_writing_time.clone(), ); - let sector_plotting_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.1, 2.0, 15)) - }); + let sector_plotting_time = Histogram::new(exponential_buckets(0.1, 2.0, 15)); sub_registry.register_with_unit( "sector_plotting_time", @@ -133,7 +124,7 @@ impl SingleDiskFarmMetrics { sector_plotting_time.clone(), ); - let sectors_total = Family::<_, _>::new_with_constructor(Gauge::<_, _>::default); + let sectors_total = Family::default(); sub_registry.register_with_unit( "sectors_total", @@ -142,7 +133,7 @@ impl SingleDiskFarmMetrics { sectors_total.clone(), ); - let sector_downloading = Counter::<_, _>::default(); + let sector_downloading = Counter::default(); sub_registry.register_with_unit( "sector_downloading_counter", @@ -151,7 +142,7 @@ impl SingleDiskFarmMetrics { sector_downloading.clone(), ); - let sector_downloaded = Counter::<_, _>::default(); + let 
sector_downloaded = Counter::default(); sub_registry.register_with_unit( "sector_downloaded_counter", @@ -160,7 +151,7 @@ impl SingleDiskFarmMetrics { sector_downloaded.clone(), ); - let sector_encoding = Counter::<_, _>::default(); + let sector_encoding = Counter::default(); sub_registry.register_with_unit( "sector_encoding_counter", @@ -169,7 +160,7 @@ impl SingleDiskFarmMetrics { sector_encoding.clone(), ); - let sector_encoded = Counter::<_, _>::default(); + let sector_encoded = Counter::default(); sub_registry.register_with_unit( "sector_encoded_counter", @@ -178,7 +169,7 @@ impl SingleDiskFarmMetrics { sector_encoded.clone(), ); - let sector_writing = Counter::<_, _>::default(); + let sector_writing = Counter::default(); sub_registry.register_with_unit( "sector_writing_counter", @@ -187,7 +178,7 @@ impl SingleDiskFarmMetrics { sector_writing.clone(), ); - let sector_written = Counter::<_, _>::default(); + let sector_written = Counter::default(); sub_registry.register_with_unit( "sector_written_counter", @@ -196,7 +187,7 @@ impl SingleDiskFarmMetrics { sector_written.clone(), ); - let sector_plotting = Counter::<_, _>::default(); + let sector_plotting = Counter::default(); sub_registry.register_with_unit( "sector_plotting_counter", @@ -205,7 +196,7 @@ impl SingleDiskFarmMetrics { sector_plotting.clone(), ); - let sector_plotted = Counter::<_, _>::default(); + let sector_plotted = Counter::default(); sub_registry.register_with_unit( "sector_plotted_counter", @@ -214,7 +205,7 @@ impl SingleDiskFarmMetrics { sector_plotted.clone(), ); - let sector_plotting_error = Counter::<_, _>::default(); + let sector_plotting_error = Counter::default(); sub_registry.register_with_unit( "sector_plotting_error_counter", @@ -244,55 +235,27 @@ impl SingleDiskFarmMetrics { } } - pub(super) fn observe_auditing_time(&self, farm_id: &FarmId, time: &Duration) { - self.auditing_time - .get_or_create(&vec![("farm_id".to_string(), farm_id.to_string())]) - .observe(time.as_secs_f64()); - 
} - - pub(super) fn observe_proving_time( - &self, - farm_id: &FarmId, - time: &Duration, - result: ProvingResult, - ) { + pub(super) fn observe_proving_time(&self, time: &Duration, result: ProvingResult) { self.proving_time - .get_or_create(&vec![ - ("farm_id".to_string(), farm_id.to_string()), - ("result".to_string(), result.to_string()), - ]) + .get_or_create(&vec![("result", result.to_string())]) .observe(time.as_secs_f64()); } - pub(super) fn note_farming_error(&self, farm_id: &FarmId, error: &FarmingError) { + pub(super) fn note_farming_error(&self, error: &FarmingError) { self.farming_errors - .get_or_create(&vec![ - ("farm_id".to_string(), farm_id.to_string()), - ("error".to_string(), error.str_variant().to_string()), - ]) + .get_or_create(&vec![("error", error.str_variant().to_string())]) .inc(); } - pub(super) fn update_sectors_total( - &self, - farm_id: &FarmId, - sectors: SectorIndex, - state: SectorState, - ) { + pub(super) fn update_sectors_total(&self, sectors: SectorIndex, state: SectorState) { self.sectors_total - .get_or_create(&vec![ - ("farm_id".to_string(), farm_id.to_string()), - ("state".to_string(), state.to_string()), - ]) + .get_or_create(&vec![("state", state.to_string())]) .set(i64::from(sectors)); } - pub(super) fn update_sector_state(&self, farm_id: &FarmId, state: SectorState) { + pub(super) fn update_sector_state(&self, state: SectorState) { self.sectors_total - .get_or_create(&vec![ - ("farm_id".to_string(), farm_id.to_string()), - ("state".to_string(), state.to_string()), - ]) + .get_or_create(&vec![("state", state.to_string())]) .inc(); match state { SectorState::NotPlotted => { @@ -302,10 +265,9 @@ impl SingleDiskFarmMetrics { // Separate blocks in because of mutex guard returned by `get_or_create` resulting // in deadlock otherwise { - let not_plotted_sectors = self.sectors_total.get_or_create(&vec![ - ("farm_id".to_string(), farm_id.to_string()), - ("state".to_string(), SectorState::NotPlotted.to_string()), - ]); + let 
not_plotted_sectors = self + .sectors_total + .get_or_create(&vec![("state", SectorState::NotPlotted.to_string())]); if not_plotted_sectors.get() > 0 { // Initial plotting not_plotted_sectors.dec(); @@ -313,10 +275,9 @@ impl SingleDiskFarmMetrics { } } { - let expired_sectors = self.sectors_total.get_or_create(&vec![ - ("farm_id".to_string(), farm_id.to_string()), - ("state".to_string(), SectorState::Expired.to_string()), - ]); + let expired_sectors = self + .sectors_total + .get_or_create(&vec![("state", SectorState::Expired.to_string())]); if expired_sectors.get() > 0 { // Replaced expired sector expired_sectors.dec(); @@ -325,44 +286,14 @@ impl SingleDiskFarmMetrics { } // Replaced about to expire sector self.sectors_total - .get_or_create(&vec![ - ("farm_id".to_string(), farm_id.to_string()), - ("state".to_string(), SectorState::AboutToExpire.to_string()), - ]) + .get_or_create(&vec![("state", SectorState::AboutToExpire.to_string())]) .dec(); } SectorState::AboutToExpire | SectorState::Expired => { self.sectors_total - .get_or_create(&vec![ - ("farm_id".to_string(), farm_id.to_string()), - ("state".to_string(), SectorState::Plotted.to_string()), - ]) + .get_or_create(&vec![("state", SectorState::Plotted.to_string())]) .dec(); } } } - - pub(super) fn observe_sector_downloading_time(&self, farm_id: &FarmId, time: &Duration) { - self.sector_downloading_time - .get_or_create(&vec![("farm_id".to_string(), farm_id.to_string())]) - .observe(time.as_secs_f64()); - } - - pub(super) fn observe_sector_encoding_time(&self, farm_id: &FarmId, time: &Duration) { - self.sector_encoding_time - .get_or_create(&vec![("farm_id".to_string(), farm_id.to_string())]) - .observe(time.as_secs_f64()); - } - - pub(super) fn observe_sector_writing_time(&self, farm_id: &FarmId, time: &Duration) { - self.sector_writing_time - .get_or_create(&vec![("farm_id".to_string(), farm_id.to_string())]) - .observe(time.as_secs_f64()); - } - - pub(super) fn observe_sector_plotting_time(&self, farm_id: 
&FarmId, time: &Duration) { - self.sector_plotting_time - .get_or_create(&vec![("farm_id".to_string(), farm_id.to_string())]) - .observe(time.as_secs_f64()); - } } From 21057538cbd9228780157005b78463cea542424e Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Mon, 15 Jul 2024 14:17:19 +0300 Subject: [PATCH 13/37] Integrate farm metrics into lower-level code --- .../subspace-farmer/src/single_disk_farm.rs | 99 +++---------------- .../src/single_disk_farm/farming.rs | 70 ++++++++----- .../src/single_disk_farm/metrics.rs | 19 +++- .../src/single_disk_farm/plotting.rs | 86 +++++++++++++--- 4 files changed, 150 insertions(+), 124 deletions(-) diff --git a/crates/subspace-farmer/src/single_disk_farm.rs b/crates/subspace-farmer/src/single_disk_farm.rs index 281a152304..72c64bd62e 100644 --- a/crates/subspace-farmer/src/single_disk_farm.rs +++ b/crates/subspace-farmer/src/single_disk_farm.rs @@ -18,7 +18,7 @@ pub mod unbuffered_io_file_windows; use crate::disk_piece_cache::{DiskPieceCache, DiskPieceCacheError}; use crate::farm::{ Farm, FarmId, FarmingError, FarmingNotification, HandlerFn, PieceReader, PlottedSectors, - SectorExpirationDetails, SectorPlottingDetails, SectorUpdate, + SectorUpdate, }; use crate::node_client::NodeClient; use crate::plotter::Plotter; @@ -27,7 +27,7 @@ use crate::single_disk_farm::farming::{ farming, slot_notification_forwarder, FarmingOptions, PlotAudit, }; use crate::single_disk_farm::identity::{Identity, IdentityError}; -use crate::single_disk_farm::metrics::{SectorState, SingleDiskFarmMetrics}; +use crate::single_disk_farm::metrics::SingleDiskFarmMetrics; use crate::single_disk_farm::piece_cache::SingleDiskPieceCache; use crate::single_disk_farm::piece_reader::DiskPieceReader; use crate::single_disk_farm::plot_cache::DiskPlotCache; @@ -847,6 +847,15 @@ impl SingleDiskFarm { let pieces_in_sector = single_disk_farm_info.pieces_in_sector(); let sector_size = sector_size(pieces_in_sector); + let metrics = registry.map(|registry| { + 
Arc::new(SingleDiskFarmMetrics::new( + *registry.lock(), + single_disk_farm_info.id(), + target_sector_count, + sectors_metadata.read_blocking().len() as SectorIndex, + )) + }); + let (error_sender, error_receiver) = oneshot::channel(); let error_sender = Arc::new(Mutex::new(Some(error_sender))); @@ -939,6 +948,7 @@ impl SingleDiskFarm { let error_sender = Arc::clone(&error_sender); let span = span.clone(); let global_mutex = Arc::clone(&global_mutex); + let metrics = metrics.clone(); move || { let _span_guard = span.enter(); @@ -963,6 +973,7 @@ impl SingleDiskFarm { handlers: &handlers, global_mutex: &global_mutex, plotter, + metrics, }, }; @@ -1024,6 +1035,7 @@ impl SingleDiskFarm { sectors_metadata: Arc::clone(§ors_metadata), sectors_to_plot_sender, new_segment_processing_delay: NEW_SEGMENT_PROCESSING_DELAY, + metrics: metrics.clone(), }; tasks.push(Box::pin(plotting_scheduler(plotting_scheduler_options))); @@ -1075,6 +1087,7 @@ impl SingleDiskFarm { thread_pool: farming_thread_pool, read_sector_record_chunks_mode, global_mutex, + metrics, }; match single_disk_farm_info { SingleDiskFarmInfo::V0 { .. 
} => { @@ -1227,11 +1240,6 @@ impl SingleDiskFarm { stop_sender: Some(stop_sender), _single_disk_farm_info_lock: single_disk_farm_info_lock, }; - - if let Some(registry) = registry { - farm.register_metrics(SingleDiskFarmMetrics::new(*registry.lock(), farm.id())); - } - Ok(farm) } @@ -1574,83 +1582,6 @@ impl SingleDiskFarm { }) } - fn register_metrics(&self, metrics: SingleDiskFarmMetrics) { - let metrics = Arc::new(metrics); - - let total_sector_count = self.total_sectors_count; - let plotted_sectors_count = self.sectors_metadata.read_blocking().len() as SectorIndex; - metrics.update_sectors_total( - total_sector_count - plotted_sectors_count, - SectorState::NotPlotted, - ); - metrics.update_sectors_total(plotted_sectors_count, SectorState::Plotted); - self.on_sector_update(Arc::new({ - let metrics = Arc::clone(&metrics); - - move |(_sector_index, sector_state)| match sector_state { - SectorUpdate::Plotting(SectorPlottingDetails::Starting { .. }) => { - metrics.sector_plotting.inc(); - } - SectorUpdate::Plotting(SectorPlottingDetails::Downloading) => { - metrics.sector_downloading.inc(); - } - SectorUpdate::Plotting(SectorPlottingDetails::Downloaded(time)) => { - metrics.sector_downloading_time.observe(time.as_secs_f64()); - metrics.sector_downloaded.inc(); - } - SectorUpdate::Plotting(SectorPlottingDetails::Encoding) => { - metrics.sector_encoding.inc(); - } - SectorUpdate::Plotting(SectorPlottingDetails::Encoded(time)) => { - metrics.sector_encoding_time.observe(time.as_secs_f64()); - metrics.sector_encoded.inc(); - } - SectorUpdate::Plotting(SectorPlottingDetails::Writing) => { - metrics.sector_writing.inc(); - } - SectorUpdate::Plotting(SectorPlottingDetails::Written(time)) => { - metrics.sector_writing_time.observe(time.as_secs_f64()); - metrics.sector_written.inc(); - } - SectorUpdate::Plotting(SectorPlottingDetails::Finished { time, .. 
}) => { - metrics.sector_plotting_time.observe(time.as_secs_f64()); - metrics.sector_plotted.inc(); - metrics.update_sector_state(SectorState::Plotted); - } - SectorUpdate::Plotting(SectorPlottingDetails::Error(_)) => { - metrics.sector_plotting_error.inc(); - } - SectorUpdate::Expiration(SectorExpirationDetails::AboutToExpire) => { - metrics.update_sector_state(SectorState::AboutToExpire); - } - SectorUpdate::Expiration(SectorExpirationDetails::Expired) => { - metrics.update_sector_state(SectorState::Expired); - } - SectorUpdate::Expiration(SectorExpirationDetails::Determined { .. }) => { - // Not interested in here - } - } - })) - .detach(); - - self.on_farming_notification(Arc::new( - move |farming_notification| match farming_notification { - FarmingNotification::Auditing(auditing_details) => { - metrics - .auditing_time - .observe(auditing_details.time.as_secs_f64()); - } - FarmingNotification::Proving(proving_details) => { - metrics.observe_proving_time(&proving_details.time, proving_details.result); - } - FarmingNotification::NonFatalError(error) => { - metrics.note_farming_error(error); - } - }, - )) - .detach(); - } - /// Collect summary of single disk farm for presentational purposes pub fn collect_summary(directory: PathBuf) -> SingleDiskFarmSummary { let single_disk_farm_info = match SingleDiskFarmInfo::load_from(&directory) { diff --git a/crates/subspace-farmer/src/single_disk_farm/farming.rs b/crates/subspace-farmer/src/single_disk_farm/farming.rs index e24a74c086..0b616a1baa 100644 --- a/crates/subspace-farmer/src/single_disk_farm/farming.rs +++ b/crates/subspace-farmer/src/single_disk_farm/farming.rs @@ -9,6 +9,7 @@ use crate::farm::{ AuditingDetails, FarmingError, FarmingNotification, ProvingDetails, ProvingResult, }; use crate::node_client::NodeClient; +use crate::single_disk_farm::metrics::SingleDiskFarmMetrics; use crate::single_disk_farm::Handlers; use async_lock::{Mutex as AsyncMutex, RwLock as AsyncRwLock}; use futures::channel::mpsc; @@ 
-201,6 +202,7 @@ pub(super) struct FarmingOptions { pub(super) thread_pool: ThreadPool, pub(super) read_sector_record_chunks_mode: ReadSectorRecordChunksMode, pub(super) global_mutex: Arc>, + pub(super) metrics: Option>, } /// Starts farming process. @@ -229,6 +231,7 @@ where thread_pool, read_sector_record_chunks_mode, global_mutex, + metrics, } = farming_options; let farmer_app_info = node_client @@ -285,12 +288,18 @@ where a_solution_distance.cmp(&b_solution_distance) }); - handlers - .farming_notification - .call_simple(&FarmingNotification::Auditing(AuditingDetails { - sectors_count: sectors_metadata.len() as SectorIndex, - time: start.elapsed(), - })); + { + let time = start.elapsed(); + if let Some(metrics) = &metrics { + metrics.auditing_time.observe(time.as_secs_f64()); + } + handlers + .farming_notification + .call_simple(&FarmingNotification::Auditing(AuditingDetails { + sectors_count: sectors_metadata.len() as SectorIndex, + time, + })); + } // Take mutex and hold until proving end to make sure nothing else major happens at the // same time @@ -320,20 +329,26 @@ where debug!(%slot, %sector_index, "Solution found"); trace!(?solution, "Solution found"); - if start.elapsed() >= farming_timeout { - handlers - .farming_notification - .call_simple(&FarmingNotification::Proving(ProvingDetails { - result: ProvingResult::Timeout, - time: start.elapsed(), - })); - warn!( - %slot, - %sector_index, - "Proving for solution skipped due to farming time limit", - ); - - break 'solutions_processing; + { + let time = start.elapsed(); + if time >= farming_timeout { + if let Some(metrics) = &metrics { + metrics.observe_proving_time(&time, ProvingResult::Timeout); + } + handlers.farming_notification.call_simple( + &FarmingNotification::Proving(ProvingDetails { + result: ProvingResult::Timeout, + time, + }), + ); + warn!( + %slot, + %sector_index, + "Proving for solution skipped due to farming time limit", + ); + + break 'solutions_processing; + } } let response = 
SolutionResponse { @@ -344,11 +359,15 @@ where handlers.solution.call_simple(&response); if let Err(error) = node_client.submit_solution_response(response).await { + let time = start.elapsed(); + if let Some(metrics) = &metrics { + metrics.observe_proving_time(&time, ProvingResult::Rejected); + } handlers .farming_notification .call_simple(&FarmingNotification::Proving(ProvingDetails { result: ProvingResult::Rejected, - time: start.elapsed(), + time, })); warn!( %slot, @@ -359,11 +378,15 @@ where break 'solutions_processing; } + let time = start.elapsed(); + if let Some(metrics) = &metrics { + metrics.observe_proving_time(&time, ProvingResult::Success); + } handlers .farming_notification .call_simple(&FarmingNotification::Proving(ProvingDetails { result: ProvingResult::Success, - time: start.elapsed(), + time, })); start = Instant::now(); } @@ -386,6 +409,9 @@ where "Non-fatal farming error" ); + if let Some(metrics) = &metrics { + metrics.note_farming_error(&error); + } handlers .farming_notification .call_simple(&FarmingNotification::NonFatalError(Arc::new(error))); diff --git a/crates/subspace-farmer/src/single_disk_farm/metrics.rs b/crates/subspace-farmer/src/single_disk_farm/metrics.rs index 1e79ffbcc6..93e435e541 100644 --- a/crates/subspace-farmer/src/single_disk_farm/metrics.rs +++ b/crates/subspace-farmer/src/single_disk_farm/metrics.rs @@ -55,7 +55,12 @@ pub(super) struct SingleDiskFarmMetrics { impl SingleDiskFarmMetrics { /// Create new instance for specified farm - pub(super) fn new(registry: &mut Registry, farm_id: &FarmId) -> Self { + pub(super) fn new( + registry: &mut Registry, + farm_id: &FarmId, + total_sectors_count: SectorIndex, + plotted_sectors_count: SectorIndex, + ) -> Self { let sub_registry = registry .sub_registry_with_prefix("farm") .sub_registry_with_label(("farm_id".into(), farm_id.to_string().into())); @@ -214,7 +219,7 @@ impl SingleDiskFarmMetrics { sector_plotting_error.clone(), ); - Self { + let metrics = Self { auditing_time, 
proving_time, farming_errors, @@ -232,7 +237,15 @@ impl SingleDiskFarmMetrics { sector_plotting, sector_plotted, sector_plotting_error, - } + }; + + metrics.update_sectors_total( + total_sectors_count - plotted_sectors_count, + SectorState::NotPlotted, + ); + metrics.update_sectors_total(plotted_sectors_count, SectorState::Plotted); + + metrics } pub(super) fn observe_proving_time(&self, time: &Duration, result: ProvingResult) { diff --git a/crates/subspace-farmer/src/single_disk_farm/plotting.rs b/crates/subspace-farmer/src/single_disk_farm/plotting.rs index b496a7b5a0..1e0f7d4615 100644 --- a/crates/subspace-farmer/src/single_disk_farm/plotting.rs +++ b/crates/subspace-farmer/src/single_disk_farm/plotting.rs @@ -1,6 +1,7 @@ use crate::farm::{SectorExpirationDetails, SectorPlottingDetails, SectorUpdate}; use crate::node_client::{Error as NodeClientError, NodeClient}; use crate::plotter::{Plotter, SectorPlottingProgress}; +use crate::single_disk_farm::metrics::{SectorState, SingleDiskFarmMetrics}; #[cfg(windows)] use crate::single_disk_farm::unbuffered_io_file_windows::UnbufferedIoFileWindows; use crate::single_disk_farm::{ @@ -100,6 +101,7 @@ pub(super) struct SectorPlottingOptions<'a, NC> { pub(super) handlers: &'a Handlers, pub(super) global_mutex: &'a AsyncMutex<()>, pub(super) plotter: Arc, + pub(super) metrics: Option>, } pub(super) struct PlottingOptions<'a, NC> { @@ -292,6 +294,7 @@ where handlers, global_mutex, plotter, + metrics, } = sector_plotting_options; let SectorToPlot { @@ -309,6 +312,9 @@ where .cloned(); let replotting = maybe_old_sector_metadata.is_some(); + if let Some(metrics) = metrics { + metrics.sector_plotting.inc(); + } let sector_state = SectorUpdate::Plotting(SectorPlottingDetails::Starting { progress, replotting, @@ -392,6 +398,7 @@ where sectors_being_modified, global_mutex, progress_receiver, + metrics, ) .await? 
{ @@ -464,10 +471,16 @@ where let sector_metadata = plotted_sector.sector_metadata.clone(); + let time = start.elapsed(); + if let Some(metrics) = metrics { + metrics.sector_plotting_time.observe(time.as_secs_f64()); + metrics.sector_plotted.inc(); + metrics.update_sector_state(SectorState::Plotted); + } let sector_state = SectorUpdate::Plotting(SectorPlottingDetails::Finished { plotted_sector, old_plotted_sector: maybe_old_plotted_sector, - time: start.elapsed(), + time, }); handlers .sector_update @@ -496,30 +509,45 @@ async fn plot_single_sector_internal( sectors_being_modified: &AsyncRwLock>, global_mutex: &AsyncMutex<()>, mut progress_receiver: mpsc::Receiver, + metrics: &Option>, ) -> Result, PlottingError> { // Process plotting progress notifications let progress_processor_fut = async { while let Some(progress) = progress_receiver.next().await { match progress { SectorPlottingProgress::Downloading => { + if let Some(metrics) = metrics { + metrics.sector_downloading.inc(); + } handlers.sector_update.call_simple(&( sector_index, SectorUpdate::Plotting(SectorPlottingDetails::Downloading), )); } SectorPlottingProgress::Downloaded(time) => { + if let Some(metrics) = metrics { + metrics.sector_downloading_time.observe(time.as_secs_f64()); + metrics.sector_downloaded.inc(); + } handlers.sector_update.call_simple(&( sector_index, SectorUpdate::Plotting(SectorPlottingDetails::Downloaded(time)), )); } SectorPlottingProgress::Encoding => { + if let Some(metrics) = metrics { + metrics.sector_encoding.inc(); + } handlers.sector_update.call_simple(&( sector_index, SectorUpdate::Plotting(SectorPlottingDetails::Encoding), )); } SectorPlottingProgress::Encoded(time) => { + if let Some(metrics) = metrics { + metrics.sector_encoding_time.observe(time.as_secs_f64()); + metrics.sector_encoded.inc(); + } handlers.sector_update.call_simple(&( sector_index, SectorUpdate::Plotting(SectorPlottingDetails::Encoded(time)), @@ -533,6 +561,9 @@ async fn plot_single_sector_internal( return 
Ok((plotted_sector, sector)); } SectorPlottingProgress::Error { error } => { + if let Some(metrics) = metrics { + metrics.sector_plotting_error.inc(); + } handlers.sector_update.call_simple(&( sector_index, SectorUpdate::Plotting(SectorPlottingDetails::Error(error.clone())), @@ -559,6 +590,9 @@ async fn plot_single_sector_internal( // Take mutex briefly to make sure writing is allowed right now global_mutex.lock().await; + if let Some(metrics) = metrics { + metrics.sector_writing.inc(); + } handlers.sector_update.call_simple(&( sector_index, SectorUpdate::Plotting(SectorPlottingDetails::Writing), @@ -630,9 +664,14 @@ async fn plot_single_sector_internal( })??; } + let time = start.elapsed(); + if let Some(metrics) = metrics { + metrics.sector_writing_time.observe(time.as_secs_f64()); + metrics.sector_written.inc(); + } handlers.sector_update.call_simple(&( sector_index, - SectorUpdate::Plotting(SectorPlottingDetails::Written(start.elapsed())), + SectorUpdate::Plotting(SectorPlottingDetails::Written(time)), )); } @@ -652,6 +691,7 @@ pub(super) struct PlottingSchedulerOptions { // Delay between segment header being acknowledged by farmer and potentially triggering // replotting pub(super) new_segment_processing_delay: Duration, + pub(super) metrics: Option>, } pub(super) async fn plotting_scheduler( @@ -671,6 +711,7 @@ where sectors_metadata, sectors_to_plot_sender, new_segment_processing_delay, + metrics, } = plotting_scheduler_options; // Create a proxy channel with atomically updatable last archived segment that @@ -706,6 +747,7 @@ where sectors_metadata, archived_segments_receiver, sectors_to_plot_sender, + &metrics, ); select! 
{ @@ -770,6 +812,7 @@ async fn send_plotting_notifications( sectors_metadata: Arc>>, mut archived_segments_receiver: watch::Receiver, mut sectors_to_plot_sender: mpsc::Sender, + metrics: &Option>, ) -> Result<(), BackgroundTaskError> where NC: NodeClient, @@ -828,14 +871,20 @@ where "Sector expires soon #1, scheduling replotting" ); - handlers.sector_update.call_simple(&( - sector_index, - SectorUpdate::Expiration(if expires_at <= segment_index { - SectorExpirationDetails::Expired - } else { - SectorExpirationDetails::AboutToExpire - }), - )); + let expiration_details = if expires_at <= segment_index { + if let Some(metrics) = metrics { + metrics.update_sector_state(SectorState::Expired); + } + SectorExpirationDetails::Expired + } else { + if let Some(metrics) = metrics { + metrics.update_sector_state(SectorState::AboutToExpire); + } + SectorExpirationDetails::AboutToExpire + }; + handlers + .sector_update + .call_simple(&(sector_index, SectorUpdate::Expiration(expiration_details))); // Time to replot sectors_to_replot.push(SectorToReplot { @@ -898,13 +947,20 @@ where "Sector expires soon #2, scheduling replotting" ); + let expiration_details = if expires_at <= segment_index { + if let Some(metrics) = metrics { + metrics.update_sector_state(SectorState::Expired); + } + SectorExpirationDetails::Expired + } else { + if let Some(metrics) = metrics { + metrics.update_sector_state(SectorState::AboutToExpire); + } + SectorExpirationDetails::AboutToExpire + }; handlers.sector_update.call_simple(&( sector_index, - SectorUpdate::Expiration(if expires_at <= segment_index { - SectorExpirationDetails::Expired - } else { - SectorExpirationDetails::AboutToExpire - }), + SectorUpdate::Expiration(expiration_details), )); // Time to replot From 293404bdf9c463ef286d35134fb8c222f732ba71 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Mon, 15 Jul 2024 13:41:42 +0300 Subject: [PATCH 14/37] Add CPU plotter metrics --- .../bin/subspace-farmer/commands/cluster.rs | 2 +- 
.../commands/cluster/plotter.rs | 6 +- .../src/bin/subspace-farmer/commands/farm.rs | 10 +- crates/subspace-farmer/src/plotter/cpu.rs | 59 +++++++ .../src/plotter/cpu/metrics.rs | 148 ++++++++++++++++++ 5 files changed, 217 insertions(+), 8 deletions(-) create mode 100644 crates/subspace-farmer/src/plotter/cpu/metrics.rs diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster.rs index 3c1acd9e0c..a776df6cc7 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster.rs @@ -110,7 +110,7 @@ where ) .await .map_err(|error| anyhow!("Failed to connect to NATS server: {error}"))?; - let mut registry = Registry::default(); + let mut registry = Registry::with_prefix("subspace_farmer"); let mut tasks = FuturesUnordered::new(); diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/plotter.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/plotter.rs index 1ed22af3b9..c355b1fe37 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/plotter.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/plotter.rs @@ -83,7 +83,7 @@ pub(super) struct PlotterArgs { pub(super) async fn plotter( nats_client: NatsClient, - _registry: &mut Registry, + registry: &mut Registry, plotter_args: PlotterArgs, ) -> anyhow::Result>>>> where @@ -169,6 +169,7 @@ where Arc::clone(&global_mutex), kzg.clone(), erasure_coding.clone(), + Some(registry), )); let modern_cpu_plotter = Arc::new(CpuPlotter::<_, PosTable>::new( piece_getter.clone(), @@ -178,10 +179,9 @@ where Arc::clone(&global_mutex), kzg.clone(), erasure_coding.clone(), + Some(registry), )); - // TODO: Metrics - Ok(Box::pin(async move { select! 
{ result = plotter_service(&nats_client, &legacy_cpu_plotter, false).fuse() => { diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs index ab7e5c6d71..c6f7bef455 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs @@ -341,7 +341,7 @@ where let (farmer_cache, farmer_cache_worker) = FarmerCache::new(node_client.clone(), peer_id); // Metrics - let mut prometheus_metrics_registry = Registry::default(); + let mut registry = Registry::with_prefix("subspace_farmer"); let should_start_prometheus_server = !prometheus_listen_on.is_empty(); let node_client = CachingProxyNodeClient::new(node_client) @@ -363,7 +363,7 @@ where Arc::downgrade(&plotted_pieces), node_client.clone(), farmer_cache.clone(), - should_start_prometheus_server.then_some(&mut prometheus_metrics_registry), + should_start_prometheus_server.then_some(&mut registry), ) .map_err(|error| anyhow!("Failed to configure networking: {error}"))? 
}; @@ -506,6 +506,7 @@ where Arc::clone(&global_mutex), kzg.clone(), erasure_coding.clone(), + Some(&mut registry), )); let modern_cpu_plotter = Arc::new(CpuPlotter::<_, PosTable>::new( piece_getter.clone(), @@ -515,6 +516,7 @@ where Arc::clone(&global_mutex), kzg.clone(), erasure_coding.clone(), + Some(&mut registry), )); let (farms, plotting_delay_senders) = { @@ -525,7 +527,7 @@ where let (plotting_delay_senders, plotting_delay_receivers) = (0..disk_farms.len()) .map(|_| oneshot::channel()) .unzip::<_, _, Vec<_>, Vec<_>>(); - let registry = &Mutex::new(&mut prometheus_metrics_registry); + let registry = &Mutex::new(&mut registry); let mut farms = Vec::with_capacity(disk_farms.len()); let mut farms_stream = disk_farms @@ -749,7 +751,7 @@ where let _prometheus_worker = if should_start_prometheus_server { let prometheus_task = start_prometheus_metrics_server( prometheus_listen_on, - RegistryAdapter::PrometheusClient(prometheus_metrics_registry), + RegistryAdapter::PrometheusClient(registry), )?; let join_handle = tokio::spawn(prometheus_task); diff --git a/crates/subspace-farmer/src/plotter/cpu.rs b/crates/subspace-farmer/src/plotter/cpu.rs index e6ac3ad3fe..37492943a0 100644 --- a/crates/subspace-farmer/src/plotter/cpu.rs +++ b/crates/subspace-farmer/src/plotter/cpu.rs @@ -1,5 +1,8 @@ //! 
CPU plotter +pub mod metrics; + +use crate::plotter::cpu::metrics::CpuPlotterMetrics; use crate::plotter::{Plotter, SectorPlottingProgress}; use crate::thread_pool_manager::PlottingThreadPoolManager; use crate::utils::AsyncJoinOnDrop; @@ -9,6 +12,8 @@ use event_listener_primitives::{Bag, HandlerId}; use futures::channel::mpsc; use futures::stream::FuturesUnordered; use futures::{select, stream, FutureExt, Sink, SinkExt, StreamExt}; +use prometheus_client::registry::Registry; +use std::any::type_name; use std::error::Error; use std::fmt; use std::future::pending; @@ -52,6 +57,7 @@ pub struct CpuPlotter { tasks_sender: mpsc::Sender>, _background_tasks: AsyncJoinOnDrop<()>, abort_early: Arc, + metrics: Option>, _phantom: PhantomData, } @@ -105,6 +111,7 @@ where public_key, sector_index, handlers: Arc::clone(&self.handlers), + metrics: self.metrics.clone(), }; progress_updater @@ -171,6 +178,7 @@ where PosTable: Table, { /// Create new instance + #[allow(clippy::too_many_arguments)] pub fn new( piece_getter: PG, downloading_semaphore: Arc, @@ -179,6 +187,7 @@ where global_mutex: Arc>, kzg: Kzg, erasure_coding: ErasureCoding, + registry: Option<&mut Registry>, ) -> Self { let (tasks_sender, mut tasks_receiver) = mpsc::channel(1); @@ -209,6 +218,13 @@ where ); let abort_early = Arc::new(AtomicBool::new(false)); + let metrics = registry.map(|registry| { + Arc::new(CpuPlotterMetrics::new( + registry, + type_name::(), + plotting_thread_pool_manager.thread_pool_pairs(), + )) + }); Self { piece_getter, @@ -222,6 +238,7 @@ where tasks_sender, _background_tasks: background_tasks, abort_early, + metrics, _phantom: PhantomData, } } @@ -249,10 +266,15 @@ where PS: Sink + Unpin + Send + 'static, PS::Error: Error, { + if let Some(metrics) = &self.metrics { + metrics.sector_plotting.inc(); + } + let progress_updater = ProgressUpdater { public_key, sector_index, handlers: Arc::clone(&self.handlers), + metrics: self.metrics.clone(), }; let plotting_fut = { @@ -263,6 +285,7 @@ where let 
kzg = self.kzg.clone(); let erasure_coding = self.erasure_coding.clone(); let abort_early = Arc::clone(&self.abort_early); + let metrics = self.metrics.clone(); async move { // Downloading @@ -325,6 +348,9 @@ where // Plotting let (sector, plotted_sector) = { let thread_pools = plotting_thread_pool_manager.get_thread_pools().await; + if let Some(metrics) = &metrics { + metrics.plotting_capacity_used.inc(); + } let plotting_fn = || { let mut sector = Vec::new(); @@ -363,6 +389,9 @@ where ) .await { + if let Some(metrics) = &metrics { + metrics.plotting_capacity_used.dec(); + } return; } @@ -370,6 +399,10 @@ where let plotting_result = tokio::task::block_in_place(|| thread_pool.install(plotting_fn)); + drop(thread_pools); + if let Some(metrics) = &metrics { + metrics.plotting_capacity_used.dec(); + } match plotting_result { Ok(plotting_result) => { @@ -439,6 +472,7 @@ struct ProgressUpdater { public_key: PublicKey, sector_index: SectorIndex, handlers: Arc, + metrics: Option>, } impl ProgressUpdater { @@ -452,6 +486,31 @@ impl ProgressUpdater { PS: Sink + Unpin, PS::Error: Error, { + if let Some(metrics) = &self.metrics { + match &progress { + SectorPlottingProgress::Downloading => { + metrics.sector_downloading.inc(); + } + SectorPlottingProgress::Downloaded(time) => { + metrics.sector_downloading_time.observe(time.as_secs_f64()); + metrics.sector_downloaded.inc(); + } + SectorPlottingProgress::Encoding => { + metrics.sector_encoding.inc(); + } + SectorPlottingProgress::Encoded(time) => { + metrics.sector_encoding_time.observe(time.as_secs_f64()); + metrics.sector_encoded.inc(); + } + SectorPlottingProgress::Finished { time, .. } => { + metrics.sector_plotting_time.observe(time.as_secs_f64()); + metrics.sector_plotted.inc(); + } + SectorPlottingProgress::Error { .. 
} => { + metrics.sector_plotting_error.inc(); + } + } + } self.handlers.plotting_progress.call_simple( &self.public_key, &self.sector_index, diff --git a/crates/subspace-farmer/src/plotter/cpu/metrics.rs b/crates/subspace-farmer/src/plotter/cpu/metrics.rs new file mode 100644 index 0000000000..10f320c932 --- /dev/null +++ b/crates/subspace-farmer/src/plotter/cpu/metrics.rs @@ -0,0 +1,148 @@ +//! Metrics for CPU plotter + +use prometheus_client::metrics::counter::Counter; +use prometheus_client::metrics::gauge::Gauge; +use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use prometheus_client::registry::{Registry, Unit}; +use std::num::NonZeroUsize; +use std::sync::atomic::{AtomicI64, AtomicU64}; + +/// Metrics for CPU plotter +#[derive(Debug)] +pub(super) struct CpuPlotterMetrics { + pub(super) sector_downloading_time: Histogram, + pub(super) sector_encoding_time: Histogram, + pub(super) sector_plotting_time: Histogram, + pub(super) sector_downloading: Counter, + pub(super) sector_downloaded: Counter, + pub(super) sector_encoding: Counter, + pub(super) sector_encoded: Counter, + pub(super) sector_plotting: Counter, + pub(super) sector_plotted: Counter, + pub(super) sector_plotting_error: Counter, + pub(super) plotting_capacity_used: Gauge, +} + +impl CpuPlotterMetrics { + /// Create new instance + pub(super) fn new( + registry: &mut Registry, + subtype: &str, + total_capacity: NonZeroUsize, + ) -> Self { + let registry = registry + .sub_registry_with_prefix("plotter") + .sub_registry_with_label(("kind".into(), format!("cpu-{subtype}").into())); + + let sector_downloading_time = Histogram::new(exponential_buckets(0.1, 2.0, 15)); + registry.register_with_unit( + "sector_downloading_time", + "Sector downloading time", + Unit::Seconds, + sector_downloading_time.clone(), + ); + + let sector_encoding_time = Histogram::new(exponential_buckets(0.1, 2.0, 15)); + registry.register_with_unit( + "sector_encoding_time", + "Sector encoding time", + 
Unit::Seconds, + sector_encoding_time.clone(), + ); + + let sector_plotting_time = Histogram::new(exponential_buckets(0.1, 2.0, 15)); + registry.register_with_unit( + "sector_plotting_time", + "Sector plotting time", + Unit::Seconds, + sector_plotting_time.clone(), + ); + + let sector_downloading = Counter::default(); + registry.register_with_unit( + "sector_downloading_counter", + "Number of sectors being downloaded", + Unit::Other("sectors".to_string()), + sector_downloading.clone(), + ); + + let sector_downloaded = Counter::default(); + registry.register_with_unit( + "sector_downloaded_counter", + "Number of downloaded sectors", + Unit::Other("sectors".to_string()), + sector_downloaded.clone(), + ); + + let sector_encoding = Counter::default(); + registry.register_with_unit( + "sector_encoding_counter", + "Number of sectors being encoded", + Unit::Other("sectors".to_string()), + sector_encoding.clone(), + ); + + let sector_encoded = Counter::default(); + registry.register_with_unit( + "sector_encoded_counter", + "Number of encoded sectors", + Unit::Other("sectors".to_string()), + sector_encoded.clone(), + ); + + let sector_plotting = Counter::default(); + registry.register_with_unit( + "sector_plotting_counter", + "Number of sectors being plotted", + Unit::Other("sectors".to_string()), + sector_plotting.clone(), + ); + + let sector_plotted = Counter::default(); + registry.register_with_unit( + "sector_plotted_counter", + "Number of plotted sectors", + Unit::Other("sectors".to_string()), + sector_plotted.clone(), + ); + + let sector_plotting_error = Counter::default(); + registry.register_with_unit( + "sector_plotting_error_counter", + "Number of sector plotting failures", + Unit::Other("sectors".to_string()), + sector_plotting_error.clone(), + ); + + let plotting_capacity_total = Gauge::::default(); + plotting_capacity_total.set(total_capacity.get() as i64); + registry.register_with_unit( + "plotting_capacity_total", + "Plotting capacity total", + 
Unit::Other("sectors".to_string()), + plotting_capacity_total, + ); + + let plotting_capacity_used = Gauge::default(); + registry.register_with_unit( + "plotting_capacity_used", + "Plotting capacity used", + Unit::Other("sectors".to_string()), + plotting_capacity_used.clone(), + ); + + Self { + sector_downloading_time, + sector_encoding_time, + sector_plotting_time, + sector_downloading, + sector_downloaded, + sector_encoding, + sector_encoded, + sector_plotting, + sector_plotted, + sector_plotting_error, + plotting_capacity_used, + } + } +} From 77382e073093e545b4ee30a4bb5d4d2ade2adaf3 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Mon, 15 Jul 2024 14:23:35 +0300 Subject: [PATCH 15/37] Proving failure metric --- crates/subspace-farmer/src/farm.rs | 9 ++++++--- crates/subspace-farmer/src/single_disk_farm/farming.rs | 4 ++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/crates/subspace-farmer/src/farm.rs b/crates/subspace-farmer/src/farm.rs index c92ecaa7cc..c755c2d343 100644 --- a/crates/subspace-farmer/src/farm.rs +++ b/crates/subspace-farmer/src/farm.rs @@ -228,15 +228,18 @@ pub enum ProvingResult { /// Managed to prove within time limit, but node rejected solution, likely due to timeout on its /// end Rejected, + /// Proving failed altogether + Failed, } impl fmt::Display for ProvingResult { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match self { - ProvingResult::Success => "Success", - ProvingResult::Timeout => "Timeout", - ProvingResult::Rejected => "Rejected", + Self::Success => "Success", + Self::Timeout => "Timeout", + Self::Rejected => "Rejected", + Self::Failed => "Failed", }) } } diff --git a/crates/subspace-farmer/src/single_disk_farm/farming.rs b/crates/subspace-farmer/src/single_disk_farm/farming.rs index 0b616a1baa..a3864f4eb4 100644 --- a/crates/subspace-farmer/src/single_disk_farm/farming.rs +++ b/crates/subspace-farmer/src/single_disk_farm/farming.rs @@ -318,6 +318,10 @@ where let 
solution = match maybe_solution { Ok(solution) => solution, Err(error) => { + if let Some(metrics) = &metrics { + metrics + .observe_proving_time(&start.elapsed(), ProvingResult::Failed); + } error!(%slot, %sector_index, %error, "Failed to prove"); // Do not error completely as disk corruption or other reasons why // proving might fail From a4a66a3e87b4b83d24e7c81510e932d6d245d5fa Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Mon, 15 Jul 2024 14:38:50 +0300 Subject: [PATCH 16/37] Tiny formatting changes --- .../src/plotter/cpu/metrics.rs | 18 ++++----- .../src/single_disk_farm/metrics.rs | 37 +++++-------------- 2 files changed, 19 insertions(+), 36 deletions(-) diff --git a/crates/subspace-farmer/src/plotter/cpu/metrics.rs b/crates/subspace-farmer/src/plotter/cpu/metrics.rs index 10f320c932..17c9d6d1ac 100644 --- a/crates/subspace-farmer/src/plotter/cpu/metrics.rs +++ b/crates/subspace-farmer/src/plotter/cpu/metrics.rs @@ -62,7 +62,7 @@ impl CpuPlotterMetrics { registry.register_with_unit( "sector_downloading_counter", "Number of sectors being downloaded", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_downloading.clone(), ); @@ -70,7 +70,7 @@ impl CpuPlotterMetrics { registry.register_with_unit( "sector_downloaded_counter", "Number of downloaded sectors", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_downloaded.clone(), ); @@ -78,7 +78,7 @@ impl CpuPlotterMetrics { registry.register_with_unit( "sector_encoding_counter", "Number of sectors being encoded", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_encoding.clone(), ); @@ -86,7 +86,7 @@ impl CpuPlotterMetrics { registry.register_with_unit( "sector_encoded_counter", "Number of encoded sectors", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_encoded.clone(), ); @@ -94,7 +94,7 @@ impl CpuPlotterMetrics { registry.register_with_unit( "sector_plotting_counter", 
"Number of sectors being plotted", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_plotting.clone(), ); @@ -102,7 +102,7 @@ impl CpuPlotterMetrics { registry.register_with_unit( "sector_plotted_counter", "Number of plotted sectors", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_plotted.clone(), ); @@ -110,7 +110,7 @@ impl CpuPlotterMetrics { registry.register_with_unit( "sector_plotting_error_counter", "Number of sector plotting failures", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_plotting_error.clone(), ); @@ -119,7 +119,7 @@ impl CpuPlotterMetrics { registry.register_with_unit( "plotting_capacity_total", "Plotting capacity total", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), plotting_capacity_total, ); @@ -127,7 +127,7 @@ impl CpuPlotterMetrics { registry.register_with_unit( "plotting_capacity_used", "Plotting capacity used", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), plotting_capacity_used.clone(), ); diff --git a/crates/subspace-farmer/src/single_disk_farm/metrics.rs b/crates/subspace-farmer/src/single_disk_farm/metrics.rs index 93e435e541..2313d39d64 100644 --- a/crates/subspace-farmer/src/single_disk_farm/metrics.rs +++ b/crates/subspace-farmer/src/single_disk_farm/metrics.rs @@ -66,7 +66,6 @@ impl SingleDiskFarmMetrics { .sub_registry_with_label(("farm_id".into(), farm_id.to_string().into())); let auditing_time = Histogram::new(exponential_buckets(0.0002, 2.0, 15)); - sub_registry.register_with_unit( "auditing_time", "Auditing time", @@ -77,7 +76,6 @@ impl SingleDiskFarmMetrics { let proving_time = Family::<_, _>::new_with_constructor(|| { Histogram::new(exponential_buckets(0.0002, 2.0, 15)) }); - sub_registry.register_with_unit( "proving_time", "Proving time", @@ -86,7 +84,6 @@ impl SingleDiskFarmMetrics { ); let farming_errors = Family::default(); - sub_registry.register( 
"farming_errors", "Non-fatal farming errors", @@ -94,7 +91,6 @@ impl SingleDiskFarmMetrics { ); let sector_downloading_time = Histogram::new(exponential_buckets(0.1, 2.0, 15)); - sub_registry.register_with_unit( "sector_downloading_time", "Sector downloading time", @@ -103,7 +99,6 @@ impl SingleDiskFarmMetrics { ); let sector_encoding_time = Histogram::new(exponential_buckets(0.1, 2.0, 15)); - sub_registry.register_with_unit( "sector_encoding_time", "Sector encoding time", @@ -112,7 +107,6 @@ impl SingleDiskFarmMetrics { ); let sector_writing_time = Histogram::new(exponential_buckets(0.0002, 2.0, 15)); - sub_registry.register_with_unit( "sector_writing_time", "Sector writing time", @@ -121,7 +115,6 @@ impl SingleDiskFarmMetrics { ); let sector_plotting_time = Histogram::new(exponential_buckets(0.1, 2.0, 15)); - sub_registry.register_with_unit( "sector_plotting_time", "Sector plotting time", @@ -130,92 +123,82 @@ impl SingleDiskFarmMetrics { ); let sectors_total = Family::default(); - sub_registry.register_with_unit( "sectors_total", "Total number of sectors with corresponding state", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sectors_total.clone(), ); let sector_downloading = Counter::default(); - sub_registry.register_with_unit( "sector_downloading_counter", "Number of sectors being downloaded", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_downloading.clone(), ); let sector_downloaded = Counter::default(); - sub_registry.register_with_unit( "sector_downloaded_counter", "Number of downloaded sectors", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_downloaded.clone(), ); let sector_encoding = Counter::default(); - sub_registry.register_with_unit( "sector_encoding_counter", "Number of sectors being encoded", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_encoding.clone(), ); let sector_encoded = Counter::default(); - 
sub_registry.register_with_unit( "sector_encoded_counter", "Number of encoded sectors", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_encoded.clone(), ); let sector_writing = Counter::default(); - sub_registry.register_with_unit( "sector_writing_counter", "Number of sectors being written", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_writing.clone(), ); let sector_written = Counter::default(); - sub_registry.register_with_unit( "sector_written_counter", "Number of written sectors", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_written.clone(), ); let sector_plotting = Counter::default(); - sub_registry.register_with_unit( "sector_plotting_counter", "Number of sectors being plotted", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_plotting.clone(), ); let sector_plotted = Counter::default(); - sub_registry.register_with_unit( "sector_plotted_counter", "Number of plotted sectors", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_plotted.clone(), ); let sector_plotting_error = Counter::default(); - sub_registry.register_with_unit( "sector_plotting_error_counter", "Number of sector plotting failures", - Unit::Other("sectors".to_string()), + Unit::Other("Sectors".to_string()), sector_plotting_error.clone(), ); From 7b1d5046afc02feb005700426b241d02c504bcb0 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Mon, 15 Jul 2024 15:26:24 +0300 Subject: [PATCH 17/37] Add farmer cache metrics --- .../commands/cluster/controller.rs | 3 +- .../src/bin/subspace-farmer/commands/farm.rs | 6 +- crates/subspace-farmer/src/farmer_cache.rs | 61 +++++++++++++++- .../src/farmer_cache/metrics.rs | 71 +++++++++++++++++++ .../subspace-farmer/src/farmer_cache/tests.rs | 4 +- 5 files changed, 137 insertions(+), 8 deletions(-) create mode 100644 crates/subspace-farmer/src/farmer_cache/metrics.rs diff --git 
a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/controller.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/controller.rs index 01fa23a169..4fa07dbf56 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/controller.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/controller.rs @@ -136,7 +136,8 @@ pub(super) async fn controller( let peer_id = keypair.public().to_peer_id(); let instance = peer_id.to_string(); - let (farmer_cache, farmer_cache_worker) = FarmerCache::new(node_client.clone(), peer_id); + let (farmer_cache, farmer_cache_worker) = + FarmerCache::new(node_client.clone(), peer_id, Some(registry)); // TODO: Metrics diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs index c6f7bef455..c2c70d7f55 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs @@ -338,12 +338,12 @@ where let keypair = derive_libp2p_keypair(identity.secret_key()); let peer_id = keypair.public().to_peer_id(); - let (farmer_cache, farmer_cache_worker) = FarmerCache::new(node_client.clone(), peer_id); - - // Metrics let mut registry = Registry::with_prefix("subspace_farmer"); let should_start_prometheus_server = !prometheus_listen_on.is_empty(); + let (farmer_cache, farmer_cache_worker) = + FarmerCache::new(node_client.clone(), peer_id, Some(&mut registry)); + let node_client = CachingProxyNodeClient::new(node_client) .await .map_err(|error| anyhow!("Failed to create caching proxy node client: {error}"))?; diff --git a/crates/subspace-farmer/src/farmer_cache.rs b/crates/subspace-farmer/src/farmer_cache.rs index 7a2f24de9e..b89059cc72 100644 --- a/crates/subspace-farmer/src/farmer_cache.rs +++ b/crates/subspace-farmer/src/farmer_cache.rs @@ -3,16 +3,19 @@ //! 
Farmer cache is a container that orchestrates a bunch of piece and plot caches that together //! persist pieces in a way that is easy to retrieve comparing to decoding pieces from plots. +mod metrics; #[cfg(test)] mod tests; use crate::farm::{MaybePieceStoredResult, PieceCache, PieceCacheId, PieceCacheOffset, PlotCache}; +use crate::farmer_cache::metrics::FarmerCacheMetrics; use crate::node_client::NodeClient; use crate::utils::run_future_in_dedicated_thread; use async_lock::RwLock as AsyncRwLock; use event_listener_primitives::{Bag, HandlerId}; use futures::stream::{FuturesOrdered, FuturesUnordered}; use futures::{select, stream, FutureExt, StreamExt}; +use prometheus_client::registry::Registry; use rayon::prelude::*; use std::collections::{HashMap, VecDeque}; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -84,6 +87,7 @@ where plot_caches: Arc, handlers: Arc, worker_receiver: Option>, + metrics: Option>, } impl FarmerCacheWorker @@ -233,6 +237,10 @@ where debug!("Collecting pieces that were in the cache before"); + if let Some(metrics) = &self.metrics { + metrics.piece_cache_capacity_total.set(0); + metrics.piece_cache_capacity_used.set(0); + } // Build cache state of all backends let maybe_caches_futures = stored_pieces .into_iter() @@ -241,6 +249,11 @@ where .enumerate() .map( |(index, ((mut stored_pieces, mut free_offsets), new_cache))| { + if let Some(metrics) = &self.metrics { + metrics + .piece_cache_capacity_total + .inc_by(new_cache.max_num_elements() as i64); + } run_future_in_dedicated_thread( move || async move { // Hack with first collecting into `Option` with `Option::take()` call @@ -411,6 +424,14 @@ where }); }); + if let Some(metrics) = &self.metrics { + for cache in &mut caches { + metrics + .piece_cache_capacity_used + .inc_by(cache.stored_pieces.len() as i64); + } + } + // Store whatever correct pieces are immediately available after restart self.piece_caches.write().await.clone_from(&caches); @@ -488,6 +509,9 @@ where ); return false; } + 
if let Some(metrics) = &self.metrics { + metrics.piece_cache_capacity_used.inc(); + } cache .stored_pieces .insert(RecordKey::from(piece_index.to_multihash()), offset); @@ -798,6 +822,9 @@ where %offset, "Successfully stored piece in cache" ); + if let Some(metrics) = &self.metrics { + metrics.piece_cache_capacity_used.inc(); + } cache.stored_pieces.insert(record_key, offset); } return; @@ -905,6 +932,7 @@ pub struct FarmerCache { handlers: Arc, // We do not want to increase capacity unnecessarily on clone worker_sender: Arc>, + metrics: Option>, } impl FarmerCache { @@ -912,7 +940,11 @@ impl FarmerCache { /// /// NOTE: Returned future is async, but does blocking operations and should be running in /// dedicated thread. - pub fn new(node_client: NC, peer_id: PeerId) -> (Self, FarmerCacheWorker) + pub fn new( + node_client: NC, + peer_id: PeerId, + registry: Option<&mut Registry>, + ) -> (Self, FarmerCacheWorker) where NC: NodeClient, { @@ -924,6 +956,7 @@ impl FarmerCache { caches: AsyncRwLock::default(), next_plot_cache: AtomicUsize::new(0), }); + let metrics = registry.map(|registry| Arc::new(FarmerCacheMetrics::new(registry))); let instance = Self { peer_id, @@ -931,6 +964,7 @@ impl FarmerCache { plot_caches: Arc::clone(&plot_caches), handlers: Arc::clone(&handlers), worker_sender: Arc::new(worker_sender), + metrics: metrics.clone(), }; let worker = FarmerCacheWorker { peer_id, @@ -939,6 +973,7 @@ impl FarmerCache { plot_caches, handlers, worker_receiver: Some(worker_receiver), + metrics, }; (instance, worker) @@ -952,7 +987,20 @@ impl FarmerCache { }; match cache.backend.read_piece(offset).await { Ok(maybe_piece) => { - return maybe_piece.map(|(_piece_index, piece)| piece); + return match maybe_piece { + Some((_piece_index, piece)) => { + if let Some(metrics) = &self.metrics { + metrics.cache_hit.inc(); + } + Some(piece) + } + None => { + if let Some(metrics) = &self.metrics { + metrics.cache_miss.inc(); + } + None + } + }; } Err(error) => { error!( @@ -971,6 
+1019,9 @@ impl FarmerCache { trace!(%error, "Failed to send ForgetKey command to worker"); } + if let Some(metrics) = &self.metrics { + metrics.cache_error.inc(); + } return None; } } @@ -978,10 +1029,16 @@ impl FarmerCache { for cache in self.plot_caches.caches.read().await.iter() { if let Ok(Some(piece)) = cache.read_piece(&key).await { + if let Some(metrics) = &self.metrics { + metrics.cache_hit.inc(); + } return Some(piece); } } + if let Some(metrics) = &self.metrics { + metrics.cache_miss.inc(); + } None } diff --git a/crates/subspace-farmer/src/farmer_cache/metrics.rs b/crates/subspace-farmer/src/farmer_cache/metrics.rs new file mode 100644 index 0000000000..1d05cb1707 --- /dev/null +++ b/crates/subspace-farmer/src/farmer_cache/metrics.rs @@ -0,0 +1,71 @@ +//! Metrics for farmer cache + +use prometheus_client::metrics::counter::Counter; +use prometheus_client::metrics::gauge::Gauge; +use prometheus_client::registry::{Registry, Unit}; +use std::sync::atomic::{AtomicI64, AtomicU64}; + +/// Metrics for farmer cache +#[derive(Debug)] +pub(super) struct FarmerCacheMetrics { + pub(super) cache_hit: Counter, + pub(super) cache_miss: Counter, + pub(super) cache_error: Counter, + pub(super) piece_cache_capacity_total: Gauge, + pub(super) piece_cache_capacity_used: Gauge, +} + +impl FarmerCacheMetrics { + /// Create new instance + pub(super) fn new(registry: &mut Registry) -> Self { + let registry = registry.sub_registry_with_prefix("farmer_cache"); + + let cache_hit = Counter::default(); + registry.register_with_unit( + "cache_hit", + "Cache hit", + Unit::Other("Requests".to_string()), + cache_hit.clone(), + ); + + let cache_miss = Counter::default(); + registry.register_with_unit( + "cache_miss", + "Cache miss", + Unit::Other("Requests".to_string()), + cache_miss.clone(), + ); + + let cache_error = Counter::default(); + registry.register_with_unit( + "cache_error", + "Cache error", + Unit::Other("Requests".to_string()), + cache_error.clone(), + ); + + let 
piece_cache_capacity_total = Gauge::default(); + registry.register_with_unit( + "piece_cache_capacity_total", + "Piece cache capacity total", + Unit::Other("Pieces".to_string()), + piece_cache_capacity_total.clone(), + ); + + let piece_cache_capacity_used = Gauge::default(); + registry.register_with_unit( + "piece_cache_capacity_used", + "Piece cache capacity used", + Unit::Other("Pieces".to_string()), + piece_cache_capacity_used.clone(), + ); + + Self { + cache_hit, + cache_miss, + cache_error, + piece_cache_capacity_total, + piece_cache_capacity_used, + } + } +} diff --git a/crates/subspace-farmer/src/farmer_cache/tests.rs b/crates/subspace-farmer/src/farmer_cache/tests.rs index 1861d4e193..e49887a5da 100644 --- a/crates/subspace-farmer/src/farmer_cache/tests.rs +++ b/crates/subspace-farmer/src/farmer_cache/tests.rs @@ -185,7 +185,7 @@ async fn basic() { { let (farmer_cache, farmer_cache_worker) = - FarmerCache::new(node_client.clone(), public_key.to_peer_id()); + FarmerCache::new(node_client.clone(), public_key.to_peer_id(), None); let farmer_cache_worker_exited = tokio::spawn(farmer_cache_worker.run(piece_getter.clone())); @@ -385,7 +385,7 @@ async fn basic() { pieces.lock().clear(); let (farmer_cache, farmer_cache_worker) = - FarmerCache::new(node_client.clone(), public_key.to_peer_id()); + FarmerCache::new(node_client.clone(), public_key.to_peer_id(), None); let farmer_cache_worker_exited = tokio::spawn(farmer_cache_worker.run(piece_getter)); From 8b046bd1cbfcfa747be7e20fd5c45554b3d6fd89 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Tue, 16 Jul 2024 04:16:18 +0300 Subject: [PATCH 18/37] Add disk piece cache metrics, extend farm cache metrics --- .../subspace-farmer/commands/cluster/cache.rs | 6 +- .../subspace-farmer/src/disk_piece_cache.rs | 44 +++++++++- .../src/disk_piece_cache/metrics.rs | 87 +++++++++++++++++++ .../src/disk_piece_cache/tests.rs | 6 +- crates/subspace-farmer/src/farmer_cache.rs | 16 ++-- .../src/farmer_cache/metrics.rs | 54 
++++++++---- .../subspace-farmer/src/farmer_cache/tests.rs | 8 +- .../subspace-farmer/src/single_disk_farm.rs | 47 +++++++--- .../src/single_disk_farm/piece_cache.rs | 8 +- 9 files changed, 221 insertions(+), 55 deletions(-) create mode 100644 crates/subspace-farmer/src/disk_piece_cache/metrics.rs diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/cache.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/cache.rs index f9fa44e22c..8efac9d909 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/cache.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/cache.rs @@ -115,7 +115,7 @@ pub(super) struct CacheArgs { pub(super) async fn cache( nats_client: NatsClient, - _registry: &mut Registry, + registry: &mut Registry, cache_args: CacheArgs, ) -> anyhow::Result>>>> { let CacheArgs { @@ -157,8 +157,6 @@ pub(super) async fn cache( None }; - // TODO: Metrics - let caches = disk_caches .iter() .map(|disk_cache| { @@ -166,6 +164,8 @@ pub(super) async fn cache( &disk_cache.directory, u32::try_from(disk_cache.allocated_space / DiskPieceCache::element_size() as u64) .unwrap_or(u32::MAX), + None, + Some(registry), ) .map_err(|error| { anyhow!( diff --git a/crates/subspace-farmer/src/disk_piece_cache.rs b/crates/subspace-farmer/src/disk_piece_cache.rs index 0c6eaad14c..64e228432c 100644 --- a/crates/subspace-farmer/src/disk_piece_cache.rs +++ b/crates/subspace-farmer/src/disk_piece_cache.rs @@ -1,8 +1,10 @@ //! 
Disk piece cache implementation +mod metrics; #[cfg(test)] mod tests; +use crate::disk_piece_cache::metrics::DiskPieceCacheMetrics; use crate::farm; use crate::farm::{FarmError, PieceCacheId, PieceCacheOffset}; #[cfg(windows)] @@ -14,6 +16,7 @@ use bytes::BytesMut; use futures::channel::mpsc; use futures::{stream, SinkExt, Stream, StreamExt}; use parking_lot::Mutex; +use prometheus_client::registry::Registry; #[cfg(not(windows))] use std::fs::{File, OpenOptions}; use std::path::Path; @@ -67,6 +70,7 @@ struct Inner { #[cfg(windows)] file: UnbufferedIoFileWindows, max_num_elements: u32, + metrics: Option, } /// Dedicated piece cache stored on one disk, is used both to accelerate DSN queries and to plot @@ -182,7 +186,12 @@ impl DiskPieceCache { pub(crate) const FILE_NAME: &'static str = "piece_cache.bin"; /// Open cache, capacity is measured in elements of [`DiskPieceCache::element_size()`] size - pub fn open(directory: &Path, capacity: u32) -> Result { + pub fn open( + directory: &Path, + capacity: u32, + id: Option, + registry: Option<&mut Registry>, + ) -> Result { if capacity == 0 { return Err(DiskPieceCacheError::ZeroCapacity); } @@ -214,12 +223,16 @@ impl DiskPieceCache { file.set_len(expected_size)?; } + // ID for cache is ephemeral unless provided explicitly + let id = id.unwrap_or_else(PieceCacheId::new); + let metrics = registry.map(|registry| DiskPieceCacheMetrics::new(registry, &id, capacity)); + Ok(Self { inner: Arc::new(Inner { - // ID for cache is ephemeral - id: PieceCacheId::new(), + id, file, max_num_elements: capacity, + metrics, }), }) } @@ -237,6 +250,15 @@ impl DiskPieceCache { &self, ) -> impl ExactSizeIterator)> + '_ { let mut element = vec![0; Self::element_size() as usize]; + let count_total = self + .inner + .metrics + .as_ref() + .map(|metrics| { + metrics.contents.inc(); + metrics.capacity_used.get() == 0 + }) + .unwrap_or_default(); let mut current_skip = 0; // TODO: Parallelize or read in larger batches @@ -250,6 +272,9 @@ impl 
DiskPieceCache { if maybe_piece_index.is_none() { current_skip += 1; } else { + if count_total && let Some(metrics) = &self.inner.metrics { + metrics.capacity_used.inc(); + } current_skip = 0; } @@ -284,6 +309,13 @@ impl DiskPieceCache { }); } + if let Some(metrics) = &self.inner.metrics { + metrics.write_piece.inc(); + let capacity_used = i64::from(offset + 1); + if metrics.capacity_used.get() != capacity_used { + metrics.capacity_used.set(capacity_used); + } + } let element_offset = u64::from(offset) * u64::from(Self::element_size()); let piece_index_bytes = piece_index.to_bytes(); @@ -320,6 +352,9 @@ impl DiskPieceCache { }); } + if let Some(metrics) = &self.inner.metrics { + metrics.read_piece_index.inc(); + } self.read_piece_internal(offset, &mut vec![0; Self::element_size() as usize]) } @@ -342,6 +377,9 @@ impl DiskPieceCache { }); } + if let Some(metrics) = &self.inner.metrics { + metrics.read_piece.inc(); + } let mut element = BytesMut::zeroed(Self::element_size() as usize); if let Some(piece_index) = self.read_piece_internal(offset, &mut element)? { let element = element.freeze(); diff --git a/crates/subspace-farmer/src/disk_piece_cache/metrics.rs b/crates/subspace-farmer/src/disk_piece_cache/metrics.rs new file mode 100644 index 0000000000..e8172bb6ac --- /dev/null +++ b/crates/subspace-farmer/src/disk_piece_cache/metrics.rs @@ -0,0 +1,87 @@ +//! 
Metrics for disk piece cache + +use crate::farm::PieceCacheId; +use prometheus_client::metrics::counter::Counter; +use prometheus_client::metrics::gauge::Gauge; +use prometheus_client::registry::{Registry, Unit}; +use std::sync::atomic::{AtomicI64, AtomicU64}; + +/// Metrics for disk piece cache +#[derive(Debug)] +pub(super) struct DiskPieceCacheMetrics { + pub(super) contents: Counter, + pub(super) read_piece: Counter, + pub(super) read_piece_index: Counter, + pub(super) write_piece: Counter, + pub(super) capacity_used: Gauge, +} + +impl DiskPieceCacheMetrics { + /// Create new instance + pub(super) fn new( + registry: &mut Registry, + cache_id: &PieceCacheId, + max_num_elements: u32, + ) -> Self { + let registry = registry + .sub_registry_with_prefix("disk_piece_cache") + .sub_registry_with_label(("cache_id".into(), cache_id.to_string().into())); + + let contents = Counter::default(); + registry.register_with_unit( + "contents", + "Contents requests", + Unit::Other("Requests".to_string()), + contents.clone(), + ); + + let read_piece = Counter::default(); + registry.register_with_unit( + "read_piece", + "Read piece requests", + Unit::Other("Pieces".to_string()), + read_piece.clone(), + ); + + let read_piece_index = Counter::default(); + registry.register_with_unit( + "read_piece_index", + "Read piece index requests", + Unit::Other("Requests".to_string()), + read_piece_index.clone(), + ); + + let write_piece = Counter::default(); + registry.register_with_unit( + "write_piece", + "Write piece requests", + Unit::Other("Pieces".to_string()), + write_piece.clone(), + ); + + let capacity_total = Gauge::::default(); + capacity_total.set(i64::from(max_num_elements)); + registry.register_with_unit( + "capacity_total", + "Piece cache capacity total", + Unit::Other("Pieces".to_string()), + capacity_total, + ); + + let capacity_used = Gauge::default(); + registry.register_with_unit( + "capacity_used", + "Piece cache capacity used", + Unit::Other("Pieces".to_string()), + 
capacity_used.clone(), + ); + + Self { + contents, + read_piece, + read_piece_index, + write_piece, + capacity_used, + } + } +} diff --git a/crates/subspace-farmer/src/disk_piece_cache/tests.rs b/crates/subspace-farmer/src/disk_piece_cache/tests.rs index 9038e77908..4db64b695c 100644 --- a/crates/subspace-farmer/src/disk_piece_cache/tests.rs +++ b/crates/subspace-farmer/src/disk_piece_cache/tests.rs @@ -8,7 +8,7 @@ use tempfile::tempdir; fn basic() { let path = tempdir().unwrap(); { - let disk_piece_cache = DiskPieceCache::open(path.as_ref(), 2).unwrap(); + let disk_piece_cache = DiskPieceCache::open(path.as_ref(), 2, None, None).unwrap(); // Initially empty assert_eq!( @@ -115,7 +115,7 @@ fn basic() { // Reopening works { - let disk_piece_cache = DiskPieceCache::open(path.as_ref(), 2).unwrap(); + let disk_piece_cache = DiskPieceCache::open(path.as_ref(), 2, None, None).unwrap(); // Two pieces stored assert_eq!( disk_piece_cache @@ -130,7 +130,7 @@ fn basic() { { DiskPieceCache::wipe(path.as_ref()).unwrap(); - let disk_piece_cache = DiskPieceCache::open(path.as_ref(), 2).unwrap(); + let disk_piece_cache = DiskPieceCache::open(path.as_ref(), 2, None, None).unwrap(); // Wiped successfully assert_eq!( disk_piece_cache diff --git a/crates/subspace-farmer/src/farmer_cache.rs b/crates/subspace-farmer/src/farmer_cache.rs index b89059cc72..b0f7af23df 100644 --- a/crates/subspace-farmer/src/farmer_cache.rs +++ b/crates/subspace-farmer/src/farmer_cache.rs @@ -990,13 +990,13 @@ impl FarmerCache { return match maybe_piece { Some((_piece_index, piece)) => { if let Some(metrics) = &self.metrics { - metrics.cache_hit.inc(); + metrics.cache_get_hit.inc(); } Some(piece) } None => { if let Some(metrics) = &self.metrics { - metrics.cache_miss.inc(); + metrics.cache_get_miss.inc(); } None } @@ -1020,7 +1020,7 @@ impl FarmerCache { } if let Some(metrics) = &self.metrics { - metrics.cache_error.inc(); + metrics.cache_get_error.inc(); } return None; } @@ -1030,14 +1030,14 @@ impl 
FarmerCache { for cache in self.plot_caches.caches.read().await.iter() { if let Ok(Some(piece)) = cache.read_piece(&key).await { if let Some(metrics) = &self.metrics { - metrics.cache_hit.inc(); + metrics.cache_get_hit.inc(); } return Some(piece); } } if let Some(metrics) = &self.metrics { - metrics.cache_miss.inc(); + metrics.cache_get_miss.inc(); } None } @@ -1053,9 +1053,15 @@ impl FarmerCache { let Some(&offset) = cache.stored_pieces.get(&key) else { continue; }; + if let Some(metrics) = &self.metrics { + metrics.cache_find_hit.inc(); + } return Some((*cache.backend.id(), offset)); } + if let Some(metrics) = &self.metrics { + metrics.cache_find_miss.inc(); + } None } diff --git a/crates/subspace-farmer/src/farmer_cache/metrics.rs b/crates/subspace-farmer/src/farmer_cache/metrics.rs index 1d05cb1707..9f1fc53ec1 100644 --- a/crates/subspace-farmer/src/farmer_cache/metrics.rs +++ b/crates/subspace-farmer/src/farmer_cache/metrics.rs @@ -8,9 +8,11 @@ use std::sync::atomic::{AtomicI64, AtomicU64}; /// Metrics for farmer cache #[derive(Debug)] pub(super) struct FarmerCacheMetrics { - pub(super) cache_hit: Counter, - pub(super) cache_miss: Counter, - pub(super) cache_error: Counter, + pub(super) cache_get_hit: Counter, + pub(super) cache_get_miss: Counter, + pub(super) cache_get_error: Counter, + pub(super) cache_find_hit: Counter, + pub(super) cache_find_miss: Counter, pub(super) piece_cache_capacity_total: Gauge, pub(super) piece_cache_capacity_used: Gauge, } @@ -20,28 +22,44 @@ impl FarmerCacheMetrics { pub(super) fn new(registry: &mut Registry) -> Self { let registry = registry.sub_registry_with_prefix("farmer_cache"); - let cache_hit = Counter::default(); + let cache_get_hit = Counter::default(); registry.register_with_unit( - "cache_hit", - "Cache hit", + "cache_get_hit", + "Cache get hit", Unit::Other("Requests".to_string()), - cache_hit.clone(), + cache_get_hit.clone(), ); - let cache_miss = Counter::default(); + let cache_get_miss = Counter::default(); 
registry.register_with_unit( - "cache_miss", - "Cache miss", + "cache_get_miss", + "Cache get miss", Unit::Other("Requests".to_string()), - cache_miss.clone(), + cache_get_miss.clone(), ); - let cache_error = Counter::default(); + let cache_get_error = Counter::default(); registry.register_with_unit( "cache_error", - "Cache error", + "Cache get error", Unit::Other("Requests".to_string()), - cache_error.clone(), + cache_get_error.clone(), + ); + + let cache_find_hit = Counter::default(); + registry.register_with_unit( + "cache_find_hit", + "Cache find hit", + Unit::Other("Requests".to_string()), + cache_find_hit.clone(), + ); + + let cache_find_miss = Counter::default(); + registry.register_with_unit( + "cache_find_miss", + "Cache find miss", + Unit::Other("Requests".to_string()), + cache_find_miss.clone(), ); let piece_cache_capacity_total = Gauge::default(); @@ -61,9 +79,11 @@ impl FarmerCacheMetrics { ); Self { - cache_hit, - cache_miss, - cache_error, + cache_get_hit, + cache_get_miss, + cache_get_error, + cache_find_hit, + cache_find_miss, piece_cache_capacity_total, piece_cache_capacity_used, } diff --git a/crates/subspace-farmer/src/farmer_cache/tests.rs b/crates/subspace-farmer/src/farmer_cache/tests.rs index e49887a5da..2ffd343d27 100644 --- a/crates/subspace-farmer/src/farmer_cache/tests.rs +++ b/crates/subspace-farmer/src/farmer_cache/tests.rs @@ -207,8 +207,8 @@ async fn basic() { farmer_cache .replace_backing_caches( vec![ - Arc::new(DiskPieceCache::open(path1.as_ref(), 1).unwrap()), - Arc::new(DiskPieceCache::open(path2.as_ref(), 1).unwrap()), + Arc::new(DiskPieceCache::open(path1.as_ref(), 1, None, None).unwrap()), + Arc::new(DiskPieceCache::open(path2.as_ref(), 1, None, None).unwrap()), ], vec![], ) @@ -407,8 +407,8 @@ async fn basic() { farmer_cache .replace_backing_caches( vec![ - Arc::new(DiskPieceCache::open(path1.as_ref(), 1).unwrap()), - Arc::new(DiskPieceCache::open(path2.as_ref(), 1).unwrap()), + Arc::new(DiskPieceCache::open(path1.as_ref(), 
1, None, None).unwrap()), + Arc::new(DiskPieceCache::open(path2.as_ref(), 1, None, None).unwrap()), ], vec![], ) diff --git a/crates/subspace-farmer/src/single_disk_farm.rs b/crates/subspace-farmer/src/single_disk_farm.rs index 72c64bd62e..d3114b1ffc 100644 --- a/crates/subspace-farmer/src/single_disk_farm.rs +++ b/crates/subspace-farmer/src/single_disk_farm.rs @@ -17,8 +17,8 @@ pub mod unbuffered_io_file_windows; use crate::disk_piece_cache::{DiskPieceCache, DiskPieceCacheError}; use crate::farm::{ - Farm, FarmId, FarmingError, FarmingNotification, HandlerFn, PieceReader, PlottedSectors, - SectorUpdate, + Farm, FarmId, FarmingError, FarmingNotification, HandlerFn, PieceCacheId, PieceReader, + PlottedSectors, SectorUpdate, }; use crate::node_client::NodeClient; use crate::plotter::Plotter; @@ -86,6 +86,7 @@ use subspace_rpc_primitives::{FarmerAppInfo, SolutionResponse}; use thiserror::Error; use tokio::runtime::Handle; use tokio::sync::{broadcast, Barrier, Semaphore}; +use tokio::task; use tracing::{debug, error, info, trace, warn, Instrument, Span}; // Refuse to compile on non-64-bit platforms, offsets may fail on those when converting from u64 to @@ -684,7 +685,7 @@ struct SingleDiskFarmInit { metadata_header: PlotMetadataHeader, target_sector_count: u16, sectors_metadata: Arc>>, - piece_cache: SingleDiskPieceCache, + piece_cache_capacity: u32, plot_cache: DiskPlotCache, } @@ -839,10 +840,36 @@ impl SingleDiskFarm { metadata_header, target_sector_count, sectors_metadata, - piece_cache, + piece_cache_capacity, plot_cache, } = single_disk_farm_init; + let piece_cache = { + // Convert farm ID into cache ID for single disk farm + let FarmId::Ulid(id) = *single_disk_farm_info.id(); + let id = PieceCacheId::Ulid(id); + + SingleDiskPieceCache::new( + id, + if piece_cache_capacity == 0 { + None + } else { + Some(task::block_in_place(|| { + if let Some(registry) = registry { + DiskPieceCache::open( + &directory, + piece_cache_capacity, + Some(id), + 
Some(*registry.lock()), + ) + } else { + DiskPieceCache::open(&directory, piece_cache_capacity, Some(id), None) + } + })?) + }, + ) + }; + let public_key = *single_disk_farm_info.public_key(); let pieces_in_sector = single_disk_farm_info.pieces_in_sector(); let sector_size = sector_size(pieces_in_sector); @@ -1397,7 +1424,7 @@ impl SingleDiskFarm { plot_file_size.div_ceil(DISK_SECTOR_SIZE as u64) * DISK_SECTOR_SIZE as u64; // Remaining space will be used for caching purposes - let cache_capacity = if cache_percentage > 0 { + let piece_cache_capacity = if cache_percentage > 0 { let cache_space = allocated_space - fixed_space_usage - plot_file_size @@ -1553,14 +1580,6 @@ impl SingleDiskFarm { let plot_file = Arc::new(plot_file); - let piece_cache = SingleDiskPieceCache::new( - *single_disk_farm_info.id(), - if cache_capacity == 0 { - None - } else { - Some(DiskPieceCache::open(directory, cache_capacity)?) - }, - ); let plot_cache = DiskPlotCache::new( &plot_file, §ors_metadata, @@ -1577,7 +1596,7 @@ impl SingleDiskFarm { metadata_header, target_sector_count, sectors_metadata, - piece_cache, + piece_cache_capacity, plot_cache, }) } diff --git a/crates/subspace-farmer/src/single_disk_farm/piece_cache.rs b/crates/subspace-farmer/src/single_disk_farm/piece_cache.rs index 80deddeb71..fb1d82434f 100644 --- a/crates/subspace-farmer/src/single_disk_farm/piece_cache.rs +++ b/crates/subspace-farmer/src/single_disk_farm/piece_cache.rs @@ -2,7 +2,7 @@ use crate::disk_piece_cache::DiskPieceCache; use crate::farm; -use crate::farm::{FarmError, FarmId, PieceCacheId, PieceCacheOffset}; +use crate::farm::{FarmError, PieceCacheId, PieceCacheOffset}; use async_trait::async_trait; use futures::{stream, Stream}; use subspace_core_primitives::{Piece, PieceIndex}; @@ -84,11 +84,7 @@ impl farm::PieceCache for SingleDiskPieceCache { } impl SingleDiskPieceCache { - pub(crate) fn new(farm_id: FarmId, maybe_piece_cache: Option) -> Self { - // Convert farm ID into cache ID for single disk farm - 
let FarmId::Ulid(id) = farm_id; - let id = PieceCacheId::Ulid(id); - + pub(crate) fn new(id: PieceCacheId, maybe_piece_cache: Option) -> Self { Self { id, maybe_piece_cache, From fa9f6af5bbaa93e4b92a4ad142292f218e5b2eea Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Tue, 16 Jul 2024 04:33:35 +0300 Subject: [PATCH 19/37] Add skipped slots metric --- crates/subspace-farmer/src/single_disk_farm.rs | 3 ++- crates/subspace-farmer/src/single_disk_farm/farming.rs | 8 ++++++-- crates/subspace-farmer/src/single_disk_farm/metrics.rs | 9 +++++++++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/crates/subspace-farmer/src/single_disk_farm.rs b/crates/subspace-farmer/src/single_disk_farm.rs index d3114b1ffc..0a74984edc 100644 --- a/crates/subspace-farmer/src/single_disk_farm.rs +++ b/crates/subspace-farmer/src/single_disk_farm.rs @@ -1070,9 +1070,10 @@ impl SingleDiskFarm { tasks.push(Box::pin({ let node_client = node_client.clone(); + let metrics = metrics.clone(); async move { - slot_notification_forwarder(&node_client, slot_info_forwarder_sender) + slot_notification_forwarder(&node_client, slot_info_forwarder_sender, metrics) .await .map_err(BackgroundTaskError::Farming) } diff --git a/crates/subspace-farmer/src/single_disk_farm/farming.rs b/crates/subspace-farmer/src/single_disk_farm/farming.rs index a3864f4eb4..496a8473ab 100644 --- a/crates/subspace-farmer/src/single_disk_farm/farming.rs +++ b/crates/subspace-farmer/src/single_disk_farm/farming.rs @@ -37,6 +37,7 @@ const NON_FATAL_ERROR_LIMIT: usize = 10; pub(super) async fn slot_notification_forwarder( node_client: &NC, mut slot_info_forwarder_sender: mpsc::Sender, + metrics: Option>, ) -> Result<(), FarmingError> where NC: NodeClient, @@ -53,9 +54,12 @@ where let slot = slot_info.slot_number; - // Error means farmer is still solving for previous slot, which is too late and - // we need to skip this slot + // Error means farmer is still solving for previous slot, which is too late, and we need to + 
// skip this slot if slot_info_forwarder_sender.try_send(slot_info).is_err() { + if let Some(metrics) = &metrics { + metrics.skipped_slots.inc(); + } debug!(%slot, "Slow farming, skipping slot"); } } diff --git a/crates/subspace-farmer/src/single_disk_farm/metrics.rs b/crates/subspace-farmer/src/single_disk_farm/metrics.rs index 2313d39d64..9bc2807673 100644 --- a/crates/subspace-farmer/src/single_disk_farm/metrics.rs +++ b/crates/subspace-farmer/src/single_disk_farm/metrics.rs @@ -35,6 +35,7 @@ impl fmt::Display for SectorState { #[derive(Debug)] pub(super) struct SingleDiskFarmMetrics { pub(super) auditing_time: Histogram, + pub(super) skipped_slots: Counter, proving_time: Family, Histogram>, farming_errors: Family, Counter>, pub(super) sector_downloading_time: Histogram, @@ -73,6 +74,13 @@ impl SingleDiskFarmMetrics { auditing_time.clone(), ); + let skipped_slots = Counter::default(); + sub_registry.register( + "skipped_slots", + "Completely skipped slots (not even auditing)", + skipped_slots.clone(), + ); + let proving_time = Family::<_, _>::new_with_constructor(|| { Histogram::new(exponential_buckets(0.0002, 2.0, 15)) }); @@ -204,6 +212,7 @@ impl SingleDiskFarmMetrics { let metrics = Self { auditing_time, + skipped_slots, proving_time, farming_errors, sector_downloading_time, From 13c958389fff5e4fbd8ec136421d1d1c3ca2eff1 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Tue, 16 Jul 2024 07:08:10 +0300 Subject: [PATCH 20/37] Rename `max_pieces_in_sector` to `pieces_in_sector` --- crates/subspace-core-primitives/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/subspace-core-primitives/src/lib.rs b/crates/subspace-core-primitives/src/lib.rs index 58a64bb333..1a45d11a4b 100644 --- a/crates/subspace-core-primitives/src/lib.rs +++ b/crates/subspace-core-primitives/src/lib.rs @@ -146,13 +146,13 @@ pub type SolutionRange = u64; pub const fn sectors_to_solution_range( sectors: u64, slot_probability: (u64, u64), - 
max_pieces_in_sector: u16, + pieces_in_sector: u16, ) -> SolutionRange { let solution_range = SolutionRange::MAX // Account for slot probability / slot_probability.1 * slot_probability.0 // Now take sector size and probability of hitting occupied s-bucket in sector into account - / (max_pieces_in_sector as u64 * Record::NUM_CHUNKS as u64 / Record::NUM_S_BUCKETS as u64); + / (pieces_in_sector as u64 * Record::NUM_CHUNKS as u64 / Record::NUM_S_BUCKETS as u64); // Take number of sectors into account solution_range / sectors @@ -165,13 +165,13 @@ pub const fn sectors_to_solution_range( pub const fn solution_range_to_sectors( solution_range: SolutionRange, slot_probability: (u64, u64), - max_pieces_in_sector: u16, + pieces_in_sector: u16, ) -> u64 { let sectors = SolutionRange::MAX // Account for slot probability / slot_probability.1 * slot_probability.0 // Now take sector size and probability of hitting occupied s-bucket in sector into account - / (max_pieces_in_sector as u64 * Record::NUM_CHUNKS as u64 / Record::NUM_S_BUCKETS as u64); + / (pieces_in_sector as u64 * Record::NUM_CHUNKS as u64 / Record::NUM_S_BUCKETS as u64); // Take solution range into account sectors / solution_range From 298dc9f0f508a17ea4f1818017f76692184309e7 Mon Sep 17 00:00:00 2001 From: tediou5 Date: Tue, 16 Jul 2024 19:04:40 +0800 Subject: [PATCH 21/37] chore: remove unnecessary async & result --- .../src/bin/subspace-farmer/commands/cluster/farmer.rs | 10 +++------- crates/subspace-farmer/src/cluster/controller.rs | 6 +++--- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/farmer.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/farmer.rs index bf286452c8..16ff9de26d 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/farmer.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/cluster/farmer.rs @@ -173,13 +173,9 @@ where None }; - let node_client = 
CachingProxyNodeClient::new( - ClusterNodeClient::new(nats_client.clone()) - .await - .map_err(|error| anyhow!("Failed to create cluster node client: {error}"))?, - ) - .await - .map_err(|error| anyhow!("Failed to create caching proxy node client: {error}"))?; + let node_client = CachingProxyNodeClient::new(ClusterNodeClient::new(nats_client.clone())) + .await + .map_err(|error| anyhow!("Failed to create caching proxy node client: {error}"))?; let farmer_app_info = node_client .farmer_app_info() diff --git a/crates/subspace-farmer/src/cluster/controller.rs b/crates/subspace-farmer/src/cluster/controller.rs index 55d0fc51c9..602ea1385c 100644 --- a/crates/subspace-farmer/src/cluster/controller.rs +++ b/crates/subspace-farmer/src/cluster/controller.rs @@ -275,11 +275,11 @@ pub struct ClusterNodeClient { impl ClusterNodeClient { /// Create a new instance - pub async fn new(nats_client: NatsClient) -> anyhow::Result { - Ok(Self { + pub fn new(nats_client: NatsClient) -> Self { + Self { nats_client, last_slot_info_instance: Arc::default(), - }) + } } } From b92edea2c6ea0e6fb298fee416d2be67d65ca1d2 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Tue, 16 Jul 2024 18:36:19 +0300 Subject: [PATCH 22/37] Use snap sync by default --- crates/subspace-node/src/commands/run/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/subspace-node/src/commands/run/consensus.rs b/crates/subspace-node/src/commands/run/consensus.rs index 916e5baf29..b074569279 100644 --- a/crates/subspace-node/src/commands/run/consensus.rs +++ b/crates/subspace-node/src/commands/run/consensus.rs @@ -407,7 +407,7 @@ pub(super) struct ConsensusChainOptions { timekeeper_options: TimekeeperOptions, /// Sync mode - #[arg(long, default_value_t = ChainSyncMode::Full)] + #[arg(long, default_value_t = ChainSyncMode::Snap)] sync: ChainSyncMode, } From e4b98b8a97db2bdc116d8db8e1489801ee4ea3bb Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Wed, 17 Jul 2024 07:23:44 +0300 
Subject: [PATCH 23/37] Do not issue segment notifications for already archived segments on restart, this is no longer necessary --- crates/sc-consensus-subspace/src/archiver.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/crates/sc-consensus-subspace/src/archiver.rs b/crates/sc-consensus-subspace/src/archiver.rs index 756a325147..fde460c97a 100644 --- a/crates/sc-consensus-subspace/src/archiver.rs +++ b/crates/sc-consensus-subspace/src/archiver.rs @@ -452,7 +452,6 @@ where { confirmation_depth_k: BlockNumber, archiver: Archiver, - older_archived_segments: Vec, best_archived_block: (Block::Hash, NumberFor), } @@ -600,8 +599,6 @@ where Archiver::new(subspace_link.kzg().clone()).expect("Incorrect parameters for archiver") }; - let mut older_archived_segments = Vec::new(); - // Process blocks since last fully archived block up to the current head minus K { let blocks_to_archive_from = archiver @@ -691,8 +688,6 @@ where .map(|archived_segment| archived_segment.segment_header) .collect(); - older_archived_segments.extend(archived_segments); - if !new_segment_headers.is_empty() { segment_headers_store.add_segment_headers(&new_segment_headers)?; } @@ -703,7 +698,6 @@ where Ok(InitializedArchiver { confirmation_depth_k, archiver, - older_archived_segments, best_archived_block: best_archived_block .expect("Must always set if there is no logical error; qed"), }) @@ -821,7 +815,6 @@ where let InitializedArchiver { confirmation_depth_k, mut archiver, - older_archived_segments, best_archived_block, } = archiver; let (mut best_archived_block_hash, mut best_archived_block_number) = best_archived_block; @@ -829,15 +822,6 @@ where let archived_segment_notification_sender = subspace_link.archived_segment_notification_sender.clone(); - // Farmers may have not received all previous segments, send them now. 
- for archived_segment in older_archived_segments { - send_archived_segment_notification( - &archived_segment_notification_sender, - archived_segment, - ) - .await; - } - while let Some(ref block_import_notification) = block_importing_notification_stream.next().await { @@ -865,7 +849,6 @@ where InitializedArchiver { confirmation_depth_k: _, archiver, - older_archived_segments: _, best_archived_block: (best_archived_block_hash, best_archived_block_number), } = initialize_archiver(&segment_headers_store, &subspace_link, client.as_ref())?; From 0b2b522f386b59712aad0dfc0992ec821d8af579 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Wed, 17 Jul 2024 07:28:32 +0300 Subject: [PATCH 24/37] Remove unnecessary `ref` and `InitializedArchiver.confirmation_depth_k` --- crates/sc-consensus-subspace/src/archiver.rs | 17 +++++------------ crates/sc-consensus-subspace/src/slot_worker.rs | 2 +- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/crates/sc-consensus-subspace/src/archiver.rs b/crates/sc-consensus-subspace/src/archiver.rs index fde460c97a..0fc783f838 100644 --- a/crates/sc-consensus-subspace/src/archiver.rs +++ b/crates/sc-consensus-subspace/src/archiver.rs @@ -450,7 +450,6 @@ struct InitializedArchiver where Block: BlockT, { - confirmation_depth_k: BlockNumber, archiver: Archiver, best_archived_block: (Block::Hash, NumberFor), } @@ -541,12 +540,8 @@ where .unwrap_or_else(|_| { unreachable!("Block number fits into block number; qed"); }); - let best_block_hash = client_info.best_hash; - let confirmation_depth_k = client - .runtime_api() - .chain_constants(best_block_hash)? 
- .confirmation_depth_k(); + let confirmation_depth_k = subspace_link.chain_constants.confirmation_depth_k(); let mut best_block_to_archive = best_block_number.saturating_sub(confirmation_depth_k); if (best_block_to_archive..best_block_number) @@ -696,7 +691,6 @@ where } Ok(InitializedArchiver { - confirmation_depth_k, archiver, best_archived_block: best_archived_block .expect("Must always set if there is no logical error; qed"), @@ -802,6 +796,7 @@ where None }; + // Subscribing synchronously before returning let mut block_importing_notification_stream = subspace_link .block_importing_notification_stream .subscribe(); @@ -811,9 +806,9 @@ where Some(archiver) => archiver, None => initialize_archiver(&segment_headers_store, &subspace_link, client.as_ref())?, }; + let confirmation_depth_k = subspace_link.chain_constants.confirmation_depth_k().into(); let InitializedArchiver { - confirmation_depth_k, mut archiver, best_archived_block, } = archiver; @@ -822,12 +817,11 @@ where let archived_segment_notification_sender = subspace_link.archived_segment_notification_sender.clone(); - while let Some(ref block_import_notification) = - block_importing_notification_stream.next().await + while let Some(block_import_notification) = block_importing_notification_stream.next().await { let block_number_to_archive = match block_import_notification .block_number - .checked_sub(&confirmation_depth_k.into()) + .checked_sub(&confirmation_depth_k) { Some(block_number_to_archive) => block_number_to_archive, None => { @@ -847,7 +841,6 @@ where // previously existing blocks if best_archived_block_number + One::one() != block_number_to_archive { InitializedArchiver { - confirmation_depth_k: _, archiver, best_archived_block: (best_archived_block_hash, best_archived_block_number), } = initialize_archiver(&segment_headers_store, &subspace_link, client.as_ref())?; diff --git a/crates/sc-consensus-subspace/src/slot_worker.rs b/crates/sc-consensus-subspace/src/slot_worker.rs index 
199fa093b6..9498bdec0c 100644 --- a/crates/sc-consensus-subspace/src/slot_worker.rs +++ b/crates/sc-consensus-subspace/src/slot_worker.rs @@ -721,7 +721,7 @@ where } fn should_backoff(&self, slot: Slot, chain_head: &Block::Header) -> bool { - if let Some(ref strategy) = self.backoff_authoring_blocks { + if let Some(strategy) = &self.backoff_authoring_blocks { if let Ok(chain_head_slot) = extract_pre_digest(chain_head).map(|digest| digest.slot()) { return strategy.should_backoff( From ead7caab5663b814e553cba1f6ac192ff01087b9 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Wed, 17 Jul 2024 07:44:38 +0300 Subject: [PATCH 25/37] Tiny refactoring --- crates/sc-consensus-subspace/src/archiver.rs | 27 ++++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/crates/sc-consensus-subspace/src/archiver.rs b/crates/sc-consensus-subspace/src/archiver.rs index 0fc783f838..76b0b5bbc8 100644 --- a/crates/sc-consensus-subspace/src/archiver.rs +++ b/crates/sc-consensus-subspace/src/archiver.rs @@ -817,18 +817,18 @@ where let archived_segment_notification_sender = subspace_link.archived_segment_notification_sender.clone(); - while let Some(block_import_notification) = block_importing_notification_stream.next().await + while let Some(block_importing_notification) = + block_importing_notification_stream.next().await { - let block_number_to_archive = match block_import_notification - .block_number - .checked_sub(&confirmation_depth_k) - { - Some(block_number_to_archive) => block_number_to_archive, - None => { - // Too early to archive blocks - continue; - } - }; + let importing_block_number = block_importing_notification.block_number; + let block_number_to_archive = + match importing_block_number.checked_sub(&confirmation_depth_k) { + Some(block_number_to_archive) => block_number_to_archive, + None => { + // Too early to archive blocks + continue; + } + }; if best_archived_block_number >= block_number_to_archive { // This block was already archived, skip 
@@ -860,9 +860,8 @@ where "There was a gap in blockchain history and the last contiguous series of \ blocks starting with doesn't start with archived segment (best archived \ block number {best_archived_block_number}, block number to archive \ - {block_number_to_archive}), block about to be imported {}), archiver can't \ - continue", - block_import_notification.block_number + {block_number_to_archive}), block about to be imported \ + {importing_block_number}), archiver can't continue", ); return Err(sp_blockchain::Error::Consensus(sp_consensus::Error::Other( error.into(), From ff99c402104f2f35175b73677e5800856eeacdbd Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Wed, 17 Jul 2024 07:56:08 +0300 Subject: [PATCH 26/37] Replace genesis-only archiver reinitialization support with reinitialization after arbitrary gap to support future Snap sync versions --- crates/sc-consensus-subspace/src/archiver.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/sc-consensus-subspace/src/archiver.rs b/crates/sc-consensus-subspace/src/archiver.rs index 76b0b5bbc8..bb025b2e6a 100644 --- a/crates/sc-consensus-subspace/src/archiver.rs +++ b/crates/sc-consensus-subspace/src/archiver.rs @@ -851,9 +851,11 @@ where // Special sync mode where verified blocks were inserted into blockchain // directly, archiving of this block will naturally happen later continue; - } else if best_archived_block_number.is_zero() { - // We may have imported some block using special sync mode right after genesis, - // in which case archiver will be stuck at genesis block + } else if client.hash(importing_block_number - One::one())?.is_none() { + // We may have imported some block using special sync mode and block we're about + // to import is the first one after the gap at which archiver is supposed to be + // initialized, but we are only about to import it, so wait for the next block + // for now continue; } else { let error = format!( From 
900b8083d4d1f7f6dac52e39069ecc12aeddf587 Mon Sep 17 00:00:00 2001 From: asamuj Date: Wed, 17 Jul 2024 15:38:44 +0800 Subject: [PATCH 27/37] Remove useless code --- .../subspace-farmer-components/src/auditing.rs | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/crates/subspace-farmer-components/src/auditing.rs b/crates/subspace-farmer-components/src/auditing.rs index cd79d8c398..5ad0c322cd 100644 --- a/crates/subspace-farmer-components/src/auditing.rs +++ b/crates/subspace-farmer-components/src/auditing.rs @@ -39,8 +39,6 @@ pub struct AuditResult<'a, Sector> { pub sector_index: SectorIndex, /// Solution candidates pub solution_candidates: SolutionCandidates<'a, Sector>, - /// Best solution distance found - pub best_solution_distance: SolutionRange, } /// Chunk candidate, contains one or more potentially winning audit chunks (in case chunk itself was @@ -83,7 +81,7 @@ where error, })?; - let Some((winning_chunks, best_solution_distance)) = map_winning_chunks( + let Some(winning_chunks) = map_winning_chunks( &s_bucket, global_challenge, §or_slot_challenge, @@ -102,7 +100,6 @@ where sector_metadata, winning_chunks.into(), ), - best_solution_distance, })) } @@ -166,7 +163,7 @@ where })); } - let (winning_chunks, best_solution_distance) = map_winning_chunks( + let winning_chunks = map_winning_chunks( &s_bucket, global_challenge, §or_auditing_info.sector_slot_challenge, @@ -183,7 +180,6 @@ where sector_metadata, winning_chunks.into(), ), - best_solution_distance, })) }) .collect() @@ -239,7 +235,7 @@ fn map_winning_chunks( global_challenge: &Blake3Hash, sector_slot_challenge: &SectorSlotChallenge, solution_range: SolutionRange, -) -> Option<(Vec, SolutionRange)> { +) -> Option> { // Map all winning chunks let mut chunk_candidates = s_bucket .array_chunks::<{ Scalar::FULL_BYTES }>() @@ -265,10 +261,5 @@ fn map_winning_chunks( chunk_candidates.sort_by_key(|chunk_candidate| chunk_candidate.solution_distance); - let best_solution_distance = 
chunk_candidates - .first() - .expect("Not empty, checked above; qed") - .solution_distance; - - Some((chunk_candidates, best_solution_distance)) + Some(chunk_candidates) } From fe19cfb72e95d89e3509fe8e5e958c87e1a44340 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Wed, 17 Jul 2024 12:38:16 +0300 Subject: [PATCH 28/37] Free plot cache hashmap once it is no longer usable --- .../src/single_disk_farm/plot_cache.rs | 55 ++++++++++++------- 1 file changed, 36 insertions(+), 19 deletions(-) diff --git a/crates/subspace-farmer/src/single_disk_farm/plot_cache.rs b/crates/subspace-farmer/src/single_disk_farm/plot_cache.rs index 56487dc420..75a2ac2be8 100644 --- a/crates/subspace-farmer/src/single_disk_farm/plot_cache.rs +++ b/crates/subspace-farmer/src/single_disk_farm/plot_cache.rs @@ -56,6 +56,7 @@ pub struct DiskPlotCache { file: Weak, sectors_metadata: Weak>>, cached_pieces: Arc>, + target_sector_count: SectorIndex, sector_size: u64, } @@ -137,6 +138,7 @@ impl DiskPlotCache { file: Arc::downgrade(file), sectors_metadata: Arc::downgrade(sectors_metadata), cached_pieces: Arc::new(RwLock::new(cached_pieces)), + target_sector_count, sector_size, } } @@ -204,14 +206,20 @@ impl DiskPlotCache { let element_offset = u64::from(offset) * u64::from(Self::element_size()); let sectors_metadata = sectors_metadata.read().await; - let plotted_bytes = self.sector_size * sectors_metadata.len() as u64; + let plotted_sectors_count = sectors_metadata.len() as SectorIndex; + let plotted_bytes = self.sector_size * u64::from(plotted_sectors_count); // Make sure offset is after anything that is already plotted if element_offset < plotted_bytes { // Just to be safe, avoid any overlap of write locks drop(sectors_metadata); + let mut cached_pieces = self.cached_pieces.write(); // No space to store more pieces anymore - self.cached_pieces.write().next_offset.take(); + cached_pieces.next_offset.take(); + if plotted_sectors_count == self.target_sector_count { + // Free allocated memory once 
fully plotted + mem::take(&mut cached_pieces.map); + } return Ok(false); } @@ -253,25 +261,17 @@ impl DiskPlotCache { let offset = self.cached_pieces.read().map.get(key).copied()?; let file = self.file.upgrade()?; - let cached_pieces = Arc::clone(&self.cached_pieces); - let key = key.clone(); let read_fn = move || { let mut element = BytesMut::zeroed(Self::element_size() as usize); - match Self::read_piece_internal(&file, offset, &mut element) { - Ok(Some(_piece_index)) => { - let element = element.freeze(); - let piece = Piece::try_from( - element.slice_ref(&element[PieceIndex::SIZE..][..Piece::SIZE]), - ) - .expect("Correct length; qed"); - Some(piece) - } - _ => { - // Remove entry just in case it was overridden with a sector already - cached_pieces.write().map.remove(&key); - None - } + if let Ok(Some(_piece_index)) = Self::read_piece_internal(&file, offset, &mut element) { + let element = element.freeze(); + let piece = + Piece::try_from(element.slice_ref(&element[PieceIndex::SIZE..][..Piece::SIZE])) + .expect("Correct length; qed"); + Some(piece) + } else { + None } }; // TODO: On Windows spawning blocking task that allows concurrent reads causes huge memory @@ -280,7 +280,7 @@ impl DiskPlotCache { // (Nazar). // See https://github.com/subspace/subspace/issues/2813 and linked forum post for details. 
// This TODO exists in multiple files - if cfg!(windows) { + let maybe_piece = if cfg!(windows) { task::block_in_place(read_fn) } else { let read_fut = task::spawn_blocking(read_fn); @@ -288,7 +288,24 @@ impl DiskPlotCache { AsyncJoinOnDrop::new(read_fut, false) .await .unwrap_or_default() + }; + + if maybe_piece.is_none() + && let Some(sectors_metadata) = self.sectors_metadata.upgrade() + { + let plotted_sectors_count = sectors_metadata.read().await.len() as SectorIndex; + + let mut cached_pieces = self.cached_pieces.write(); + if plotted_sectors_count == self.target_sector_count { + // Free allocated memory once fully plotted + mem::take(&mut cached_pieces.map); + } else { + // Remove entry just in case it was overridden with a sector already + cached_pieces.map.remove(key); + } } + + maybe_piece } fn read_piece_internal( From 742af9966fcc15c2183d0592cab84a8803835055 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Wed, 17 Jul 2024 08:52:58 +0300 Subject: [PATCH 29/37] Optimize archiving during sync --- crates/sc-consensus-subspace/src/archiver.rs | 27 ++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/crates/sc-consensus-subspace/src/archiver.rs b/crates/sc-consensus-subspace/src/archiver.rs index bb025b2e6a..f7fc2f3399 100644 --- a/crates/sc-consensus-subspace/src/archiver.rs +++ b/crates/sc-consensus-subspace/src/archiver.rs @@ -79,7 +79,7 @@ use subspace_archiving::archiver::{Archiver, NewArchivedSegment}; use subspace_core_primitives::crypto::kzg::Kzg; use subspace_core_primitives::objects::BlockObjectMapping; use subspace_core_primitives::{BlockNumber, RecordedHistorySegment, SegmentHeader, SegmentIndex}; -use tracing::{debug, info, warn}; +use tracing::{debug, info, trace, warn}; /// Number of WASM instances is 8, this is a bit lower to avoid warnings exceeding number of /// instances @@ -830,8 +830,31 @@ where } }; - if best_archived_block_number >= block_number_to_archive { + let last_archived_block_number = 
NumberFor::::from( + segment_headers_store + .last_segment_header() + .expect("Exists after archiver initialization; qed") + .last_archived_block() + .number, + ); + trace!( + %importing_block_number, + %block_number_to_archive, + %best_archived_block_number, + %last_archived_block_number, + "Checking if block needs to be skipped" + ); + if best_archived_block_number >= block_number_to_archive + || last_archived_block_number > block_number_to_archive + { // This block was already archived, skip + debug!( + %importing_block_number, + %block_number_to_archive, + %best_archived_block_number, + %last_archived_block_number, + "Skipping already archived block", + ); continue; } From 8e443fe585028c46d465d0e4239a6fe825a8bd04 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Wed, 17 Jul 2024 15:55:08 +0300 Subject: [PATCH 30/37] Tiny refactoring --- .../src/sync_from_dsn/import_blocks.rs | 42 ++++++++++--------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/crates/subspace-service/src/sync_from_dsn/import_blocks.rs b/crates/subspace-service/src/sync_from_dsn/import_blocks.rs index 678b1b4728..a6ae523307 100644 --- a/crates/subspace-service/src/sync_from_dsn/import_blocks.rs +++ b/crates/subspace-service/src/sync_from_dsn/import_blocks.rs @@ -22,6 +22,7 @@ use sc_client_api::{AuxStore, BlockBackend, HeaderBackend}; use sc_consensus::import_queue::ImportQueueService; use sc_consensus::IncomingBlock; use sc_consensus_subspace::archiver::{decode_block, encode_block, SegmentHeadersStore}; +use sc_service::Error; use sc_tracing::tracing::{debug, trace}; use sp_consensus::BlockOrigin; use sp_runtime::generic::SignedBlock; @@ -53,7 +54,7 @@ pub(super) async fn import_blocks_from_dsn( import_queue_service: &mut IQS, last_processed_segment_index: &mut SegmentIndex, last_processed_block_number: &mut ::Number, -) -> Result +) -> Result where Block: BlockT, AS: AuxStore + Send + Sync + 'static, @@ -63,7 +64,7 @@ where { { let last_segment_header = 
segment_headers_store.last_segment_header().ok_or_else(|| { - sc_service::Error::Other( + Error::Other( "Archiver needs to be initialized before syncing from DSN to populate the very \ first segment" .to_string(), @@ -82,7 +83,7 @@ where } } - let mut downloaded_blocks = 0; + let mut imported_blocks = 0; let mut reconstructor = Reconstructor::new().map_err(|error| error.to_string())?; // Start from the first unprocessed segment and process all segments known so far let segment_indices_iter = (*last_processed_segment_index + SegmentIndex::ONE) @@ -98,24 +99,25 @@ where .get_segment_header(segment_index) .expect("Statically guaranteed to exist, see checks above; qed"); - trace!( - %segment_index, - last_archived_block_number = %segment_header.last_archived_block().number, - last_archived_block_progress = ?segment_header.last_archived_block().archived_progress, - "Checking segment header" - ); - - let last_archived_block = - NumberFor::::from(segment_header.last_archived_block().number); + let last_archived_block_number = segment_header.last_archived_block().number; let last_archived_block_partial = segment_header .last_archived_block() .archived_progress .partial() .is_some(); + trace!( + %segment_index, + last_archived_block_number, + last_archived_block_partial, + "Checking segment header" + ); + + let last_archived_block_number = NumberFor::::from(last_archived_block_number); + let info = client.info(); // We have already processed this block, it can't change - if last_archived_block <= *last_processed_block_number { + if last_archived_block_number <= *last_processed_block_number { *last_processed_segment_index = segment_index; // Reset reconstructor instance reconstructor = Reconstructor::new().map_err(|error| error.to_string())?; @@ -123,7 +125,7 @@ where } // Just one partial unprocessed block and this was the last segment available, so nothing to // import - if last_archived_block == *last_processed_block_number + One::one() + if last_archived_block_number == 
*last_processed_block_number + One::one() && last_archived_block_partial && segment_indices_iter.peek().is_none() { @@ -151,7 +153,7 @@ where .expect("Block before best block number must always be found; qed"); if encode_block(signed_block) != block_bytes { - return Err(sc_service::Error::Other( + return Err(Error::Other( "Wrong genesis block, block import failed".to_string(), )); } @@ -181,7 +183,7 @@ where let signed_block = decode_block::(&block_bytes).map_err(|error| error.to_string())?; - *last_processed_block_number = last_archived_block; + *last_processed_block_number = last_archived_block_number; // No need to import blocks that are already present, if block is not present it might // correspond to a short fork, so we need to import it even if we already have another @@ -210,9 +212,9 @@ where skip_execution: false, }); - downloaded_blocks += 1; + imported_blocks += 1; - if downloaded_blocks % 1000 == 0 { + if imported_blocks % 1000 == 0 { debug!("Adding block {} from DSN to the import queue", block_number); } } @@ -237,14 +239,14 @@ where *last_processed_segment_index = segment_index; } - Ok(downloaded_blocks) + Ok(imported_blocks) } pub(super) async fn download_and_reconstruct_blocks( segment_index: SegmentIndex, piece_getter: &PG, reconstructor: &mut Reconstructor, -) -> Result)>, sc_service::Error> +) -> Result)>, Error> where PG: DsnSyncPieceGetter, { From 07d22198e8d22ea47a5c4a8495b521882a6ed60e Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Wed, 17 Jul 2024 15:59:05 +0300 Subject: [PATCH 31/37] Avoid unnecessary segment downloading in sync from DSN after Snap sync --- crates/subspace-service/src/sync_from_dsn.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crates/subspace-service/src/sync_from_dsn.rs b/crates/subspace-service/src/sync_from_dsn.rs index 05d374f5ab..8b3de1f6d7 100644 --- a/crates/subspace-service/src/sync_from_dsn.rs +++ b/crates/subspace-service/src/sync_from_dsn.rs @@ -17,7 +17,6 @@ use 
sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_consensus_subspace::{FarmerPublicKey, SubspaceApi}; use sp_runtime::traits::{Block as BlockT, CheckedSub, NumberFor}; -use sp_runtime::Saturating; use std::error::Error; use std::fmt; use std::future::Future; @@ -282,9 +281,7 @@ where let mut last_processed_segment_index = SegmentIndex::ZERO; // TODO: We'll be able to just take finalized block once we are able to decouple pruning from // finality: https://github.com/paritytech/polkadot-sdk/issues/1570 - let mut last_processed_block_number = info - .best_number - .saturating_sub(chain_constants.confirmation_depth_k().into()); + let mut last_processed_block_number = info.best_number; let segment_header_downloader = SegmentHeaderDownloader::new(node); while let Some(reason) = notifications.next().await { From 71e8c217c748f23a65c4bf6b95718f1554062c80 Mon Sep 17 00:00:00 2001 From: vedhavyas Date: Thu, 18 Jul 2024 12:08:21 +0530 Subject: [PATCH 32/37] add additional staking events when nominated and unlocked --- crates/pallet-domains/src/lib.rs | 38 ++++++++++++++-------------- crates/pallet-domains/src/staking.rs | 38 +++++++++++++++++++++++++--- 2 files changed, 53 insertions(+), 23 deletions(-) diff --git a/crates/pallet-domains/src/lib.rs b/crates/pallet-domains/src/lib.rs index 988499144f..7a4ead0cd1 100644 --- a/crates/pallet-domains/src/lib.rs +++ b/crates/pallet-domains/src/lib.rs @@ -914,9 +914,20 @@ mod pallet { operator_id: OperatorId, domain_id: DomainId, }, + NominatedStakedUnlocked { + operator_id: OperatorId, + nominator_id: NominatorId, + unlocked_amount: BalanceOf, + }, + StorageFeeUnlocked { + operator_id: OperatorId, + nominator_id: NominatorId, + storage_fee: BalanceOf, + }, OperatorNominated { operator_id: OperatorId, nominator_id: NominatorId, + amount: BalanceOf, }, DomainInstantiated { domain_id: DomainId, @@ -928,17 +939,13 @@ mod pallet { OperatorDeregistered { operator_id: OperatorId, }, - OperatorUnlocked { - operator_id: 
OperatorId, - }, - WithdrewStake { + NominatorUnlocked { operator_id: OperatorId, nominator_id: NominatorId, }, - FundsUnlocked { + WithdrewStake { operator_id: OperatorId, nominator_id: NominatorId, - amount: BalanceOf, }, PreferredOperator { operator_id: OperatorId, @@ -1361,11 +1368,6 @@ mod pallet { do_nominate_operator::(operator_id, nominator_id.clone(), amount) .map_err(Error::::from)?; - Self::deposit_event(Event::OperatorNominated { - operator_id, - nominator_id, - }); - Ok(()) } @@ -1434,13 +1436,8 @@ mod pallet { #[pallet::weight(T::WeightInfo::unlock_funds())] pub fn unlock_funds(origin: OriginFor, operator_id: OperatorId) -> DispatchResult { let nominator_id = ensure_signed(origin)?; - let unlocked_funds = do_unlock_funds::(operator_id, nominator_id.clone()) + do_unlock_funds::(operator_id, nominator_id.clone()) .map_err(crate::pallet::Error::::from)?; - Self::deposit_event(Event::FundsUnlocked { - operator_id, - nominator_id, - amount: unlocked_funds, - }); Ok(()) } @@ -1451,10 +1448,13 @@ mod pallet { pub fn unlock_nominator(origin: OriginFor, operator_id: OperatorId) -> DispatchResult { let nominator = ensure_signed(origin)?; - do_unlock_nominator::(operator_id, nominator) + do_unlock_nominator::(operator_id, nominator.clone()) .map_err(crate::pallet::Error::::from)?; - Self::deposit_event(Event::OperatorUnlocked { operator_id }); + Self::deposit_event(Event::NominatorUnlocked { + operator_id, + nominator_id: nominator, + }); Ok(()) } diff --git a/crates/pallet-domains/src/staking.rs b/crates/pallet-domains/src/staking.rs index a310e618f1..025c706bf7 100644 --- a/crates/pallet-domains/src/staking.rs +++ b/crates/pallet-domains/src/staking.rs @@ -592,6 +592,11 @@ pub(crate) fn do_nominate_operator( .map_err(Error::BundleStorageFund)?; hold_deposit::(&nominator_id, operator_id, new_deposit.staking)?; + Pallet::::deposit_event(Event::OperatorNominated { + operator_id, + nominator_id: nominator_id.clone(), + amount: new_deposit.staking, + }); // 
increment total deposit for operator pool within this epoch operator.deposits_in_epoch = operator @@ -910,7 +915,7 @@ pub(crate) fn do_withdraw_stake( pub(crate) fn do_unlock_funds( operator_id: OperatorId, nominator_id: NominatorId, -) -> Result, Error> { +) -> Result<(), Error> { let operator = Operators::::get(operator_id).ok_or(Error::UnknownOperator)?; ensure!( *operator.status::(operator_id) == OperatorStatus::Registered, @@ -966,6 +971,12 @@ pub(crate) fn do_unlock_funds( ) .map_err(|_| Error::RemoveLock)?; + Pallet::::deposit_event(Event::NominatedStakedUnlocked { + operator_id, + nominator_id: nominator_id.clone(), + unlocked_amount: amount_to_unlock, + }); + // Release storage fund let storage_fund_hold_id = T::HoldIdentifier::storage_fund_withdrawal(operator_id); T::Currency::release( @@ -976,6 +987,12 @@ pub(crate) fn do_unlock_funds( ) .map_err(|_| Error::RemoveLock)?; + Pallet::::deposit_event(Event::StorageFeeUnlocked { + operator_id, + nominator_id: nominator_id.clone(), + storage_fee: storage_fee_refund, + }); + // if there are no withdrawals, then delete the storage as well if withdrawal.withdrawals.is_empty() && withdrawal.withdrawal_in_shares.is_none() { *maybe_withdrawal = None; @@ -990,7 +1007,7 @@ pub(crate) fn do_unlock_funds( }); } - Ok(amount_to_unlock) + Ok(()) }) } @@ -1101,6 +1118,12 @@ pub(crate) fn do_unlock_nominator( ) .map_err(|_| Error::RemoveLock)?; + Pallet::::deposit_event(Event::NominatedStakedUnlocked { + operator_id, + nominator_id: nominator_id.clone(), + unlocked_amount: total_amount_to_unlock, + }); + total_stake = total_stake.saturating_sub(nominator_staked_amount); total_shares = total_shares.saturating_sub(nominator_shares); @@ -1120,8 +1143,15 @@ pub(crate) fn do_unlock_nominator( .map_err(Error::BundleStorageFund)?; // Release all storage fee that of the nominator. 
- T::Currency::release_all(&storage_fund_hold_id, &nominator_id, Precision::Exact) - .map_err(|_| Error::RemoveLock)?; + let storage_fee_refund = + T::Currency::release_all(&storage_fund_hold_id, &nominator_id, Precision::Exact) + .map_err(|_| Error::RemoveLock)?; + + Pallet::::deposit_event(Event::StorageFeeUnlocked { + operator_id, + nominator_id: nominator_id.clone(), + storage_fee: storage_fee_refund, + }); // reduce total storage fee deposit with nominator total fee deposit total_storage_fee_deposit = From acbc2ff7a5046fadf2aac022a6f8f30e1a4f0dca Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Thu, 18 Jul 2024 13:19:22 +0300 Subject: [PATCH 33/37] Allow importing older blocks in DSN/Snap sync if necessary --- crates/sc-consensus-subspace/src/verifier.rs | 5 ++++- crates/subspace-service/src/sync_from_dsn/import_blocks.rs | 3 ++- crates/subspace-service/src/sync_from_dsn/snap_sync.rs | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/sc-consensus-subspace/src/verifier.rs b/crates/sc-consensus-subspace/src/verifier.rs index 788e200763..3ba04a60ec 100644 --- a/crates/sc-consensus-subspace/src/verifier.rs +++ b/crates/sc-consensus-subspace/src/verifier.rs @@ -484,7 +484,9 @@ where ); let best_number = self.client.info().best_number; + // Reject block below archiving point, but only if we received it from the network if *block.header.number() + self.chain_constants.confirmation_depth_k().into() < best_number + && matches!(block.origin, BlockOrigin::NetworkBroadcast) { debug!( header = ?block.header, @@ -579,7 +581,8 @@ where } = checked_header; let slot = pre_digest.slot(); - // Estimate what the "current" slot is according to sync target since we don't have other way to know it + // Estimate what the "current" slot is according to sync target since we don't have other + // way to know it let diff_in_blocks = self .sync_target_block_number .load(Ordering::Relaxed) diff --git a/crates/subspace-service/src/sync_from_dsn/import_blocks.rs 
b/crates/subspace-service/src/sync_from_dsn/import_blocks.rs index 678b1b4728..16c0969156 100644 --- a/crates/subspace-service/src/sync_from_dsn/import_blocks.rs +++ b/crates/subspace-service/src/sync_from_dsn/import_blocks.rs @@ -227,7 +227,8 @@ where import_queue_service .import_blocks(BlockOrigin::NetworkInitialSync, blocks_to_import); // This will notify Substrate's sync mechanism and allow regular Substrate sync to continue gracefully - import_queue_service.import_blocks(BlockOrigin::NetworkBroadcast, vec![last_block]); + import_queue_service + .import_blocks(BlockOrigin::ConsensusBroadcast, vec![last_block]); } else { import_queue_service .import_blocks(BlockOrigin::NetworkInitialSync, blocks_to_import); diff --git a/crates/subspace-service/src/sync_from_dsn/snap_sync.rs b/crates/subspace-service/src/sync_from_dsn/snap_sync.rs index a9cfc85738..ed5866e214 100644 --- a/crates/subspace-service/src/sync_from_dsn/snap_sync.rs +++ b/crates/subspace-service/src/sync_from_dsn/snap_sync.rs @@ -294,7 +294,7 @@ where ); import_queue_service - .import_blocks(BlockOrigin::NetworkBroadcast, vec![last_block_to_import]); + .import_blocks(BlockOrigin::ConsensusBroadcast, vec![last_block_to_import]); } // Wait for blocks to be imported From 3dcea8e6f1da9c5c4fdd03dfc21aad509ae19aba Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Fri, 19 Jul 2024 22:36:52 +0300 Subject: [PATCH 34/37] Clear block gap proactively after DSN sync as well --- crates/subspace-service/src/sync_from_dsn.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/crates/subspace-service/src/sync_from_dsn.rs b/crates/subspace-service/src/sync_from_dsn.rs index 8b3de1f6d7..33d81346da 100644 --- a/crates/subspace-service/src/sync_from_dsn.rs +++ b/crates/subspace-service/src/sync_from_dsn.rs @@ -13,6 +13,7 @@ use sc_client_api::{AuxStore, BlockBackend, BlockchainEvents}; use sc_consensus::import_queue::ImportQueueService; use 
sc_consensus_subspace::archiver::SegmentHeadersStore; use sc_network::service::traits::NetworkService; +use sc_service::ClientExt; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_consensus_subspace::{FarmerPublicKey, SubspaceApi}; @@ -84,7 +85,7 @@ enum NotificationReason { /// Create node observer that will track node state and send notifications to worker to start sync /// from DSN. #[allow(clippy::too_many_arguments)] -pub(super) fn create_observer_and_worker( +pub(super) fn create_observer_and_worker( segment_headers_store: SegmentHeadersStore, network_service: Arc, node: Node, @@ -99,11 +100,13 @@ pub(super) fn create_observer_and_worker( ) where Block: BlockT, + Backend: sc_client_api::Backend, AS: AuxStore + Send + Sync + 'static, Client: HeaderBackend + BlockBackend + BlockchainEvents + ProvideRuntimeApi + + ClientExt + Send + Sync + 'static, @@ -247,7 +250,7 @@ async fn create_substrate_network_observer( } #[allow(clippy::too_many_arguments)] -async fn create_worker( +async fn create_worker( segment_headers_store: SegmentHeadersStore, node: &Node, client: &Client, @@ -259,10 +262,12 @@ async fn create_worker( ) -> Result<(), sc_service::Error> where Block: BlockT, + Backend: sc_client_api::Backend, AS: AuxStore + Send + Sync + 'static, Client: HeaderBackend + BlockBackend + ProvideRuntimeApi + + ClientExt + Send + Sync + 'static, @@ -329,6 +334,11 @@ where } } + // Clear the block gap that arises from first block import with a much higher number than + // previously (resulting in a gap) + // TODO: This is a hack and better solution is needed: https://github.com/paritytech/polkadot-sdk/issues/4407 + client.clear_block_gap()?; + debug!("Finished DSN sync"); pause_sync.store(false, Ordering::Release); From 2cc8740d3aab006190b6b4af2712571f37c58a83 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Sat, 20 Jul 2024 18:26:08 +0300 Subject: [PATCH 35/37] Print warning on missing piece cache during scrubbing instead of error --- 
.../subspace-farmer/src/single_disk_farm.rs | 195 +++++++++--------- 1 file changed, 99 insertions(+), 96 deletions(-) diff --git a/crates/subspace-farmer/src/single_disk_farm.rs b/crates/subspace-farmer/src/single_disk_farm.rs index 0a74984edc..9a488bc3f1 100644 --- a/crates/subspace-farmer/src/single_disk_farm.rs +++ b/crates/subspace-farmer/src/single_disk_farm.rs @@ -562,12 +562,6 @@ pub enum SingleDiskFarmScrubError { /// Unexpected metadata version #[error("Unexpected metadata version {0}")] UnexpectedMetadataVersion(u8), - /// Cache file does not exist - #[error("Cache file does not exist at {file}")] - CacheFileDoesNotExist { - /// Cache file - file: PathBuf, - }, /// Cache can't be opened #[error("Cache at {file} can't be opened: {error}")] CacheCantBeOpened { @@ -2284,114 +2278,123 @@ impl SingleDiskFarm { } if target.cache() { - let file = directory.join(DiskPieceCache::FILE_NAME); - info!(path = %file.display(), "Checking cache file"); + Self::scrub_cache(directory, dry_run)?; + } - let cache_file = match OpenOptions::new().read(true).write(!dry_run).open(&file) { - Ok(plot_file) => plot_file, - Err(error) => { - return Err(if error.kind() == io::ErrorKind::NotFound { - SingleDiskFarmScrubError::CacheFileDoesNotExist { file } - } else { - SingleDiskFarmScrubError::CacheCantBeOpened { file, error } - }); - } - }; + info!("Farm check completed"); - // Error doesn't matter here - let _ = cache_file.advise_sequential_access(); + Ok(()) + } - let cache_size = match cache_file.size() { - Ok(metadata_size) => metadata_size, - Err(error) => { - return Err(SingleDiskFarmScrubError::FailedToDetermineFileSize { - file, - error, - }); - } - }; + fn scrub_cache(directory: &Path, dry_run: bool) -> Result<(), SingleDiskFarmScrubError> { + let span = Span::current(); - let element_size = DiskPieceCache::element_size(); - let number_of_cached_elements = cache_size / u64::from(element_size); - let dummy_element = vec![0; element_size as usize]; - 
(0..number_of_cached_elements) - .into_par_iter() - .map_with(vec![0; element_size as usize], |element, cache_offset| { - let _span_guard = span.enter(); + let file = directory.join(DiskPieceCache::FILE_NAME); + info!(path = %file.display(), "Checking cache file"); - let offset = cache_offset * u64::from(element_size); - if let Err(error) = cache_file.read_exact_at(element, offset) { - warn!( - path = %file.display(), - %cache_offset, - size = %element.len() as u64, - %offset, - %error, - "Failed to read cached piece, replacing with dummy element" - ); + let cache_file = match OpenOptions::new().read(true).write(!dry_run).open(&file) { + Ok(plot_file) => plot_file, + Err(error) => { + return if error.kind() == io::ErrorKind::NotFound { + warn!( + file = %file.display(), + "Cache file does not exist, this is expected in farming cluster" + ); + Ok(()) + } else { + Err(SingleDiskFarmScrubError::CacheCantBeOpened { file, error }) + }; + } + }; - if !dry_run { - if let Err(error) = cache_file.write_all_at(&dummy_element, offset) { - return Err(SingleDiskFarmScrubError::FailedToWriteBytes { - file: file.clone(), - size: u64::from(element_size), - offset, - error, - }); - } - } + // Error doesn't matter here + let _ = cache_file.advise_sequential_access(); - return Ok(()); - } + let cache_size = match cache_file.size() { + Ok(metadata_size) => metadata_size, + Err(error) => { + return Err(SingleDiskFarmScrubError::FailedToDetermineFileSize { file, error }); + } + }; - let (index_and_piece_bytes, expected_checksum) = - element.split_at(element_size as usize - mem::size_of::()); - let actual_checksum = blake3_hash(index_and_piece_bytes); - if actual_checksum != expected_checksum && element != &dummy_element { - warn!( - %cache_offset, - actual_checksum = %hex::encode(actual_checksum), - expected_checksum = %hex::encode(expected_checksum), - "Cached piece checksum mismatch, replacing with dummy element" - ); + let element_size = DiskPieceCache::element_size(); + let 
number_of_cached_elements = cache_size / u64::from(element_size); + let dummy_element = vec![0; element_size as usize]; + (0..number_of_cached_elements) + .into_par_iter() + .map_with(vec![0; element_size as usize], |element, cache_offset| { + let _span_guard = span.enter(); - if !dry_run { - if let Err(error) = cache_file.write_all_at(&dummy_element, offset) { - return Err(SingleDiskFarmScrubError::FailedToWriteBytes { - file: file.clone(), - size: u64::from(element_size), - offset, - error, - }); - } - } + let offset = cache_offset * u64::from(element_size); + if let Err(error) = cache_file.read_exact_at(element, offset) { + warn!( + path = %file.display(), + %cache_offset, + size = %element.len() as u64, + %offset, + %error, + "Failed to read cached piece, replacing with dummy element" + ); - return Ok(()); + if !dry_run { + if let Err(error) = cache_file.write_all_at(&dummy_element, offset) { + return Err(SingleDiskFarmScrubError::FailedToWriteBytes { + file: file.clone(), + size: u64::from(element_size), + offset, + error, + }); + } } - Ok(()) - }) - .try_for_each({ - let span = &span; - let checked_elements = AtomicUsize::new(0); + return Ok(()); + } - move |result| { - let _span_guard = span.enter(); + let (index_and_piece_bytes, expected_checksum) = + element.split_at(element_size as usize - mem::size_of::()); + let actual_checksum = blake3_hash(index_and_piece_bytes); + if actual_checksum != expected_checksum && element != &dummy_element { + warn!( + %cache_offset, + actual_checksum = %hex::encode(actual_checksum), + expected_checksum = %hex::encode(expected_checksum), + "Cached piece checksum mismatch, replacing with dummy element" + ); - let checked_elements = checked_elements.fetch_add(1, Ordering::Relaxed); - if checked_elements > 1 && checked_elements % 1000 == 0 { - info!( - "Checked {}/{} cache elements", - checked_elements, number_of_cached_elements - ); + if !dry_run { + if let Err(error) = cache_file.write_all_at(&dummy_element, offset) { + 
return Err(SingleDiskFarmScrubError::FailedToWriteBytes { + file: file.clone(), + size: u64::from(element_size), + offset, + error, + }); } + } - result + return Ok(()); + } + + Ok(()) + }) + .try_for_each({ + let span = &span; + let checked_elements = AtomicUsize::new(0); + + move |result| { + let _span_guard = span.enter(); + + let checked_elements = checked_elements.fetch_add(1, Ordering::Relaxed); + if checked_elements > 1 && checked_elements % 1000 == 0 { + info!( + "Checked {}/{} cache elements", + checked_elements, number_of_cached_elements + ); } - })?; - } - info!("Farm check completed"); + result + } + })?; Ok(()) } From 772ef08e6088d52563a2f86c0bd7953daf0c06e5 Mon Sep 17 00:00:00 2001 From: linning Date: Mon, 22 Jul 2024 17:01:50 +0800 Subject: [PATCH 36/37] Fix typo in MAX_BUNDLE_PER_BLOCK Signed-off-by: linning --- crates/pallet-domains/src/benchmarking.rs | 8 ++++---- crates/pallet-domains/src/lib.rs | 20 ++++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/crates/pallet-domains/src/benchmarking.rs b/crates/pallet-domains/src/benchmarking.rs index 511e4ee828..73c3ff8e86 100644 --- a/crates/pallet-domains/src/benchmarking.rs +++ b/crates/pallet-domains/src/benchmarking.rs @@ -189,7 +189,7 @@ mod benchmarks { /// Benchmark prune bad ER and slash the submitter based on the number of submitter #[benchmark] - fn handle_bad_receipt(n: Linear<1, MAX_BUNLDE_PER_BLOCK>) { + fn handle_bad_receipt(n: Linear<1, MAX_BUNDLE_PER_BLOCK>) { let minimum_nominator_stake = T::MinNominatorStake::get(); let domain_id = register_domain::(); let mut operator_ids = Vec::new(); @@ -246,8 +246,8 @@ mod benchmarks { /// in this block #[benchmark] fn confirm_domain_block( - n: Linear<1, MAX_BUNLDE_PER_BLOCK>, - s: Linear<0, MAX_BUNLDE_PER_BLOCK>, + n: Linear<1, MAX_BUNDLE_PER_BLOCK>, + s: Linear<0, MAX_BUNDLE_PER_BLOCK>, ) { let minimum_nominator_stake = T::MinNominatorStake::get(); let operator_rewards = @@ -314,7 +314,7 @@ mod benchmarks { /// 
Benchmark `operator_take_reward_tax_and_stake` based on the number of operator who has reward /// in the current epoch #[benchmark] - fn operator_reward_tax_and_restake(n: Linear<1, MAX_BUNLDE_PER_BLOCK>) { + fn operator_reward_tax_and_restake(n: Linear<1, MAX_BUNDLE_PER_BLOCK>) { let minimum_nominator_stake = T::MinNominatorStake::get(); let operator_rewards = T::Currency::minimum_balance().saturating_mul(BalanceOf::::from(1000u32)); diff --git a/crates/pallet-domains/src/lib.rs b/crates/pallet-domains/src/lib.rs index 7a4ead0cd1..dd25a23514 100644 --- a/crates/pallet-domains/src/lib.rs +++ b/crates/pallet-domains/src/lib.rs @@ -159,7 +159,7 @@ const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); /// and based on the consensus chain slot probability and domain bundle slot probability, usually /// the value is 6 on average, smaller/bigger value with less probability, we hypocritically use /// 100 as the maximum number of bundle per block for benchmarking. -const MAX_BUNLDE_PER_BLOCK: u32 = 100; +const MAX_BUNDLE_PER_BLOCK: u32 = 100; pub(crate) type StateRootOf = <::Hashing as Hash>::Output; @@ -204,7 +204,7 @@ mod pallet { use crate::{ BalanceOf, BlockSlot, BlockTreeNodeFor, DomainBlockNumberFor, ElectionVerificationParams, ExecutionReceiptOf, FraudProofFor, HoldIdentifier, NominatorId, OpaqueBundleOf, - ReceiptHashFor, StateRootOf, MAX_BUNLDE_PER_BLOCK, STORAGE_VERSION, + ReceiptHashFor, StateRootOf, MAX_BUNDLE_PER_BLOCK, STORAGE_VERSION, }; #[cfg(not(feature = "std"))] use alloc::string::String; @@ -1203,7 +1203,7 @@ mod pallet { #[pallet::call_index(15)] #[pallet::weight(( T::WeightInfo::submit_fraud_proof().saturating_add( - T::WeightInfo::handle_bad_receipt(MAX_BUNLDE_PER_BLOCK) + T::WeightInfo::handle_bad_receipt(MAX_BUNDLE_PER_BLOCK) ), DispatchClass::Operational ))] @@ -1248,7 +1248,7 @@ mod pallet { .ok_or::>(FraudProofError::BadReceiptNotFound.into())?; actual_weight = actual_weight.saturating_add(T::WeightInfo::handle_bad_receipt( - 
(block_tree_node.operator_ids.len() as u32).min(MAX_BUNLDE_PER_BLOCK), + (block_tree_node.operator_ids.len() as u32).min(MAX_BUNDLE_PER_BLOCK), )); do_mark_operators_as_slashed::( @@ -1608,7 +1608,7 @@ mod pallet { .ok_or::>(FraudProofError::BadReceiptNotFound.into())?; actual_weight = actual_weight.saturating_add(T::WeightInfo::handle_bad_receipt( - (block_tree_node.operator_ids.len() as u32).min(MAX_BUNLDE_PER_BLOCK), + (block_tree_node.operator_ids.len() as u32).min(MAX_BUNDLE_PER_BLOCK), )); do_mark_operators_as_slashed::( @@ -2545,11 +2545,11 @@ impl Pallet { // NOTE: within `submit_bundle`, only one of (or none) `handle_bad_receipt` and // `confirm_domain_block` can happen, thus we use the `max` of them - // We use `MAX_BUNLDE_PER_BLOCK` number to assume the number of slashed operators. + // We use `MAX_BUNDLE_PER_BLOCK` number to assume the number of slashed operators. // We do not expect so many operators to be slashed but nontheless, if it did happen // we will limit the weight to 100 operators. 
- T::WeightInfo::handle_bad_receipt(MAX_BUNLDE_PER_BLOCK).max( - T::WeightInfo::confirm_domain_block(MAX_BUNLDE_PER_BLOCK, MAX_BUNLDE_PER_BLOCK), + T::WeightInfo::handle_bad_receipt(MAX_BUNDLE_PER_BLOCK).max( + T::WeightInfo::confirm_domain_block(MAX_BUNDLE_PER_BLOCK, MAX_BUNDLE_PER_BLOCK), ), ) .saturating_add(Self::max_staking_epoch_transition()) @@ -2557,13 +2557,13 @@ impl Pallet { } pub fn max_staking_epoch_transition() -> Weight { - T::WeightInfo::operator_reward_tax_and_restake(MAX_BUNLDE_PER_BLOCK).saturating_add( + T::WeightInfo::operator_reward_tax_and_restake(MAX_BUNDLE_PER_BLOCK).saturating_add( T::WeightInfo::finalize_domain_epoch_staking(T::MaxPendingStakingOperation::get()), ) } pub fn max_prune_domain_execution_receipt() -> Weight { - T::WeightInfo::handle_bad_receipt(MAX_BUNLDE_PER_BLOCK) + T::WeightInfo::handle_bad_receipt(MAX_BUNDLE_PER_BLOCK) .saturating_add(T::DbWeight::get().reads_writes(3, 1)) } From dbae7925f7850a4394fe17e87ad187835e2997a8 Mon Sep 17 00:00:00 2001 From: tedious Date: Mon, 22 Jul 2024 23:41:16 +0800 Subject: [PATCH 37/37] chore: optimize entry usage --- crates/subspace-farmer/src/farm/plotted_pieces.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/crates/subspace-farmer/src/farm/plotted_pieces.rs b/crates/subspace-farmer/src/farm/plotted_pieces.rs index 10355e1ee1..084200f465 100644 --- a/crates/subspace-farmer/src/farm/plotted_pieces.rs +++ b/crates/subspace-farmer/src/farm/plotted_pieces.rs @@ -114,14 +114,10 @@ where piece_offset, }; - match self.pieces.entry(piece_index) { - Entry::Occupied(mut entry) => { - entry.get_mut().push(piece_details); - } - Entry::Vacant(entry) => { - entry.insert(vec![piece_details]); - } - } + self.pieces + .entry(piece_index) + .or_default() + .push(piece_details); } }