diff --git a/cli/src/main.rs b/cli/src/main.rs index f8033d61680..c34acb2f520 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -44,6 +44,9 @@ async fn main() -> error_stack::Result<(), MainError> { iroha_logger::info!( version = env!("CARGO_PKG_VERSION"), git_commit_sha = env!("VERGEN_GIT_SHA"), + peer = %config.common.peer, + chain = %config.common.chain, + listening_on = %config.torii.address.value(), "Hyperledgerいろは2にようこそ!(translation) Welcome to Hyperledger Iroha!" ); diff --git a/client/src/client.rs b/client/src/client.rs index de722f4b38f..c97eda480e9 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -469,7 +469,9 @@ impl Client { tx_builder.set_nonce(nonce); }; - tx_builder.with_metadata(metadata).sign(&self.key_pair) + tx_builder + .with_metadata(metadata) + .sign(self.key_pair.private_key()) } /// Signs transaction @@ -477,7 +479,7 @@ impl Client { /// # Errors /// Fails if signature generation fails pub fn sign_transaction(&self, transaction: TransactionBuilder) -> SignedTransaction { - transaction.sign(&self.key_pair) + transaction.sign(self.key_pair.private_key()) } /// Signs query @@ -1664,20 +1666,12 @@ mod tests { use http::Response; use super::*; - use crate::data_model::{asset::Asset, query::error::QueryExecutionFail, ValidationFail}; + use crate::data_model::{asset::Asset, ValidationFail}; #[test] fn certain_errors() -> Result<()> { let mut sut = QueryResponseHandler::>::new(QueryRequest::dummy()); - let responses = vec![ - ( - StatusCode::UNAUTHORIZED, - ValidationFail::QueryFailed(QueryExecutionFail::Signature( - "whatever".to_owned(), - )), - ), - (StatusCode::UNPROCESSABLE_ENTITY, ValidationFail::TooComplex), - ]; + let responses = vec![(StatusCode::UNPROCESSABLE_ENTITY, ValidationFail::TooComplex)]; for (status_code, err) in responses { let resp = Response::builder().status(status_code).body(err.encode())?; diff --git a/client/tests/integration/asset.rs b/client/tests/integration/asset.rs index d76f6dcc6c7..f0fe33e8fd9 100644 --- a/client/tests/integration/asset.rs +++ b/client/tests/integration/asset.rs @@ -307,7 +307,7 @@ fn find_rate_and_make_exchange_isi_should_succeed() { asset_id.account().clone(), ) .with_instructions([instruction]) - .sign(&owner_key_pair); + .sign(owner_key_pair.private_key()); test_client .submit_transaction_blocking(&transaction) diff --git a/client/tests/integration/domain_owner_permissions.rs b/client/tests/integration/domain_owner_permissions.rs index 58d7991a200..e23303089ea 100644 --- a/client/tests/integration/domain_owner_permissions.rs +++ b/client/tests/integration/domain_owner_permissions.rs @@ -26,7 +26,7 @@ fn domain_owner_domain_permissions() -> Result<()> { // Asset definitions can't be registered by "bob@kingdom" by default let transaction = TransactionBuilder::new(chain_id.clone(), bob_id.clone()) .with_instructions([Register::asset_definition(coin.clone())]) - .sign(&bob_keypair); + .sign(bob_keypair.private_key()); let err = test_client .submit_transaction_blocking(&transaction) .expect_err("Tx should fail due to permissions"); @@ -52,7 +52,7 @@ fn domain_owner_domain_permissions() -> Result<()> { test_client.submit_blocking(Grant::permission(token.clone(), bob_id.clone()))?; let transaction = TransactionBuilder::new(chain_id, bob_id.clone()) .with_instructions([Register::asset_definition(coin)]) - .sign(&bob_keypair); + .sign(bob_keypair.private_key()); test_client.submit_transaction_blocking(&transaction)?; test_client.submit_blocking(Revoke::permission(token, bob_id.clone()))?; @@ -148,7 +148,7 @@ 
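The change that recurs throughout this patch — `.sign(&key_pair)` becoming `.sign(key_pair.private_key())` — means transaction signing now needs only the private half of a `KeyPair`. A minimal sketch of the new call shape follows; imports are assumptions based on the prelude the surrounding tests use, and the helper name is illustrative, not from the patch:

```rust
use iroha::data_model::{prelude::*, ChainId};
use iroha_crypto::KeyPair;

// Illustrative helper: build and sign an empty transaction under the new convention.
fn sign_empty_tx(chain_id: ChainId, authority: AccountId, key_pair: &KeyPair) -> SignedTransaction {
    let instructions: [InstructionBox; 0] = [];
    TransactionBuilder::new(chain_id, authority)
        .with_instructions(instructions)
        // `sign` now takes `&PrivateKey` rather than `&KeyPair`.
        .sign(key_pair.private_key())
}
```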
fn domain_owner_asset_definition_permissions() -> Result<()> { let coin = AssetDefinition::numeric(coin_id.clone()); let transaction = TransactionBuilder::new(chain_id, bob_id.clone()) .with_instructions([Register::asset_definition(coin)]) - .sign(&bob_keypair); + .sign(bob_keypair.private_key()); test_client.submit_transaction_blocking(&transaction)?; // check that "alice@wonderland" as owner of domain can transfer asset definitions in her domain @@ -217,7 +217,7 @@ fn domain_owner_asset_permissions() -> Result<()> { Register::asset_definition(coin), Register::asset_definition(store), ]) - .sign(&bob_keypair); + .sign(bob_keypair.private_key()); test_client.submit_transaction_blocking(&transaction)?; // check that "alice@wonderland" as owner of domain can register and unregister assets in her domain diff --git a/client/tests/integration/extra_functional/mod.rs b/client/tests/integration/extra_functional/mod.rs index ad4db9c11d0..ed3bef44f09 100644 --- a/client/tests/integration/extra_functional/mod.rs +++ b/client/tests/integration/extra_functional/mod.rs @@ -1,5 +1,6 @@ mod connected_peers; mod multiple_blocks_created; +mod normal; mod offline_peers; mod restart_peer; mod unregister_peer; diff --git a/client/tests/integration/extra_functional/normal.rs b/client/tests/integration/extra_functional/normal.rs new file mode 100644 index 00000000000..c278deafa96 --- /dev/null +++ b/client/tests/integration/extra_functional/normal.rs @@ -0,0 +1,55 @@ +use std::num::NonZeroU32; + +use iroha::client::{self, Client}; +use iroha_config::parameters::actual::Root as Config; +use iroha_data_model::{asset::AssetDefinitionId, prelude::*}; +use test_network::*; +use tokio::runtime::Runtime; + +#[test] +fn transactions_should_be_applied() { + let rt = Runtime::test(); + let (network, iroha) = rt.block_on(async { + let mut configuration = Config::test(); + configuration.chain_wide.max_transactions_in_block = NonZeroU32::new(1).unwrap(); + let network = Network::new_with_offline_peers(Some(configuration), 4, 0, Some(11_300)) + .await + .unwrap(); + let iroha = Client::test(&network.genesis.api_address); + + (network, iroha) + }); + wait_for_genesis_committed(&network.clients(), 0); + + let domain_id = "and".parse::<DomainId>().unwrap(); + let account_id = "ed01201F803CB23B1AAFB958368DF2F67CB78A2D1DFB47FFFC3133718F165F54DFF677@and" + .parse::<AccountId>() + .unwrap(); + let asset_definition_id = "MAY#and".parse::<AssetDefinitionId>().unwrap(); + let asset_id = + "MAY##ed01201F803CB23B1AAFB958368DF2F67CB78A2D1DFB47FFFC3133718F165F54DFF677@and" + .parse() + .unwrap(); + + let create_domain = Register::domain(Domain::new(domain_id)); + iroha.submit_blocking(create_domain).unwrap(); + + let create_asset = + Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone())); + iroha.submit_blocking(create_asset).unwrap(); + + let create_account = Register::account(Account::new(account_id.clone())); + iroha.submit_blocking(create_account).unwrap(); + + let mint_asset = Mint::asset_numeric( + numeric!(57_787_013_353_273_097_936_105_299_296), + AssetId::new(asset_definition_id.clone(), account_id.clone()), + ); + iroha.submit_blocking(mint_asset).unwrap(); + + let mint_asset = + Mint::asset_numeric(numeric!(1), AssetId::new(asset_definition_id, account_id)); + iroha.submit_blocking(mint_asset).unwrap(); + + iroha.request(client::asset::by_id(asset_id)).unwrap(); +} diff --git a/client/tests/integration/extra_functional/unstable_network.rs b/client/tests/integration/extra_functional/unstable_network.rs index af56f816710..558991f9c64 100644
--- a/client/tests/integration/extra_functional/unstable_network.rs +++ b/client/tests/integration/extra_functional/unstable_network.rs @@ -2,7 +2,7 @@ use std::thread; use iroha::{ client::{self, Client, QueryResult}, - data_model::{prelude::*, Level}, + data_model::prelude::*, }; use iroha_config::parameters::actual::Root as Config; use rand::seq::SliceRandom; @@ -55,7 +55,6 @@ fn unstable_network( let mut configuration = Config::test(); configuration.chain_wide.max_transactions_in_block = MAX_TRANSACTIONS_IN_BLOCK.try_into().unwrap(); - configuration.logger.level = Level::INFO; #[cfg(debug_assertions)] { configuration.sumeragi.debug_force_soft_fork = force_soft_fork; diff --git a/client/tests/integration/permissions.rs b/client/tests/integration/permissions.rs index 975c5862fd5..a48f55ecf6e 100644 --- a/client/tests/integration/permissions.rs +++ b/client/tests/integration/permissions.rs @@ -102,7 +102,7 @@ fn permissions_disallow_asset_transfer() { ); let transfer_tx = TransactionBuilder::new(chain_id, mouse_id) .with_instructions([transfer_asset]) - .sign(&mouse_keypair); + .sign(mouse_keypair.private_key()); let err = iroha .submit_transaction_blocking(&transfer_tx) .expect_err("Transaction was not rejected."); @@ -151,7 +151,7 @@ fn permissions_disallow_asset_burn() { ); let burn_tx = TransactionBuilder::new(chain_id, mouse_id) .with_instructions([burn_asset]) - .sign(&mouse_keypair); + .sign(mouse_keypair.private_key()); let err = iroha .submit_transaction_blocking(&burn_tx) @@ -239,7 +239,7 @@ fn permissions_differ_not_only_by_names() { let grant_hats_access_tx = TransactionBuilder::new(chain_id.clone(), mouse_id.clone()) .with_instructions([allow_alice_to_set_key_value_in_hats]) - .sign(&mouse_keypair); + .sign(mouse_keypair.private_key()); client .submit_transaction_blocking(&grant_hats_access_tx) .expect("Failed grant permission to modify Mouse's hats"); @@ -275,7 +275,7 @@ fn permissions_differ_not_only_by_names() { let grant_shoes_access_tx = TransactionBuilder::new(chain_id, mouse_id) .with_instructions([allow_alice_to_set_key_value_in_shoes]) - .sign(&mouse_keypair); + .sign(mouse_keypair.private_key()); client .submit_transaction_blocking(&grant_shoes_access_tx) @@ -328,7 +328,7 @@ fn stored_vs_granted_token_payload() -> Result<()> { let transaction = TransactionBuilder::new(chain_id, mouse_id) .with_instructions([allow_alice_to_set_key_value_in_mouse_asset]) - .sign(&mouse_keypair); + .sign(mouse_keypair.private_key()); iroha .submit_transaction_blocking(&transaction) .expect("Failed to grant permission to alice."); diff --git a/client/tests/integration/roles.rs b/client/tests/integration/roles.rs index 242535f360e..13cf7afb95f 100644 --- a/client/tests/integration/roles.rs +++ b/client/tests/integration/roles.rs @@ -76,7 +76,7 @@ fn register_and_grant_role_for_metadata_access() -> Result<()> { let grant_role = Grant::role(role_id.clone(), alice_id.clone()); let grant_role_tx = TransactionBuilder::new(chain_id, mouse_id.clone()) .with_instructions([grant_role]) - .sign(&mouse_keypair); + .sign(mouse_keypair.private_key()); test_client.submit_transaction_blocking(&grant_role_tx)?; // Alice modifies Mouse's metadata @@ -236,7 +236,7 @@ fn grant_revoke_role_permissions() -> Result<()> { let grant_role = Grant::role(role_id.clone(), alice_id.clone()); let grant_role_tx = TransactionBuilder::new(chain_id.clone(), mouse_id.clone()) .with_instructions([grant_role]) - .sign(&mouse_keypair); + .sign(mouse_keypair.private_key()); 
test_client.submit_transaction_blocking(&grant_role_tx)?; let set_key_value = SetKeyValue::account( @@ -263,7 +263,7 @@ fn grant_revoke_role_permissions() -> Result<()> { // Alice can modify Mouse's metadata after permission token is granted to role let grant_role_permission_tx = TransactionBuilder::new(chain_id.clone(), mouse_id.clone()) .with_instructions([grant_role_permission]) - .sign(&mouse_keypair); + .sign(mouse_keypair.private_key()); test_client.submit_transaction_blocking(&grant_role_permission_tx)?; let found_permissions = test_client .request(FindPermissionsByAccountId::new(alice_id.clone()))? @@ -274,7 +274,7 @@ fn grant_revoke_role_permissions() -> Result<()> { // Alice can't modify Mouse's metadata after permission token is removed from role let revoke_role_permission_tx = TransactionBuilder::new(chain_id.clone(), mouse_id.clone()) .with_instructions([revoke_role_permission]) - .sign(&mouse_keypair); + .sign(mouse_keypair.private_key()); test_client.submit_transaction_blocking(&revoke_role_permission_tx)?; let found_permissions = test_client .request(FindPermissionsByAccountId::new(alice_id.clone()))? diff --git a/client/tests/integration/tx_chain_id.rs b/client/tests/integration/tx_chain_id.rs index c92723c0cf6..2b3113ae806 100644 --- a/client/tests/integration/tx_chain_id.rs +++ b/client/tests/integration/tx_chain_id.rs @@ -1,5 +1,4 @@ use iroha::data_model::prelude::*; -use iroha_data_model::asset::AssetDefinitionId; use iroha_primitives::numeric::numeric; use test_network::*; use test_samples::gen_account_in; @@ -45,10 +44,10 @@ fn send_tx_with_different_chain_id() { ); let asset_transfer_tx_0 = TransactionBuilder::new(chain_id_0, sender_id.clone()) .with_instructions([transfer_instruction.clone()]) - .sign(&sender_keypair); + .sign(sender_keypair.private_key()); let asset_transfer_tx_1 = TransactionBuilder::new(chain_id_1, sender_id.clone()) .with_instructions([transfer_instruction]) - .sign(&sender_keypair); + .sign(sender_keypair.private_key()); test_client .submit_transaction_blocking(&asset_transfer_tx_0) .unwrap(); diff --git a/client/tests/integration/upgrade.rs b/client/tests/integration/upgrade.rs index 4ecebbf685f..2d06f4a8a75 100644 --- a/client/tests/integration/upgrade.rs +++ b/client/tests/integration/upgrade.rs @@ -4,7 +4,6 @@ use eyre::Result; use futures_util::TryStreamExt as _; use iroha::{ client::{self, Client, QueryResult}, - crypto::KeyPair, data_model::prelude::*, }; use iroha_data_model::parameter::{default::EXECUTOR_FUEL_LIMIT, ParametersBuilder}; @@ -24,11 +23,9 @@ fn executor_upgrade_should_work() -> Result<()> { let admin_id: AccountId = format!("{ADMIN_PUBLIC_KEY_MULTIHASH}@admin") .parse() .unwrap(); - let admin_keypair = KeyPair::new( - admin_id.signatory().clone(), - ADMIN_PRIVATE_KEY_MULTIHASH.parse().unwrap(), - ) - .unwrap(); + let admin_private_key = ADMIN_PRIVATE_KEY_MULTIHASH + .parse::() + .unwrap(); let (_rt, _peer, client) = ::new().with_port(10_795).start_with_runtime(); wait_for_genesis_committed(&vec![client.clone()], 0); @@ -49,7 +46,7 @@ fn executor_upgrade_should_work() -> Result<()> { let transfer_alice_rose = Transfer::asset_numeric(alice_rose, 1u32, admin_id.clone()); let transfer_rose_tx = TransactionBuilder::new(chain_id.clone(), admin_id.clone()) .with_instructions([transfer_alice_rose.clone()]) - .sign(&admin_keypair); + .sign(&admin_private_key); let _ = client .submit_transaction_blocking(&transfer_rose_tx) .expect_err("Should fail"); @@ -63,7 +60,7 @@ fn executor_upgrade_should_work() -> Result<()> { // Creating 
new transaction instead of cloning, because we need to update it's creation time let transfer_rose_tx = TransactionBuilder::new(chain_id, admin_id.clone()) .with_instructions([transfer_alice_rose]) - .sign(&admin_keypair); + .sign(&admin_private_key); client .submit_transaction_blocking(&transfer_rose_tx) .expect("Should succeed"); diff --git a/client_cli/pytests/src/client_cli/client_cli.py b/client_cli/pytests/src/client_cli/client_cli.py index 6ebcdadafba..27087ac7592 100644 --- a/client_cli/pytests/src/client_cli/client_cli.py +++ b/client_cli/pytests/src/client_cli/client_cli.py @@ -34,7 +34,7 @@ def __init__(self, config: Config): self.stdout = None self.stderr = None self.transaction_hash = None - self._timeout = 20 + self._timeout = 40 def __enter__(self): """ diff --git a/client_cli/src/main.rs b/client_cli/src/main.rs index 50b7bdaba9b..3a2ea0caa3e 100644 --- a/client_cli/src/main.rs +++ b/client_cli/src/main.rs @@ -84,6 +84,7 @@ impl FromStr for MetadataValueArg { struct Args { /// Path to the configuration file #[arg(short, long, value_name("PATH"), value_hint(clap::ValueHint::FilePath))] + #[clap(default_value = "client.toml")] config: PathBuf, /// More verbose output #[arg(short, long)] diff --git a/configs/swarm/executor.wasm b/configs/swarm/executor.wasm index 0ec7ad328a4..c3d58d763d5 100644 Binary files a/configs/swarm/executor.wasm and b/configs/swarm/executor.wasm differ diff --git a/core/benches/blocks/apply_blocks.rs b/core/benches/blocks/apply_blocks.rs index 28daef9e720..eef0b25d7dd 100644 --- a/core/benches/blocks/apply_blocks.rs +++ b/core/benches/blocks/apply_blocks.rs @@ -44,7 +44,7 @@ impl StateApplyBlocks { &mut state_block, instructions, alice_id.clone(), - &alice_keypair, + alice_keypair.private_key(), ); let _events = state_block.apply_without_execution(&block); state_block.commit(); diff --git a/core/benches/blocks/common.rs b/core/benches/blocks/common.rs index 6cf22c3fee7..297bbed5d42 100644 --- a/core/benches/blocks/common.rs +++ b/core/benches/blocks/common.rs @@ -25,25 +25,25 @@ pub fn create_block( state: &mut StateBlock<'_>, instructions: Vec, account_id: AccountId, - key_pair: &KeyPair, + private_key: &PrivateKey, ) -> CommittedBlock { let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000"); let transaction = TransactionBuilder::new(chain_id.clone(), account_id) .with_instructions(instructions) - .sign(key_pair); + .sign(private_key); let limits = state.transaction_executor().transaction_limits; let (peer_public_key, _) = KeyPair::random().into_parts(); let peer_id = PeerId::new("127.0.0.1:8080".parse().unwrap(), peer_public_key); let topology = Topology::new(vec![peer_id]); let block = BlockBuilder::new( - vec![AcceptedTransaction::accept(transaction, &chain_id, &limits).unwrap()], + vec![AcceptedTransaction::accept(transaction, &chain_id, limits).unwrap()], topology.clone(), Vec::new(), ) .chain(0, state) - .sign(key_pair.private_key()) + .sign(private_key) .unpack(|_| {}) .commit(&topology) .unpack(|_| {}) diff --git a/core/benches/blocks/validate_blocks.rs b/core/benches/blocks/validate_blocks.rs index 0d26e9ed407..c9e7e082ab4 100644 --- a/core/benches/blocks/validate_blocks.rs +++ b/core/benches/blocks/validate_blocks.rs @@ -10,7 +10,7 @@ use common::*; pub struct StateValidateBlocks { state: State, instructions: Vec>, - key_pair: KeyPair, + private_key: PrivateKey, account_id: AccountId, } @@ -41,7 +41,7 @@ impl StateValidateBlocks { Self { state, instructions, - key_pair: alice_keypair, + private_key: 
alice_keypair.private_key().clone(), account_id: alice_id, } } @@ -58,7 +58,7 @@ impl StateValidateBlocks { Self { state, instructions, - key_pair, + private_key, account_id, }: Self, ) { @@ -68,7 +68,7 @@ impl StateValidateBlocks { &mut state_block, instructions, account_id.clone(), - &key_pair, + &private_key, ); let _events = state_block.apply_without_execution(&block); assert_eq!(state_block.height(), i); diff --git a/core/benches/kura.rs b/core/benches/kura.rs index 7f15ed8d8be..bc3b55a49e6 100644 --- a/core/benches/kura.rs +++ b/core/benches/kura.rs @@ -28,12 +28,12 @@ async fn measure_block_size_for_n_executors(n_executors: u32) { let transfer = Transfer::asset_numeric(alice_xor_id, 10u32, bob_id); let tx = TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions([transfer]) - .sign(&alice_keypair); + .sign(alice_keypair.private_key()); let transaction_limits = TransactionLimits { max_instruction_number: 4096, max_wasm_size_bytes: 0, }; - let tx = AcceptedTransaction::accept(tx, &chain_id, &transaction_limits) + let tx = AcceptedTransaction::accept(tx, &chain_id, transaction_limits) .expect("Failed to accept Transaction."); let dir = tempfile::tempdir().expect("Could not create tempfile."); let cfg = Config { diff --git a/core/benches/validation.rs b/core/benches/validation.rs index a4c43460399..009e39fe00a 100644 --- a/core/benches/validation.rs +++ b/core/benches/validation.rs @@ -80,12 +80,12 @@ fn build_test_and_transient_state() -> State { fn accept_transaction(criterion: &mut Criterion) { let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000"); - let transaction = build_test_transaction(chain_id.clone()).sign(&STARTER_KEYPAIR); + let transaction = build_test_transaction(chain_id.clone()).sign(STARTER_KEYPAIR.private_key()); let mut success_count = 0; let mut failures_count = 0; let _ = criterion.bench_function("accept", |b| { b.iter(|| { - match AcceptedTransaction::accept(transaction.clone(), &chain_id, &TRANSACTION_LIMITS) { + match AcceptedTransaction::accept(transaction.clone(), &chain_id, TRANSACTION_LIMITS) { Ok(_) => success_count += 1, Err(_) => failures_count += 1, } @@ -98,13 +98,13 @@ fn sign_transaction(criterion: &mut Criterion) { let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000"); let transaction = build_test_transaction(chain_id); - let key_pair = KeyPair::random(); + let (_, private_key) = KeyPair::random().into_parts(); let mut count = 0; let _ = criterion.bench_function("sign", |b| { b.iter_batched( || transaction.clone(), |transaction| { - let _: SignedTransaction = transaction.sign(&key_pair); + let _: SignedTransaction = transaction.sign(&private_key); count += 1; }, BatchSize::SmallInput, @@ -117,9 +117,9 @@ fn validate_transaction(criterion: &mut Criterion) { let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000"); let transaction = AcceptedTransaction::accept( - build_test_transaction(chain_id.clone()).sign(&STARTER_KEYPAIR), + build_test_transaction(chain_id.clone()).sign(STARTER_KEYPAIR.private_key()), &chain_id, - &TRANSACTION_LIMITS, + TRANSACTION_LIMITS, ) .expect("Failed to accept transaction."); let mut success_count = 0; @@ -142,9 +142,9 @@ fn sign_blocks(criterion: &mut Criterion) { let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000"); let transaction = AcceptedTransaction::accept( - build_test_transaction(chain_id.clone()).sign(&STARTER_KEYPAIR), + build_test_transaction(chain_id.clone()).sign(STARTER_KEYPAIR.private_key()), &chain_id, - &TRANSACTION_LIMITS, + 
TRANSACTION_LIMITS, ) .expect("Failed to accept transaction."); let kura = iroha_core::kura::Kura::blank_kura_for_testing(); diff --git a/core/src/block.rs b/core/src/block.rs index 63dd1ee9ee0..19d2fd2487e 100644 --- a/core/src/block.rs +++ b/core/src/block.rs @@ -131,8 +131,6 @@ mod pending { commit_topology: Topology, event_recommendations: Vec, ) -> Self { - assert!(!transactions.is_empty(), "Empty block created"); - Self(Pending { commit_topology, transactions, @@ -158,14 +156,17 @@ mod pending { .iter() .map(|value| value.as_ref().hash()) .collect::>() - .hash(), + .hash() + .expect("INTERNAL BUG: Empty block created"), timestamp_ms: SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .expect("INTERNAL BUG: Failed to get the current system time") .as_millis() .try_into() - .expect("INTERNAL BUG: Time should fit into u64"), - view_change_index: view_change_index as u32, + .expect("Time should fit into u64"), + view_change_index: view_change_index + .try_into() + .expect("View change index should fit into u32"), consensus_estimation_ms: consensus_estimation .as_millis() .try_into() @@ -244,7 +245,7 @@ mod chained { mod valid { use indexmap::IndexMap; - use iroha_data_model::ChainId; + use iroha_data_model::{account::AccountId, ChainId}; use super::*; use crate::{state::StateBlock, sumeragi::network_topology::Role}; @@ -294,24 +295,14 @@ mod valid { block: &SignedBlock, topology: &Topology, ) -> Result<(), SignatureVerificationError> { - // TODO: ? - //let roles: &[Role] = if topology.view_change_index() >= 1 { - // &[Role::ValidatingPeer, Role::ObservingPeer] - //} else { - // if topology - // .filter_signatures_by_roles(&[Role::ObservingPeer], block.signatures()) - // .next() - // .is_some() - // { - // return Err(SignatureVerificationError::UnknownSignatory); - // } - - // &[Role::ValidatingPeer] - //}; - let roles = &[Role::ValidatingPeer, Role::ObservingPeer]; + let valid_roles: &[Role] = if topology.view_change_index() >= 1 { + &[Role::ValidatingPeer, Role::ObservingPeer] + } else { + &[Role::ValidatingPeer] + }; topology - .filter_signatures_by_roles(roles, block.signatures()) + .filter_signatures_by_roles(valid_roles, block.signatures()) .try_fold(IndexMap::::default(), |mut acc, signature| { let signatory_idx = usize::try_from(signature.0) .map_err(|_err| SignatureVerificationError::UnknownSignatory)?; @@ -338,6 +329,13 @@ mod valid { Ok(()) })?; + Ok(()) + } + + fn verify_no_undefined_signatures( + block: &SignedBlock, + topology: &Topology, + ) -> Result<(), SignatureVerificationError> { if topology .filter_signatures_by_roles(&[Role::Undefined], block.signatures()) .next() @@ -403,7 +401,7 @@ mod valid { block: SignedBlock, topology: &Topology, expected_chain_id: &ChainId, - genesis_public_key: &PublicKey, + genesis_account: &AccountId, state_block: &mut StateBlock<'_>, ) -> WithEvents> { let expected_block_height = state_block.height() + 1; @@ -439,6 +437,7 @@ mod valid { if !block.header().is_genesis() { if let Err(err) = Self::verify_leader_signature(&block, topology) .and_then(|()| Self::verify_validator_signatures(&block, topology)) + .and_then(|()| Self::verify_no_undefined_signatures(&block, topology)) { return WithEvents::new(Err((block, err.into()))); } @@ -469,12 +468,9 @@ mod valid { ))); } - if let Err(error) = Self::validate_transactions( - &block, - expected_chain_id, - genesis_public_key, - state_block, - ) { + if let Err(error) = + Self::validate_transactions(&block, expected_chain_id, genesis_account, state_block) + { return WithEvents::new(Err((block, 
error.into()))); } @@ -484,7 +480,7 @@ mod valid { fn validate_transactions( block: &SignedBlock, expected_chain_id: &ChainId, - genesis_public_key: &PublicKey, + genesis_account: &AccountId, state_block: &mut StateBlock<'_>, ) -> Result<(), TransactionValidationError> { let is_genesis = block.header().is_genesis(); @@ -495,16 +491,19 @@ mod valid { .cloned() .try_for_each(|CommittedTransaction { value, error }| { let transaction_executor = state_block.transaction_executor(); - let limits = &transaction_executor.transaction_limits; let tx = if is_genesis { AcceptedTransaction::accept_genesis( GenesisTransaction(value), expected_chain_id, - genesis_public_key, + genesis_account, ) } else { - AcceptedTransaction::accept(value, expected_chain_id, limits) + AcceptedTransaction::accept( + value, + expected_chain_id, + transaction_executor.transaction_limits, + ) }?; if error.is_some() { @@ -531,9 +530,21 @@ mod valid { &mut self, signature: BlockSignature, topology: &Topology, - ) -> Result<(), iroha_crypto::Error> { - let signatory = &topology.as_ref()[signature.0 as usize]; - self.0.add_signature(signature, signatory.public_key()) + ) -> Result<(), SignatureVerificationError> { + let signatory_idx = usize::try_from(signature.0) + .expect("INTERNAL BUG: Number of peers exceeds usize::MAX"); + let signatory = &topology.as_ref()[signatory_idx]; + + assert_ne!(Role::Leader, topology.role(signatory)); + if topology.view_change_index() == 0 { + assert_ne!(Role::ObservingPeer, topology.role(signatory),); + } + assert_ne!(Role::Undefined, topology.role(signatory)); + assert_ne!(Role::ProxyTail, topology.role(signatory)); + + self.0 + .add_signature(signature, signatory.public_key()) + .map_err(|_err| SignatureVerificationError::UnknownSignature) } /// Replace block's signatures. 
Returns previous block signatures @@ -553,6 +564,7 @@ mod valid { if let Err(err) = Self::verify_leader_signature(self.as_ref(), topology) .and_then(|()| Self::verify_validator_signatures(self.as_ref(), topology)) + .and_then(|()| Self::verify_no_undefined_signatures(self.as_ref(), topology)) { self.0.replace_signatures_unchecked(prev_signatures); WithEvents::new(Err(err)) @@ -613,7 +625,9 @@ mod valid { header: BlockHeader { height: 2, prev_block_hash: None, - transactions_hash: None, + transactions_hash: HashOf::from_untyped_unchecked(Hash::prehashed( + [1; Hash::LENGTH], + )), timestamp_ms: 0, view_change_index: 0, consensus_estimation_ms: 4_000, @@ -644,13 +658,9 @@ mod valid { #[cfg(test)] mod tests { use iroha_crypto::SignatureOf; - use iroha_primitives::unique_vec::UniqueVec; use super::*; - use crate::{ - kura::Kura, query::store::LiveQueryStore, state::State, - sumeragi::network_topology::test_peers, - }; + use crate::sumeragi::network_topology::test_peers; #[test] fn signature_verification_ok() { @@ -901,7 +911,7 @@ mod tests { use iroha_data_model::prelude::*; use iroha_genesis::GENESIS_DOMAIN_ID; use iroha_primitives::unique_vec::UniqueVec; - use test_samples::gen_account_in; + use test_samples::{gen_account_in, SAMPLE_GENESIS_ACCOUNT_ID}; use super::*; use crate::{ @@ -946,10 +956,10 @@ mod tests { Register::asset_definition(AssetDefinition::numeric(asset_definition_id)); // Making two transactions that have the same instruction - let transaction_limits = &state_block.transaction_executor().transaction_limits; + let transaction_limits = state_block.transaction_executor().transaction_limits; let tx = TransactionBuilder::new(chain_id.clone(), alice_id) .with_instructions([create_asset_definition]) - .sign(&alice_keypair); + .sign(alice_keypair.private_key()); let tx = AcceptedTransaction::accept(tx, &chain_id, transaction_limits).expect("Valid"); // Creating a block of two identical transactions and validating it @@ -1003,10 +1013,10 @@ mod tests { Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone())); // Making two transactions that have the same instruction - let transaction_limits = &state_block.transaction_executor().transaction_limits; + let transaction_limits = state_block.transaction_executor().transaction_limits; let tx = TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions([create_asset_definition]) - .sign(&alice_keypair); + .sign(alice_keypair.private_key()); let tx = AcceptedTransaction::accept(tx, &chain_id, transaction_limits).expect("Valid"); let fail_mint = Mint::asset_numeric( @@ -1019,12 +1029,12 @@ mod tests { let tx0 = TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions([fail_mint]) - .sign(&alice_keypair); + .sign(alice_keypair.private_key()); let tx0 = AcceptedTransaction::accept(tx0, &chain_id, transaction_limits).expect("Valid"); let tx2 = TransactionBuilder::new(chain_id.clone(), alice_id) .with_instructions([succeed_mint]) - .sign(&alice_keypair); + .sign(alice_keypair.private_key()); let tx2 = AcceptedTransaction::accept(tx2, &chain_id, transaction_limits).expect("Valid"); // Creating a block of two identical transactions and validating it @@ -1074,7 +1084,7 @@ mod tests { let query_handle = LiveQueryStore::test().start(); let state = State::new(world, kura, query_handle); let mut state_block = state.block(); - let transaction_limits = &state_block.transaction_executor().transaction_limits; + let transaction_limits = state_block.transaction_executor().transaction_limits; 
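Note the pattern in the test hunk above: `transaction_limits` is now taken from the executor by value and reused across several `AcceptedTransaction::accept` calls, i.e. `TransactionLimits` is passed by copy instead of by reference throughout this patch. A minimal sketch of the new call shape; the module paths in the imports are assumptions, not taken from the patch:

```rust
use iroha_core::tx::{AcceptTransactionFail, AcceptedTransaction}; // paths assumed
use iroha_data_model::{prelude::*, transaction::TransactionLimits, ChainId}; // paths assumed

// Illustrative helper: accept a signed transaction with limits passed by value.
fn accept_with_limits(
    tx: SignedTransaction,
    chain_id: &ChainId,
    limits: TransactionLimits, // by value now; call sites above reuse the same binding
) -> Result<AcceptedTransaction, AcceptTransactionFail> {
    AcceptedTransaction::accept(tx, chain_id, limits)
}
```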
let domain_id = DomainId::from_str("domain").expect("Valid"); let create_domain = Register::domain(Domain::new(domain_id)); @@ -1088,12 +1098,12 @@ mod tests { let instructions_accept: [InstructionBox; 2] = [create_domain.into(), create_asset.into()]; let tx_fail = TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions(instructions_fail) - .sign(&alice_keypair); + .sign(alice_keypair.private_key()); let tx_fail = AcceptedTransaction::accept(tx_fail, &chain_id, transaction_limits).expect("Valid"); let tx_accept = TransactionBuilder::new(chain_id.clone(), alice_id) .with_instructions(instructions_accept) - .sign(&alice_keypair); + .sign(alice_keypair.private_key()); let tx_accept = AcceptedTransaction::accept(tx_accept, &chain_id, transaction_limits).expect("Valid"); @@ -1168,11 +1178,11 @@ mod tests { // Sign with `genesis_wrong_key` as peer which has incorrect genesis key pair let tx = TransactionBuilder::new(chain_id.clone(), genesis_wrong_account_id.clone()) .with_instructions([isi]) - .sign(&genesis_wrong_key); + .sign(genesis_wrong_key.private_key()); let tx = AcceptedTransaction::accept_genesis( iroha_genesis::GenesisTransaction(tx), &chain_id, - genesis_wrong_key.public_key(), + &SAMPLE_GENESIS_ACCOUNT_ID, ) .expect("Valid"); @@ -1193,7 +1203,7 @@ mod tests { block, &topology, &chain_id, - genesis_correct_key.public_key(), + &SAMPLE_GENESIS_ACCOUNT_ID, &mut state_block, ) .unpack(|_| {}) diff --git a/core/src/gossiper.rs b/core/src/gossiper.rs index c964bbb151f..4f5018aa9f6 100644 --- a/core/src/gossiper.rs +++ b/core/src/gossiper.rs @@ -110,7 +110,7 @@ impl TransactionGossiper { let state_view = self.state.view(); for tx in txs { - let transaction_limits = &state_view.config.transaction_limits; + let transaction_limits = state_view.config.transaction_limits; match AcceptedTransaction::accept(tx, &self.chain_id, transaction_limits) { Ok(tx) => match self.queue.push(tx, &state_view) { diff --git a/core/src/queue.rs b/core/src/queue.rs index 1fb321da655..1e343eb4033 100644 --- a/core/src/queue.rs +++ b/core/src/queue.rs @@ -22,12 +22,6 @@ use crate::{prelude::*, EventsSender}; impl AcceptedTransaction { // TODO: We should have another type of transaction like `CheckedTransaction` in the type system? - fn is_signatory_consistent(&self) -> bool { - let authority = self.as_ref().authority(); - let signatory = &self.as_ref().signature().0; - authority.signatory_matches(signatory) - } - /// Check if [`self`] is committed or rejected. 
fn is_in_blockchain(&self, state_view: &StateView<'_>) -> bool { state_view.has_transaction(self.as_ref().hash()) @@ -77,8 +71,6 @@ pub enum Error { MaximumTransactionsPerUser, /// The transaction is already in the queue IsInQueue, - /// Signatories in signature and payload mismatch - SignatoryInconsistent, } /// Failure that can pop up when pushing transaction into the queue @@ -175,8 +167,6 @@ impl Queue { Err(Error::Expired) } else if tx.is_in_blockchain(state_view) { Err(Error::InBlockchain) - } else if !tx.is_signatory_consistent() { - Err(Error::SignatoryInconsistent) } else { Ok(()) } @@ -436,12 +426,12 @@ pub mod tests { let tx = TransactionBuilder::new_with_time_source(chain_id.clone(), account_id, time_source) .with_instructions(instructions) - .sign(key_pair); + .sign(key_pair.private_key()); let limits = TransactionLimits { max_instruction_number: 4096, max_wasm_size_bytes: 0, }; - AcceptedTransaction::accept(tx, &chain_id, &limits).expect("Failed to accept Transaction.") + AcceptedTransaction::accept(tx, &chain_id, limits).expect("Failed to accept Transaction.") } pub fn world_with_test_domains() -> World { @@ -687,13 +677,13 @@ pub mod tests { TransactionBuilder::new_with_time_source(chain_id.clone(), alice_id, &time_source) .with_instructions(instructions); tx.set_ttl(Duration::from_millis(TTL_MS)); - let tx = tx.sign(&alice_keypair); + let tx = tx.sign(alice_keypair.private_key()); let limits = TransactionLimits { max_instruction_number: 4096, max_wasm_size_bytes: 0, }; let tx_hash = tx.hash(); - let tx = AcceptedTransaction::accept(tx, &chain_id, &limits) + let tx = AcceptedTransaction::accept(tx, &chain_id, limits) .expect("Failed to accept Transaction."); queue .push(tx.clone(), &state_view) diff --git a/core/src/smartcontracts/isi/mod.rs b/core/src/smartcontracts/isi/mod.rs index 46acb4a1fc0..e5746fdb1c3 100644 --- a/core/src/smartcontracts/isi/mod.rs +++ b/core/src/smartcontracts/isi/mod.rs @@ -496,8 +496,8 @@ mod tests { let instructions: [InstructionBox; 0] = []; let tx = TransactionBuilder::new(chain_id.clone(), SAMPLE_GENESIS_ACCOUNT_ID.clone()) .with_instructions(instructions) - .sign(&SAMPLE_GENESIS_ACCOUNT_KEYPAIR); - let tx_limits = &state_block.transaction_executor().transaction_limits; + .sign(SAMPLE_GENESIS_ACCOUNT_KEYPAIR.private_key()); + let tx_limits = state_block.transaction_executor().transaction_limits; assert!(matches!( AcceptedTransaction::accept(tx, &chain_id, tx_limits), Err(AcceptTransactionFail::UnexpectedGenesisAccountSignature) diff --git a/core/src/smartcontracts/isi/query.rs b/core/src/smartcontracts/isi/query.rs index 5129709d5aa..fcd6e87a5f4 100644 --- a/core/src/smartcontracts/isi/query.rs +++ b/core/src/smartcontracts/isi/query.rs @@ -187,12 +187,6 @@ impl ValidQueryRequest { query: SignedQuery, state_ro: &impl StateReadOnly, ) -> Result { - if !query.authority().signatory_matches(&query.signature().0) { - return Err(Error::Signature(String::from( - "Signature public key doesn't correspond to the account.", - )) - .into()); - } state_ro.world().executor().validate_query( state_ro, query.authority(), @@ -401,15 +395,15 @@ mod tests { let instructions: [InstructionBox; 0] = []; let tx = TransactionBuilder::new(chain_id.clone(), ALICE_ID.clone()) .with_instructions(instructions) - .sign(&ALICE_KEYPAIR); - AcceptedTransaction::accept(tx, &chain_id, &limits)? + .sign(ALICE_KEYPAIR.private_key()); + AcceptedTransaction::accept(tx, &chain_id, limits)? 
}; let invalid_tx = { let isi = Fail::new("fail".to_owned()); let tx = TransactionBuilder::new(chain_id.clone(), ALICE_ID.clone()) .with_instructions([isi.clone(), isi]) - .sign(&ALICE_KEYPAIR); - AcceptedTransaction::accept(tx, &chain_id, &huge_limits)? + .sign(ALICE_KEYPAIR.private_key()); + AcceptedTransaction::accept(tx, &chain_id, huge_limits)? }; let mut transactions = vec![valid_tx; valid_tx_per_block]; @@ -566,9 +560,9 @@ mod tests { let instructions: [InstructionBox; 0] = []; let tx = TransactionBuilder::new(chain_id.clone(), ALICE_ID.clone()) .with_instructions(instructions) - .sign(&ALICE_KEYPAIR); + .sign(ALICE_KEYPAIR.private_key()); - let tx_limits = &state_block.transaction_executor().transaction_limits; + let tx_limits = state_block.transaction_executor().transaction_limits; let va_tx = AcceptedTransaction::accept(tx, &chain_id, tx_limits)?; let (peer_public_key, _) = KeyPair::random().into_parts(); @@ -590,7 +584,7 @@ mod tests { let unapplied_tx = TransactionBuilder::new(chain_id, ALICE_ID.clone()) .with_instructions([Unregister::account(gen_account_in("domain").0)]) - .sign(&ALICE_KEYPAIR); + .sign(ALICE_KEYPAIR.private_key()); let wrong_hash = unapplied_tx.hash(); let not_found = FindTransactionByHash::new(wrong_hash).execute(&state_view); assert!(matches!( diff --git a/core/src/state.rs b/core/src/state.rs index d869a2ed17c..03625f53dc0 100644 --- a/core/src/state.rs +++ b/core/src/state.rs @@ -1360,7 +1360,7 @@ impl StateTransaction<'_, '_> { ) -> Self { if let Some(param) = self.0 { if param.id().name().as_ref() == id { - if let Some(value) = param.val.try_into().ok() { + if let Ok(value) = param.val.try_into() { fun(value); } Self(None) diff --git a/core/src/sumeragi/main_loop.rs b/core/src/sumeragi/main_loop.rs index a10e32125ef..ba0896e1935 100644 --- a/core/src/sumeragi/main_loop.rs +++ b/core/src/sumeragi/main_loop.rs @@ -1,5 +1,5 @@ //! The main event loop that powers sumeragi. -use std::sync::mpsc; +use std::{collections::BTreeSet, sync::mpsc}; use iroha_crypto::{HashOf, KeyPair}; use iroha_data_model::{block::*, events::pipeline::PipelineEventBox, peer::PeerId}; @@ -211,7 +211,7 @@ impl Sumeragi { fn init_listen_for_genesis( &mut self, - genesis_public_key: &PublicKey, + genesis_account: &AccountId, state: &State, shutdown_receiver: &mut tokio::sync::oneshot::Receiver<()>, ) -> Result<(), EarlyReturn> { @@ -244,7 +244,7 @@ impl Sumeragi { block, &self.topology, &self.chain_id, - genesis_public_key, + genesis_account, &mut state_block, ) .unpack(|e| self.send_event(e)) @@ -288,10 +288,10 @@ impl Sumeragi { } } - fn sumeragi_init_commit_genesis( + fn init_commit_genesis( &mut self, genesis_transaction: GenesisTransaction, - genesis_public_key: &PublicKey, + genesis_account: &AccountId, state: &State, ) { std::thread::sleep(Duration::from_millis(250)); // TODO: Why this sleep? 
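From here on, the paths that previously validated genesis submissions against a bare genesis `PublicKey` take a genesis `AccountId` instead. The sketch below shows how that id is derived; it simply mirrors the construction this patch adds later in `main_loop::run` and in `core/src/sumeragi/mod.rs`, wrapped in an illustrative helper:

```rust
use iroha_crypto::PublicKey;
use iroha_data_model::account::AccountId;

// Illustrative helper: the genesis account lives in the genesis domain and is
// identified by the genesis public key.
fn genesis_account(genesis_public_key: PublicKey) -> AccountId {
    AccountId::new(iroha_genesis::GENESIS_DOMAIN_ID.clone(), genesis_public_key)
}
```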
@@ -305,7 +305,7 @@ impl Sumeragi { let transaction = AcceptedTransaction::accept_genesis( genesis_transaction, &self.chain_id, - genesis_public_key, + genesis_account, ) .expect("Genesis invalid"); let transactions = vec![transaction]; @@ -356,14 +356,18 @@ impl Sumeragi { .block_committed(block.as_ref(), state_block.world.peers().cloned()); self.connect_peers(&self.topology); + let block_hash = block.as_ref().hash(); + let block_height = block.as_ref().header().height(); + Strategy::kura_store_block(&self.kura, block); + // Commit new block making it's effect visible for the rest of application state_block.commit(); info!( peer_id=%self.peer_id, %prev_role, next_role=%self.role(), - block_hash=%block.as_ref().hash(), - new_height=%block.as_ref().header().height, + block_hash=%block_hash, + new_height=%block_height, "{}", Strategy::LOG_MESSAGE, ); #[cfg(debug_assertions)] @@ -374,8 +378,6 @@ impl Sumeragi { "Topology after commit" ); - Strategy::kura_store_block(&self.kura, block); - // NOTE: This sends `BlockStatus::Applied` event, // so it should be done AFTER public facing state update state_events.into_iter().for_each(|e| self.send_event(e)); @@ -400,7 +402,7 @@ impl Sumeragi { &self, state: &'state State, topology: &Topology, - genesis_public_key: &PublicKey, + genesis_account: &AccountId, BlockCreated { block }: BlockCreated, ) -> Option> { let mut state_block = state.block(); @@ -409,7 +411,7 @@ impl Sumeragi { block, topology, &self.chain_id, - genesis_public_key, + genesis_account, &mut state_block, ) .unpack(|e| self.send_event(e)) @@ -436,14 +438,15 @@ impl Sumeragi { } #[allow(clippy::too_many_lines)] + #[allow(clippy::too_many_arguments)] fn handle_message<'state>( &mut self, message: BlockMessage, state: &'state State, voting_block: &mut Option>, view_change_index: usize, - genesis_public_key: &PublicKey, - voting_signatures: &mut Vec, + genesis_account: &AccountId, + voting_signatures: &mut BTreeSet, #[cfg_attr(not(debug_assertions), allow(unused_variables))] is_genesis_peer: bool, ) { #[allow(clippy::suspicious_operation_groupings)] @@ -459,9 +462,10 @@ impl Sumeragi { // Release block writer before creating new one // FIX: Restore `voting_block` if `handle_block_sync` returns Err // Currently it's not possible because block writer needs to be released + // Look at https://github.com/hyperledger/iroha/issues/4643 let _ = voting_block.take(); - match handle_block_sync(&self.chain_id, block, state, genesis_public_key, &|e| { + match handle_block_sync(&self.chain_id, block, state, genesis_account, &|e| { self.send_event(e) }) { Ok(BlockSyncOk::CommitBlock(block, state_block, topology)) => { @@ -548,7 +552,7 @@ impl Sumeragi { let _ = voting_block.take(); if let Some(mut v_block) = - self.validate_block(state, topology, genesis_public_key, block_created) + self.validate_block(state, topology, genesis_account, block_created) { v_block.block.sign(&self.key_pair, topology); @@ -559,6 +563,7 @@ impl Sumeragi { } else { // FIX: Restore `voting_block` // Currently it's not possible because block writer needs to be released + // Look at https://github.com/hyperledger/iroha/issues/4643 } } (BlockMessage::BlockCreated(block_created), Role::ObservingPeer) => { @@ -571,7 +576,7 @@ impl Sumeragi { let _ = voting_block.take(); if let Some(mut v_block) = - self.validate_block(state, topology, genesis_public_key, block_created) + self.validate_block(state, topology, genesis_account, block_created) { if view_change_index >= 1 { v_block.block.sign(&self.key_pair, topology); @@ -591,6 +596,7 @@ 
impl Sumeragi { } else { // FIX: Restore `voting_block` // Currently it's not possible because block writer needs to be released + // Look at https://github.com/hyperledger/iroha/issues/4643 } } (BlockMessage::BlockCreated(block_created), Role::ProxyTail) => { @@ -605,86 +611,155 @@ impl Sumeragi { let _ = voting_block.take(); if let Some(mut valid_block) = - self.validate_block(state, &self.topology, genesis_public_key, block_created) + self.validate_block(state, &self.topology, genesis_account, block_created) { // NOTE: Up until this point it was unknown which block is expected to be received, // therefore all the signatures (of any hash) were collected and will now be pruned - add_signatures::( - &mut valid_block, - voting_signatures.drain(..), - &self.topology, - ); + + for signature in core::mem::take(voting_signatures) { + if let Err(error) = + valid_block.block.add_signature(signature, &self.topology) + { + debug!(?error, "Signature not valid"); + } + } *voting_block = self.try_commit_block(valid_block, is_genesis_peer); } else { // FIX: Restore `voting_block` // Currently it's not possible because block writer needs to be released + // Look at https://github.com/hyperledger/iroha/issues/4643 } } - (BlockMessage::BlockSigned(BlockSigned { signatures }), Role::ProxyTail) => { + (BlockMessage::BlockSigned(BlockSigned { hash, signature }), Role::ProxyTail) => { info!( peer_id=%self.peer_id, role=%self.role(), "Received block signatures" ); - let roles: &[Role] = if view_change_index >= 1 { - &[Role::ValidatingPeer, Role::ObservingPeer] - } else { - &[Role::ValidatingPeer] - }; - let valid_signatures = self - .topology - .filter_signatures_by_roles(roles, &signatures) - .cloned(); + if let Ok(signatory_idx) = usize::try_from(signature.0) { + let signatory = &self.topology.as_ref()[signatory_idx]; - if let Some(mut voted_block) = voting_block.take() { - add_signatures::(&mut voted_block, valid_signatures, &self.topology); - *voting_block = self.try_commit_block(voted_block, is_genesis_peer); + match self.topology.role(signatory) { + Role::Leader => error!( + peer_id=%self.peer_id, + role=%self.role(), + "Signatory is leader" + ), + Role::Undefined => error!( + peer_id=%self.peer_id, + role=%self.role(), + "Unknown signatory" + ), + Role::ObservingPeer if view_change_index == 0 => error!( + peer_id=%self.peer_id, + role=%self.role(), + "Signatory is observing peer" + ), + Role::ProxyTail => error!( + peer_id=%self.peer_id, + role=%self.role(), + "Signatory is proxy tail" + ), + _ => { + if let Some(mut voted_block) = voting_block.take() { + let actual_hash = voted_block.block.as_ref().hash_of_payload(); + + if hash != actual_hash { + error!( + peer_id=%self.peer_id, + role=%self.role(), + expected_hash=?hash, + ?actual_hash, + "Block hash mismatch" + ); + } else if let Err(err) = + voted_block.block.add_signature(signature, &self.topology) + { + error!( + peer_id=%self.peer_id, + role=%self.role(), + ?err, + "Signature not valid" + ); + } else { + *voting_block = + self.try_commit_block(voted_block, is_genesis_peer); + } + } else { + // NOTE: Due to the nature of distributed systems, signatures can sometimes be received before + // the block (sent by the leader). 
Collect the signatures and wait for the block to be received + if !voting_signatures.insert(signature) { + error!( + peer_id=%self.peer_id, + role=%self.role(), + "Duplicate signature" + ); + } + } + } + } } else { - // NOTE: Due to the nature of distributed systems, signatures can sometimes be received before - // the block (sent by the leader). Collect the signatures and wait for the block to be received - voting_signatures.extend(valid_signatures); + error!( + peer_id=%self.peer_id, + role=%self.role(), + "Signatory index exceeds usize::MAX" + ); } } (BlockMessage::BlockCommitted(BlockCommitted { .. }), Role::Leader) if self.topology.is_consensus_required().is_none() => {} ( - BlockMessage::BlockCommitted(BlockCommitted { signatures }), + BlockMessage::BlockCommitted(BlockCommitted { hash, signatures }), Role::Leader | Role::ValidatingPeer | Role::ObservingPeer, ) => { if let Some(mut voted_block) = voting_block.take() { match voted_block .block // NOTE: The manipulation of the topology relies upon all peers seeing the same signature set. - // Therefore we must clear the signatures and accept what the proxy tail giveth. + // Therefore we must clear the signatures and accept what the proxy tail has giveth. .replace_signatures(signatures, &self.topology) .unpack(|e| self.send_event(e)) { Ok(prev_signatures) => { - match voted_block - .block - .commit(&self.topology) - .unpack(|e| self.send_event(e)) - { - Ok(committed_block) => { - self.commit_block(committed_block, voted_block.block_state) - } - Err((mut block, error)) => { - error!( - peer_id=%self.peer_id, - role=%self.role(), - ?error, - "Block failed to be committed" - ); - - block - .replace_signatures(prev_signatures, &self.topology) - .unpack(|e| self.send_event(e)) - .expect("INTERNAL BUG: Failed to replace signatures"); - voted_block.block = block; - *voting_block = Some(voted_block); + // NOTE: Block hash depends on signatures, therefore + // it must be calculated after replacing signatures + let actual_hash = voted_block.block.as_ref().hash(); + + if actual_hash == hash { + match voted_block + .block + .commit(&self.topology) + .unpack(|e| self.send_event(e)) + { + Ok(committed_block) => { + self.commit_block(committed_block, voted_block.state_block) + } + Err((mut block, error)) => { + error!( + peer_id=%self.peer_id, + role=%self.role(), + ?error, + "Block failed to be committed" + ); + + block + .replace_signatures(prev_signatures, &self.topology) + .unpack(|e| self.send_event(e)) + .expect("INTERNAL BUG: Failed to replace signatures"); + voted_block.block = block; + *voting_block = Some(voted_block); + } } + } else { + error!( + peer_id=%self.peer_id, + role=%self.role(), + expected_hash=?hash, + ?actual_hash, + "Block hash mismatch" + ); } } Err(error) => { @@ -749,7 +824,7 @@ impl Sumeragi { self.broadcast_packet(msg); } - self.commit_block(committed_block, voting_block.block_state); + self.commit_block(committed_block, voting_block.state_block); return None; } @@ -828,7 +903,7 @@ fn reset_state( was_commit: &mut bool, topology: &mut Topology, voting_block: &mut Option, - voting_signatures: &mut Vec, + voting_signatures: &mut BTreeSet, last_view_change_time: &mut Instant, view_change_time: &mut Duration, ) { @@ -891,26 +966,30 @@ pub(crate) fn run( // Connect peers with initial topology sumeragi.connect_peers(&sumeragi.topology); + let genesis_account = AccountId::new( + iroha_genesis::GENESIS_DOMAIN_ID.clone(), + genesis_network.public_key.clone(), + ); + let span = span!(tracing::Level::TRACE, "genesis").entered(); - 
let is_genesis_peer = - if state.view().height() == 0 || state.view().latest_block_hash().is_none() { - if let Some(genesis) = genesis_network.genesis { - sumeragi.sumeragi_init_commit_genesis(genesis, &genesis_network.public_key, &state); - true - } else { - if let Err(err) = sumeragi.init_listen_for_genesis( - &genesis_network.public_key, - &state, - &mut shutdown_receiver, - ) { - info!(?err, "Sumeragi Thread is being shut down."); - return; - } - false - } + let is_genesis_peer = if state.view().height() == 0 + || state.view().latest_block_hash().is_none() + { + if let Some(genesis) = genesis_network.genesis { + sumeragi.init_commit_genesis(genesis, &genesis_account, &state); + true } else { + if let Err(err) = + sumeragi.init_listen_for_genesis(&genesis_account, &state, &mut shutdown_receiver) + { + info!(?err, "Sumeragi Thread is being shut down."); + return; + } false - }; + } + } else { + false + }; span.exit(); info!( @@ -921,7 +1000,7 @@ pub(crate) fn run( let mut voting_block = None; // Proxy tail collection of voting block signatures - let mut voting_signatures = Vec::new(); + let mut voting_signatures = BTreeSet::new(); let mut should_sleep = false; let mut view_change_proof_chain = ProofChain::default(); // Duration after which a view change is suggested @@ -996,7 +1075,7 @@ pub(crate) fn run( &state, &mut voting_block, view_change_index, - &genesis_network.public_key, + &genesis_account, &mut voting_signatures, is_genesis_peer, ); @@ -1077,24 +1156,6 @@ pub(crate) fn run( } } -fn add_signatures( - block: &mut VotingBlock, - signatures: impl IntoIterator, - topology: &Topology, -) { - for signature in signatures { - if let Err(error) = block.block.add_signature(signature, topology) { - let err_msg = "Signature not valid"; - - if EXPECT_VALID { - error!(?error, err_msg); - } else { - debug!(?error, err_msg); - } - } - } -} - /// Type enumerating early return types to reduce cyclomatic /// complexity of the main loop items and allow direct short /// circuiting with the `?` operator. 
Candidate for `impl @@ -1176,7 +1237,7 @@ fn handle_block_sync<'state, F: Fn(PipelineEventBox)>( chain_id: &ChainId, block: SignedBlock, state: &'state State, - genesis_public_key: &PublicKey, + genesis_account: &AccountId, handle_events: &F, ) -> Result, (SignedBlock, BlockSyncError)> { let block_height = block @@ -1231,7 +1292,7 @@ fn handle_block_sync<'state, F: Fn(PipelineEventBox)>( block, &topology, chain_id, - genesis_public_key, + genesis_account, &mut state_block, ) .unpack(handle_events) @@ -1262,6 +1323,7 @@ fn handle_block_sync<'state, F: Fn(PipelineEventBox)>( #[cfg(test)] mod tests { + use iroha_genesis::GENESIS_DOMAIN_ID; use test_samples::gen_account_in; use tokio::test; @@ -1284,10 +1346,13 @@ mod tests { chain_id: &ChainId, topology: &Topology, leader_private_key: &PrivateKey, - ) -> (State, Arc, SignedBlock, PublicKey) { + ) -> (State, Arc, SignedBlock, AccountId) { // Predefined world state let (alice_id, alice_keypair) = gen_account_in("wonderland"); - let genesis_public_key = alice_keypair.public_key().clone(); + let genesis_account = AccountId::new( + GENESIS_DOMAIN_ID.clone(), + alice_keypair.public_key().clone(), + ); let account = Account::new(alice_id.clone()).build(&alice_id); let domain_id = "wonderland".parse().expect("Valid"); let mut domain = Domain::new(domain_id).build(&alice_id); @@ -1305,11 +1370,11 @@ mod tests { // Making two transactions that have the same instruction let tx = TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions([fail_box]) - .sign(&alice_keypair); + .sign(alice_keypair.private_key()); let tx = AcceptedTransaction::accept( tx, chain_id, - &state_block.transaction_executor().transaction_limits, + state_block.transaction_executor().transaction_limits, ) .expect("Valid"); @@ -1339,21 +1404,21 @@ mod tests { let tx1 = TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions([create_asset_definition1]) - .sign(&alice_keypair); + .sign(alice_keypair.private_key()); let tx1 = AcceptedTransaction::accept( tx1, chain_id, - &state_block.transaction_executor().transaction_limits, + state_block.transaction_executor().transaction_limits, ) .map(Into::into) .expect("Valid"); let tx2 = TransactionBuilder::new(chain_id.clone(), alice_id) .with_instructions([create_asset_definition2]) - .sign(&alice_keypair); + .sign(alice_keypair.private_key()); let tx2 = AcceptedTransaction::accept( tx2, chain_id, - &state_block.transaction_executor().transaction_limits, + state_block.transaction_executor().transaction_limits, ) .map(Into::into) .expect("Valid"); @@ -1365,7 +1430,7 @@ mod tests { .unpack(|_| {}) }; - (state, kura, block.into(), genesis_public_key) + (state, kura, block.into(), genesis_account) } #[test] diff --git a/core/src/sumeragi/message.rs b/core/src/sumeragi/message.rs index 390763094f7..30bb1ae3ea3 100644 --- a/core/src/sumeragi/message.rs +++ b/core/src/sumeragi/message.rs @@ -1,5 +1,6 @@ //! Contains message structures for p2p communication during consensus. -use iroha_data_model::block::{BlockSignature, SignedBlock}; +use iroha_crypto::HashOf; +use iroha_data_model::block::{BlockPayload, BlockSignature, SignedBlock}; use iroha_macro::*; use parity_scale_codec::{Decode, Encode}; @@ -37,7 +38,6 @@ impl ControlFlowMessage { /// `BlockCreated` message structure. #[derive(Debug, Clone, Decode, Encode)] -#[non_exhaustive] pub struct BlockCreated { /// The corresponding block. pub block: SignedBlock, @@ -54,24 +54,32 @@ impl From<&ValidBlock> for BlockCreated { /// `BlockSigned` message structure. 
#[derive(Debug, Clone, Decode, Encode)] -#[non_exhaustive] pub struct BlockSigned { - /// Set of signatures. - pub signatures: Vec, + /// Hash of the block being signed. + pub hash: HashOf, + /// Signature of the block + pub signature: BlockSignature, } impl From<&ValidBlock> for BlockSigned { fn from(block: &ValidBlock) -> Self { Self { - signatures: block.as_ref().signatures().cloned().collect(), + hash: block.as_ref().hash_of_payload(), + signature: block + .as_ref() + .signatures() + .last() + .cloned() + .expect("INTERNAL BUG: Block must have at least one signature"), } } } /// `BlockCommitted` message structure. -#[derive(Debug, Clone, Decode, Encode)] -#[non_exhaustive] +#[derive(Debug, Clone, Encode)] pub struct BlockCommitted { + /// Hash of the block being signed. + pub hash: HashOf, /// Set of signatures. pub signatures: Vec, } @@ -79,6 +87,7 @@ pub struct BlockCommitted { impl From<&CommittedBlock> for BlockCommitted { fn from(block: &CommittedBlock) -> Self { Self { + hash: block.as_ref().hash(), signatures: block.as_ref().signatures().cloned().collect(), } } @@ -86,7 +95,6 @@ impl From<&CommittedBlock> for BlockCommitted { /// `BlockSyncUpdate` message structure #[derive(Debug, Clone, Decode, Encode)] -#[non_exhaustive] pub struct BlockSyncUpdate { /// The corresponding block. pub block: SignedBlock, @@ -100,3 +108,56 @@ impl From<&SignedBlock> for BlockSyncUpdate { } } } + +mod candidate { + use indexmap::IndexSet; + use parity_scale_codec::Input; + + use super::*; + + #[derive(Decode)] + struct BlockCommittedCandidate { + /// Hash of the block being signed. + pub hash: HashOf, + /// Set of signatures. + pub signatures: Vec, + } + + impl BlockCommittedCandidate { + fn validate(self) -> Result { + self.validate_signatures()?; + + Ok(BlockCommitted { + hash: self.hash, + signatures: self.signatures, + }) + } + + fn validate_signatures(&self) -> Result<(), &'static str> { + if self.signatures.is_empty() { + return Err("No signatures in block"); + } + + self.signatures + .iter() + .map(|signature| &signature.0) + .try_fold(IndexSet::new(), |mut acc, elem| { + if !acc.insert(elem) { + return Err("Duplicate signature"); + } + + Ok(acc) + })?; + + Ok(()) + } + } + + impl Decode for BlockCommitted { + fn decode(input: &mut I) -> Result { + BlockCommittedCandidate::decode(input)? 
+ .validate() + .map_err(Into::into) + } + } +} diff --git a/core/src/sumeragi/mod.rs b/core/src/sumeragi/mod.rs index 0592d6221ec..fc9c4b332c8 100644 --- a/core/src/sumeragi/mod.rs +++ b/core/src/sumeragi/mod.rs @@ -10,7 +10,7 @@ use std::{ use eyre::Result; use iroha_config::parameters::actual::{Common as CommonConfig, Sumeragi as SumeragiConfig}; -use iroha_data_model::{block::SignedBlock, prelude::*}; +use iroha_data_model::{account::AccountId, block::SignedBlock, prelude::*}; use iroha_genesis::GenesisTransaction; use iroha_logger::prelude::*; use network_topology::{Role, Topology}; @@ -73,7 +73,7 @@ impl SumeragiHandle { fn replay_block( chain_id: &ChainId, - genesis_public_key: &PublicKey, + genesis_account: &AccountId, block: &SignedBlock, state_block: &mut StateBlock<'_>, events_sender: &EventsSender, @@ -86,18 +86,18 @@ impl SumeragiHandle { block.clone(), topology, chain_id, - genesis_public_key, + genesis_account, state_block, ) .unpack(|e| { let _ = events_sender.send(e.into()); }) - .expect("Kura: Invalid block") + .expect("INTERNAL BUG: Invalid block stored in Kura") .commit(topology) .unpack(|e| { let _ = events_sender.send(e.into()); }) - .expect("Kura: Invalid block"); + .expect("INTERNAL BUG: Invalid block stored in Kura"); if block.as_ref().header().is_genesis() { *state_block.world.trusted_peers_ids = @@ -147,10 +147,7 @@ impl SumeragiHandle { { let state_view = state.view(); - let skip_block_count: usize = state_view - .height() - .try_into() - .expect("Blockchain height should fit into usize"); + let skip_block_count = state_view.height(); blocks_iter = (skip_block_count + 1..=block_count).map(|block_height| { NonZeroUsize::new(block_height).and_then(|height| kura.get_block_by_height(height)).expect( "Sumeragi should be able to load the block that was reported as presented. \ @@ -181,11 +178,16 @@ impl SumeragiHandle { }; } + let genesis_account = AccountId::new( + iroha_genesis::GENESIS_DOMAIN_ID.clone(), + genesis_network.public_key.clone(), + ); + for block in blocks_iter { let mut state_block = state.block(); Self::replay_block( &common_config.chain, - &genesis_network.public_key, + &genesis_account, &block, &mut state_block, &events_sender, @@ -234,7 +236,7 @@ impl SumeragiHandle { .spawn(move || { main_loop::run(genesis_network, sumeragi, shutdown_receiver, state); }) - .expect("Sumeragi thread spawn should not fail.") + .expect("INTERNAL BUG: Sumeragi thread spawn failed") }; let shutdown = move || { @@ -270,7 +272,7 @@ pub struct VotingBlock<'state> { /// At what time has this peer voted for this block pub voted_at: Instant, /// [`WorldState`] after applying transactions to it but before it was committed - pub block_state: StateBlock<'state>, + pub state_block: StateBlock<'state>, } impl AsRef for VotingBlock<'_> { @@ -285,7 +287,7 @@ impl VotingBlock<'_> { VotingBlock { block, voted_at: Instant::now(), - block_state: state_block, + state_block, } } } diff --git a/core/src/sumeragi/network_topology.rs b/core/src/sumeragi/network_topology.rs index 0b7ec997c4b..4b58d52a5b3 100644 --- a/core/src/sumeragi/network_topology.rs +++ b/core/src/sumeragi/network_topology.rs @@ -20,7 +20,12 @@ use iroha_data_model::{ /// /// Above is an illustration of how the various operations work for a f = 2 topology. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct Topology(Vec, usize); +pub struct Topology( + /// Ordered set of peers + Vec, + /// Current view change index. 
Reset to 0 after every block commit + usize, +); /// Topology with at least one peer #[derive(Debug, Clone, PartialEq, Eq, derive_more::Deref)] @@ -147,9 +152,11 @@ impl Topology { }; } - signatures - .into_iter() - .filter(move |signature| filtered.contains(&(signature.0 as usize))) + signatures.into_iter().filter(move |signature| { + filtered.contains( + &(usize::try_from(signature.0).expect("Peer index should fit into usize")), + ) + }) } /// What role does this peer have in the topology. @@ -209,13 +216,17 @@ impl Topology { self.0 = topology.into_iter().map(|(_, peer)| peer).collect(); } - /// Recreate topology for given block + /// Rotate topology after a block has been committed pub fn block_committed( &mut self, block: &SignedBlock, new_peers: impl IntoIterator, ) { - self.lift_up_peers(block.signatures().map(|s| s.0 as usize)); + self.lift_up_peers( + block + .signatures() + .map(|s| s.0.try_into().expect("Peer index should fit into usize")), + ); self.rotate_set_a(); self.update_peer_list(new_peers); self.1 = 0; diff --git a/core/src/sumeragi/view_change.rs b/core/src/sumeragi/view_change.rs index 52ea5b859f8..fb698c72eb5 100644 --- a/core/src/sumeragi/view_change.rs +++ b/core/src/sumeragi/view_change.rs @@ -2,6 +2,7 @@ //! Where view change is a process of changing topology due to some faulty network behavior. use eyre::Result; +use indexmap::IndexSet; use iroha_crypto::{HashOf, PublicKey, SignatureOf}; use iroha_data_model::block::SignedBlock; use parity_scale_codec::{Decode, Encode}; @@ -43,7 +44,9 @@ pub struct ProofBuilder(SignedViewChangeProof); impl ProofBuilder { /// Constructor from index. pub fn new(latest_block: HashOf, view_change_index: usize) -> Self { - let view_change_index = view_change_index as u32; + let view_change_index = view_change_index + .try_into() + .expect("INTERNAL BUG: Blockchain height should fit into usize"); let proof = SignedViewChangeProof { payload: ViewChangeProofPayload { @@ -67,13 +70,21 @@ impl ProofBuilder { impl SignedViewChangeProof { /// Verify the signatures of `other` and add them to this proof. fn merge_signatures(&mut self, other: Vec, topology: &Topology) { - for (public_key, signature) in other { - if topology.position(&public_key).is_none() { - continue; - } - - self.signatures.push((public_key, signature)); - } + let signatures = core::mem::take(&mut self.signatures) + .into_iter() + .collect::>(); + + self.signatures = other + .into_iter() + .fold(signatures, |mut acc, (public_key, signature)| { + if topology.position(&public_key).is_some() { + acc.insert((public_key, signature)); + } + + acc + }) + .into_iter() + .collect(); } /// Verify if the proof is valid, given the peers in `topology`. 
@@ -104,8 +115,10 @@ impl ProofChain { .iter() .enumerate() .take_while(|(i, proof)| { + let view_change_index = proof.payload.view_change_index as usize; + proof.payload.latest_block == latest_block - && proof.payload.view_change_index as usize == *i + && view_change_index == *i && proof.verify(topology) }) .count() @@ -118,8 +131,8 @@ impl ProofChain { .iter() .enumerate() .take_while(|(i, proof)| { - proof.payload.latest_block == latest_block - && proof.payload.view_change_index as usize == *i + let view_change_index = proof.payload.view_change_index as usize; + proof.payload.latest_block == latest_block && view_change_index == *i }) .count(); self.0.truncate(valid_count); @@ -216,17 +229,28 @@ mod candidate { fn validate(self) -> Result { self.validate_signatures()?; - // TODO: If it is possible, we should instead reject decoding proofs that - // have duplicated signatures. This would require change in the code as well - let unique_signatures = self.signatures.into_iter().collect::>(); - Ok(SignedViewChangeProof { + signatures: self.signatures, payload: self.payload, - signatures: unique_signatures.into_iter().collect(), }) } fn validate_signatures(&self) -> Result<(), &'static str> { + if self.signatures.is_empty() { + return Err("Proof missing signatures"); + } + + self.signatures + .iter() + .map(|signature| &signature.0) + .try_fold(IndexSet::new(), |mut acc, elem| { + if !acc.insert(elem) { + return Err("Duplicate signature"); + } + + Ok(acc) + })?; + self.signatures .iter() .try_for_each(|(public_key, payload)| { diff --git a/core/src/tx.rs b/core/src/tx.rs index 89ae6700f41..1bad2077dd4 100644 --- a/core/src/tx.rs +++ b/core/src/tx.rs @@ -14,9 +14,7 @@ pub use iroha_data_model::prelude::*; use iroha_data_model::{ isi::error::Mismatch, query::error::FindError, - transaction::{ - error::TransactionLimitError, TransactionLimits, TransactionPayload, TransactionSignature, - }, + transaction::{error::TransactionLimitError, TransactionLimits, TransactionPayload}, }; use iroha_genesis::GenesisTransaction; use iroha_logger::{debug, error}; @@ -71,7 +69,7 @@ impl AcceptedTransaction { pub fn accept_genesis( tx: GenesisTransaction, expected_chain_id: &ChainId, - genesis_public_key: &PublicKey, + genesis_account: &AccountId, ) -> Result { let actual_chain_id = tx.0.chain(); @@ -82,13 +80,8 @@ impl AcceptedTransaction { })); } - let TransactionSignature(public_key, signature) = tx.0.signature(); - if public_key != genesis_public_key { - return Err(SignatureVerificationFail { - signature: signature.clone(), - reason: "Signature doesn't correspond to genesis public key".to_string(), - } - .into()); + if genesis_account != tx.0.authority() { + return Err(AcceptTransactionFail::UnexpectedGenesisAccountSignature); } Ok(Self(tx.0)) @@ -102,7 +95,7 @@ impl AcceptedTransaction { pub fn accept( tx: SignedTransaction, expected_chain_id: &ChainId, - limits: &TransactionLimits, + limits: TransactionLimits, ) -> Result { let actual_chain_id = tx.chain(); @@ -113,10 +106,6 @@ impl AcceptedTransaction { })); } - if *iroha_genesis::GENESIS_DOMAIN_ID == *tx.authority().domain() { - return Err(AcceptTransactionFail::UnexpectedGenesisAccountSignature); - } - match &tx.instructions() { Executable::Instructions(instructions) => { let instruction_count = instructions.len(); diff --git a/core/test_network/src/lib.rs b/core/test_network/src/lib.rs index 3dd3f4b3e8b..3ef68c196b4 100644 --- a/core/test_network/src/lib.rs +++ b/core/test_network/src/lib.rs @@ -1,6 +1,5 @@ //! Module for starting peers and networks. 
Used only for tests use core::{fmt::Debug, str::FromStr as _, time::Duration}; -#[cfg(debug_assertions)] use std::{collections::BTreeMap, ops::Deref, path::Path, sync::Arc, thread}; use eyre::Result; @@ -342,7 +341,6 @@ pub fn wait_for_genesis_committed_with_max_retries( let ready_peers = clients .iter() .map(|client| { - println!("KITA: {:?}", client.get_status().unwrap()); let is_ready = match client.get_status() { Ok(status) => status.blocks >= 1, Err(error) => { @@ -354,7 +352,6 @@ pub fn wait_for_genesis_committed_with_max_retries( }) .sum::(); - println!("Ready peers: {ready_peers}"); let without_genesis_peers = clients.len() as u32 - ready_peers; if without_genesis_peers <= offline_peers { return; diff --git a/crypto/src/lib.rs b/crypto/src/lib.rs index aa7270b6e00..dfe5fa32471 100755 --- a/crypto/src/lib.rs +++ b/crypto/src/lib.rs @@ -839,12 +839,6 @@ pub mod error { /// Returned when an error occurs during key generation #[display(fmt = "Key generation failed. {_0}")] KeyGen(String), - /// Returned when an error occurs during digest generation - #[display(fmt = "Digest generation failed. {_0}")] - DigestGen(String), - /// Returned when an error occurs during creation of [`SignaturesOf`] - #[display(fmt = "`SignaturesOf` must contain at least one signature")] - EmptySignatureIter, /// A General purpose error message that doesn't fit in any category #[display(fmt = "General error. {_0}")] // This is going to cause a headache Other(String), diff --git a/data_model/src/block.rs b/data_model/src/block.rs index 4df24e59d32..1f601d9e4d1 100644 --- a/data_model/src/block.rs +++ b/data_model/src/block.rs @@ -54,12 +54,11 @@ mod model { #[getset(get_copy = "pub")] pub height: u64, /// Hash of the previous block in the chain. - #[getset(get = "pub")] + #[getset(get_copy = "pub")] pub prev_block_hash: Option>, /// Hash of merkle tree root of transactions' hashes. - #[getset(get = "pub")] - // TODO: How can it be `None`??? - pub transactions_hash: Option>>, + #[getset(get_copy = "pub")] + pub transactions_hash: HashOf>, /// Creation timestamp (unix time in milliseconds). #[getset(skip)] pub timestamp_ms: u64, @@ -236,6 +235,12 @@ impl SignedBlock { signature: BlockSignature, public_key: &iroha_crypto::PublicKey, ) -> Result<(), iroha_crypto::Error> { + if self.signatures().any(|s| signature.0 == s.0) { + return Err(iroha_crypto::Error::Signing( + "Duplicate signature".to_owned(), + )); + } + signature.1.verify(public_key, self.payload())?; let SignedBlock::V1(block) = self; @@ -297,10 +302,6 @@ mod candidate { self.validate_signatures()?; self.validate_header()?; - if self.payload.transactions.is_empty() { - return Err("Block is empty"); - } - Ok(SignedBlockV1 { signatures: self.signatures, payload: self.payload, @@ -308,8 +309,13 @@ mod candidate { } fn validate_signatures(&self) -> Result<(), &'static str> { + if self.signatures.is_empty() { + return Err("Block missing signatures"); + } + self.signatures .iter() + .map(|signature| signature.0) .try_fold(BTreeSet::new(), |mut acc, elem| { if !acc.insert(elem) { return Err("Duplicate signature in block"); @@ -320,6 +326,7 @@ mod candidate { Ok(()) } + fn validate_header(&self) -> Result<(), &'static str> { let actual_txs_hash = self.payload.header.transactions_hash; @@ -329,7 +336,8 @@ mod candidate { .iter() .map(|value| value.as_ref().hash()) .collect::>() - .hash(); + .hash() + .ok_or("Block is empty")?; if expected_txs_hash != actual_txs_hash { return Err("Transactions' hash incorrect. 
Expected: {expected_txs_hash:?}, actual: {actual_txs_hash:?}"); diff --git a/data_model/src/events/pipeline.rs b/data_model/src/events/pipeline.rs index 9d1b5b5b260..731e194c505 100644 --- a/data_model/src/events/pipeline.rs +++ b/data_model/src/events/pipeline.rs @@ -354,7 +354,9 @@ mod tests { Self { height, prev_block_hash: None, - transactions_hash: None, + transactions_hash: HashOf::from_untyped_unchecked(Hash::prehashed( + [1_u8; Hash::LENGTH], + )), timestamp_ms: 0, view_change_index: 0, consensus_estimation_ms: 0, diff --git a/data_model/src/lib.rs b/data_model/src/lib.rs index e46b5daaf5f..aecffef2df4 100644 --- a/data_model/src/lib.rs +++ b/data_model/src/lib.rs @@ -623,7 +623,17 @@ mod model { /// Unique id of blockchain #[derive( - Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Encode, Deserialize, Serialize, IntoSchema, + Debug, + Display, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Encode, + Deserialize, + Serialize, + IntoSchema, )] #[repr(transparent)] #[serde(transparent)] diff --git a/data_model/src/query/mod.rs b/data_model/src/query/mod.rs index 4be2e03295d..1c89b0afa79 100644 --- a/data_model/src/query/mod.rs +++ b/data_model/src/query/mod.rs @@ -1280,7 +1280,7 @@ pub mod http { Serialize, IntoSchema, )] - pub struct QuerySignature(pub PublicKey, pub SignatureOf); + pub struct QuerySignature(pub SignatureOf); /// I/O ready structure to send queries. #[derive(Debug, Clone, Encode, Serialize, IntoSchema)] @@ -1323,11 +1323,11 @@ pub mod http { impl SignedQueryCandidate { fn validate(self) -> Result { - let QuerySignature(public_key, signature) = &self.signature; + let QuerySignature(signature) = &self.signature; - if signature.verify(public_key, &self.payload).is_err() { - return Err("Query signature not valid"); - } + signature + .verify(&self.payload.authority.signatory, &self.payload) + .map_err(|_| "Query signature is not valid")?; Ok(SignedQueryV1 { payload: self.payload, @@ -1450,7 +1450,7 @@ pub mod http { let signature = SignatureOf::new(key_pair.private_key(), &self.payload); SignedQueryV1 { - signature: QuerySignature(key_pair.public_key().clone(), signature), + signature: QuerySignature(signature), payload: self.payload, } .into() @@ -1500,12 +1500,6 @@ pub mod error { )] #[cfg_attr(feature = "std", derive(thiserror::Error))] pub enum QueryExecutionFail { - /// Query has the wrong signature: {0} - Signature( - #[skip_from] - #[skip_try_from] - String, - ), /// {0} #[cfg_attr(feature = "std", error(transparent))] Find(FindError), diff --git a/data_model/src/transaction.rs b/data_model/src/transaction.rs index 7245db6fb61..0ed738a7398 100644 --- a/data_model/src/transaction.rs +++ b/data_model/src/transaction.rs @@ -158,10 +158,7 @@ mod model { Serialize, IntoSchema, )] - pub struct TransactionSignature( - pub iroha_crypto::PublicKey, - pub SignatureOf, - ); + pub struct TransactionSignature(pub SignatureOf); /// Transaction that contains a signature /// @@ -263,6 +260,13 @@ declare_versioned!(SignedTransaction 1..2, Debug, Display, Clone, PartialEq, Eq, declare_versioned!(SignedTransaction 1..2, Debug, Display, Clone, PartialEq, Eq, PartialOrd, Ord, FromVariant, IntoSchema); impl SignedTransaction { + /// Transaction payload. 
Used for tests + #[cfg(feature = "transparent_api")] + pub fn payload(&self) -> &TransactionPayload { + let SignedTransaction::V1(tx) = self; + &tx.payload + } + /// Return transaction instructions #[inline] pub fn instructions(&self) -> &Executable { @@ -383,10 +387,11 @@ mod candidate { } fn validate_signature(&self) -> Result<(), &'static str> { - let TransactionSignature(public_key, signature) = &self.signature; + let TransactionSignature(signature) = &self.signature; + signature - .verify(public_key, &self.payload) - .map_err(|_| "Transaction contains invalid signatures")?; + .verify(&self.payload.authority.signatory, &self.payload) + .map_err(|_| "Transaction signature is invalid")?; Ok(()) } @@ -757,11 +762,8 @@ mod http { /// Sign transaction with provided key pair. #[must_use] - pub fn sign(self, key_pair: &iroha_crypto::KeyPair) -> SignedTransaction { - let signature = TransactionSignature( - key_pair.public_key().clone(), - SignatureOf::new(key_pair.private_key(), &self.payload), - ); + pub fn sign(self, private_key: &iroha_crypto::PrivateKey) -> SignedTransaction { + let signature = TransactionSignature(SignatureOf::new(private_key, &self.payload)); SignedTransactionV1 { signature, diff --git a/docs/source/references/schema.json b/docs/source/references/schema.json index 177acd2bc8a..3cb73c6c563 100644 --- a/docs/source/references/schema.json +++ b/docs/source/references/schema.json @@ -612,7 +612,7 @@ }, { "name": "transactions_hash", - "type": "Option>>" + "type": "HashOf>" }, { "name": "timestamp_ms", @@ -2498,9 +2498,6 @@ "Option": { "Option": "Duration" }, - "Option>>": { - "Option": "HashOf>" - }, "Option>": { "Option": "HashOf" }, @@ -2938,32 +2935,27 @@ }, "QueryExecutionFail": { "Enum": [ - { - "tag": "Signature", - "discriminant": 0, - "type": "String" - }, { "tag": "Find", - "discriminant": 1, + "discriminant": 0, "type": "FindError" }, { "tag": "Conversion", - "discriminant": 2, + "discriminant": 1, "type": "String" }, { "tag": "UnknownCursor", - "discriminant": 3 + "discriminant": 2 }, { "tag": "FetchSizeTooBig", - "discriminant": 4 + "discriminant": 3 }, { "tag": "InvalidSingularParameters", - "discriminant": 5 + "discriminant": 4 } ] }, @@ -3054,12 +3046,7 @@ } ] }, - "QuerySignature": { - "Tuple": [ - "PublicKey", - "SignatureOf" - ] - }, + "QuerySignature": "SignatureOf", "Register": { "Struct": [ { @@ -3937,12 +3924,7 @@ } ] }, - "TransactionSignature": { - "Tuple": [ - "PublicKey", - "SignatureOf" - ] - }, + "TransactionSignature": "SignatureOf", "TransactionStatus": { "Enum": [ { diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index acc6c491b1a..b2939b83319 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -99,7 +99,7 @@ fn build_and_sign_genesis( ); let transaction = TransactionBuilder::new(chain_id, genesis_account_id) .with_instructions(instructions) - .sign(genesis_key_pair); + .sign(genesis_key_pair.private_key()); GenesisTransaction(transaction) } diff --git a/smart_contract/executor/src/permission.rs b/smart_contract/executor/src/permission.rs index 6aa5c7d2a13..a6ead932324 100644 --- a/smart_contract/executor/src/permission.rs +++ b/smart_contract/executor/src/permission.rs @@ -117,7 +117,7 @@ pub mod asset_definition { /// Check if `authority` is the owner of asset definition - /// `authority` is owner of asset_definition if: + /// `authority` is owner of asset definition if: /// - `asset_definition.owned_by` is `authority` /// - `asset_definition.domain_id` domain is owned by `authority` /// diff --git a/torii/src/lib.rs 
b/torii/src/lib.rs index a987608ee3f..4db82d4c7f1 100644 --- a/torii/src/lib.rs +++ b/torii/src/lib.rs @@ -338,7 +338,6 @@ impl Error { Config(_) | StatusSegmentNotFound(_) => StatusCode::NOT_FOUND, PushIntoQueue(err) => match **err { queue::Error::Full => StatusCode::INTERNAL_SERVER_ERROR, - queue::Error::SignatoryInconsistent => StatusCode::UNAUTHORIZED, _ => StatusCode::BAD_REQUEST, }, #[cfg(feature = "telemetry")] @@ -363,7 +362,6 @@ impl Error { Conversion(_) | UnknownCursor | FetchSizeTooBig | InvalidSingularParameters => { StatusCode::BAD_REQUEST } - Signature(_) => StatusCode::UNAUTHORIZED, Find(_) => StatusCode::NOT_FOUND, }, TooComplex => StatusCode::UNPROCESSABLE_ENTITY, diff --git a/torii/src/routing.rs b/torii/src/routing.rs index 623619422e4..87c2381673e 100644 --- a/torii/src/routing.rs +++ b/torii/src/routing.rs @@ -53,7 +53,7 @@ pub async fn handle_transaction( ) -> Result { let state_view = state.view(); let transaction_limits = state_view.config.transaction_limits; - let transaction = AcceptedTransaction::accept(transaction, &chain_id, &transaction_limits) + let transaction = AcceptedTransaction::accept(transaction, &chain_id, transaction_limits) .map_err(Error::AcceptTransaction)?; queue .push(transaction, &state_view)
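
A note on the signing changes that run through this diff: `TransactionBuilder::sign` (and the analogous query path) now takes a bare `&PrivateKey`, and `TransactionSignature`/`QuerySignature` no longer embed the signer's `PublicKey`; verification instead uses the key carried by the payload's `authority` (`payload.authority.signatory`). Below is a minimal caller-side sketch of the new API, assuming the crate items named in the diff; the helper name `sign_one` and its parameter list are illustrative, not part of the change.

```rust
use iroha_crypto::KeyPair;
use iroha_data_model::{prelude::*, ChainId};

/// Sketch only: sign a single instruction as `authority` using the
/// `sign(&PrivateKey)` signature introduced in this diff.
fn sign_one(
    chain_id: ChainId,
    authority: AccountId,
    key_pair: &KeyPair,
    instruction: InstructionBox,
) -> SignedTransaction {
    TransactionBuilder::new(chain_id, authority)
        .with_instructions([instruction])
        // Only the private key is passed now; the verifying key is recovered
        // from the transaction's `authority` on the receiving side.
        .sign(key_pair.private_key())
}
```

The apparent design point is that the signer's identity is already pinned by the transaction's `authority`, so shipping a second public key alongside the signature was redundant.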
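
The new `candidate` modules (for `BlockCommitted`, `SignedViewChangeProof`, and the block/transaction candidates) push signature sanity checks (at least one signature, no duplicates) into `Decode`, so malformed wire data is rejected before consensus logic ever sees it. The following is a self-contained sketch of that decode-then-validate pattern using placeholder types rather than Iroha's actual ones; it assumes only the `parity-scale-codec` and `indexmap` crates that the diff itself uses.

```rust
use indexmap::IndexSet;
use parity_scale_codec::{Decode, Encode, Input};

/// Placeholder standing in for `BlockSignature`: (peer index, opaque bytes).
#[derive(Debug, Clone, Decode, Encode)]
pub struct Signature(pub u64, pub Vec<u8>);

/// Public message type; `Decode` is implemented manually via the candidate.
#[derive(Debug, Clone, Encode)]
pub struct Committed {
    pub signatures: Vec<Signature>,
}

/// Raw wire form: decoded first, validated, then converted.
#[derive(Decode)]
struct CommittedCandidate {
    signatures: Vec<Signature>,
}

impl CommittedCandidate {
    fn validate(self) -> Result<Committed, &'static str> {
        if self.signatures.is_empty() {
            return Err("No signatures in message");
        }

        // Reject duplicates by signer index, mirroring the checks in the diff.
        self.signatures
            .iter()
            .map(|signature| signature.0)
            .try_fold(IndexSet::new(), |mut acc, index| {
                if !acc.insert(index) {
                    return Err("Duplicate signature");
                }
                Ok(acc)
            })?;

        Ok(Committed {
            signatures: self.signatures,
        })
    }
}

impl Decode for Committed {
    fn decode<I: Input>(input: &mut I) -> Result<Self, parity_scale_codec::Error> {
        // `parity_scale_codec::Error` can be built from a `&'static str`,
        // which is what the `.map_err(Into::into)` calls in the diff rely on.
        CommittedCandidate::decode(input)?
            .validate()
            .map_err(Into::into)
    }
}
```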